/* Copyright William Schelter. All rights reserved.

   Stratified Garbage Collection (SGC)

   Write-protects pages in order to record which ones have been written
   to recently, allowing for more efficient garbage collection.  */
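/* Illustrative sketch of the underlying write-barrier idea (not part of
   this file's build): protect the stable part of the heap read-only and
   let the fault handler record which pages the mutator actually writes
   before restoring write access.  The names heap, dirty[] and
   page_index() are hypothetical and exist only for this standalone
   example; heap is assumed to point at a page-aligned mapped region. */
#if 0
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define NPAGES 1024
static char *heap;                    /* base of the protected region */
static unsigned char dirty[NPAGES];   /* one dirty bit per page */

static long page_index(void *addr) {
  return ((char *)addr - heap) / sysconf(_SC_PAGESIZE);
}

static void write_barrier_handler(int sig, siginfo_t *si, void *ctx) {
  long ps = sysconf(_SC_PAGESIZE);
  long pg = page_index(si->si_addr);
  dirty[pg] = 1;                                     /* remember the write */
  mprotect(heap + pg*ps, ps, PROT_READ|PROT_WRITE);  /* and let it proceed */
}

static void install_write_barrier(void) {
  long ps = sysconf(_SC_PAGESIZE);
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = write_barrier_handler;
  sa.sa_flags = SA_SIGINFO;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGSEGV, &sa, NULL);
  mprotect(heap, NPAGES*ps, PROT_READ);   /* write-protect the whole region */
}
#endif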
sgc_mark_object1(object);
sgc_mprotect(long, long, int);
/* ulong may have been defined in mp.h but the define is no longer needed */
#define PROT_READ_WRITE_EXEC (PROT_READ | PROT_WRITE | PROT_EXEC)
#define PROT_READ_EXEC (PROT_READ|PROT_EXEC)
#include <sys/vmuser.h>
#define PROT_READ_EXEC RDONLY /*FIXME*/
#define PROT_READ_WRITE_EXEC UDATAKEY
#define PROT_READ_WRITE_EXEC PAGE_EXECUTE_READWRITE
#define PROT_READ_EXEC PAGE_READONLY /*FIXME*/
int gclmprotect ( void *addr, size_t len, int prot ) {
rv = VirtualProtect ( (LPVOID) addr, len, prot, &old );
fprintf ( stderr, "mprotect: VirtualProtect %p %lu %d failed\n", addr, (unsigned long) len, prot );
/* Avoid clash with libgcc's mprotect */
#define mprotect gclmprotect
#include <sys/ucontext.h>
/* void segmentation_catcher(void); */
#define sgc_mark_pack_list(u) \
  do {register object xtmp = u; \
      while (xtmp != Cnil) \
        {if (ON_WRITABLE_PAGE(xtmp)) {mark(xtmp);} \
         sgc_mark_object(xtmp->c.c_car); \
         xtmp=Scdr(xtmp);}}while(0)
/* sgc_mark_cons(object x) { */
/* /\* x is already marked. *\/ */
/* if(x==sdebug) joe1(); */
/* sgc_mark_object(x->c.c_car); */
/* IF_WRITABLE(x->c.c_car, goto MARK_CAR;); */
/* if (!is_marked_or_free(x->c.c_car)) { */
/* if (consp(x->c.c_car)) { */
/* mark(x->c.c_car); */
/* sgc_mark_cons(x->c.c_car); */
/* sgc_mark_object1(x->c.c_car);} */
/* /\* if (is_imm_fixnum(x->c.c_cdr)) return; *\/ */
/* IF_WRITABLE(x, goto WRITABLE_CDR;); */
/* if (is_marked_or_free(x)) return; */
/* if (consp(x)) { */
/* sgc_mark_object1(x); */
sgc_mark_cons(object x) {
sgc_mark_object(x->c.c_car);
if (!IS_WRITABLE(page(x)) || is_marked_or_free(x)) /* catches Cnil */
} while (cdr_listp(x));
/* Whenever two arrays are linked together by displacement,
   if one is live, the other will be made live */
#define sgc_mark_displaced_field(ar) sgc_mark_object(ar->a.a_displaced)
/* Structures and arrays of type t need to be marked if their
   bodies are not write protected, even if the headers are.
   So we should keep these on pages particular to them.
   Actually we will change structure sets to touch the structure
   header; that way we won't have to keep the headers in memory.
   This takes only 1.47 as opposed to 1.33 microseconds per set. */
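/* Illustrative sketch of the "touch the header" trick described above (an
   assumption about the intent, not this file's actual structure-set code):
   the slot store may land on a separately allocated body page, so a dummy
   write to the header itself dirties the header's page under the mprotect
   write barrier, and the structure is rescanned on the next SGC. */
#if 0
static void structure_set_touching_header(object x, int i, object v) {
  x->str.str_self[i] = v;                     /* the real slot store */
  *(volatile char *)x = *(volatile char *)x;  /* dummy write: dirties the header page */
}
#endif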
sgc_mark_object1(object x) {
if (x == OBJNULL || !ON_WRITABLE_PAGE(x))
IF_WRITABLE(x,goto OK);
if (is_marked_or_free(x))
if(x==sdebug) joe1();
sgc_mark_object(x->rat.rat_num);
IF_WRITABLE(x,if(!is_marked_or_free(x)) goto BEGIN);
sgc_mark_object(x->cmp.cmp_imag);
IF_WRITABLE(x,if(!is_marked_or_free(x)) goto BEGIN);
IF_WRITABLE(x->s.s_plist,if(!is_marked_or_free(x->s.s_plist))
{/* mark(x->s.s_plist); */
sgc_mark_cons(x->s.s_plist);});
sgc_mark_object(x->s.s_gfdef);
sgc_mark_object(x->s.s_dbind);
if (x->s.s_self == NULL)
if (inheap(x->s.s_self)) {
if (what_to_collect == t_contiguous)
mark_contblock(x->s.s_self,x->s.s_fillp);
} else if (SGC_RELBLOCK_P(x->s.s_self) && COLLECT_RELBLOCK_P)
x->s.s_self = copy_relblock(x->s.s_self, x->s.s_fillp);
sgc_mark_object(x->p.p_name);
sgc_mark_object(x->p.p_nicknames);
sgc_mark_object(x->p.p_shadowings);
sgc_mark_object(x->p.p_uselist);
sgc_mark_object(x->p.p_usedbylist);
if (what_to_collect == t_contiguous) {
if (x->p.p_internal != NULL)
mark_contblock((char *)(x->p.p_internal),
x->p.p_internal_size*sizeof(object));
if (x->p.p_external != NULL)
mark_contblock((char *)(x->p.p_external),
x->p.p_external_size*sizeof(object));
sgc_mark_object(x->ht.ht_rhsize);
sgc_mark_object(x->ht.ht_rhthresh);
if (x->ht.ht_self == NULL)
for (i = 0, j = x->ht.ht_size; i < j; i++) {
if (ON_WRITABLE_PAGE(&x->ht.ht_self[i])) {
sgc_mark_object(x->ht.ht_self[i].hte_key);
sgc_mark_object(x->ht.ht_self[i].hte_value);
if (inheap(x->ht.ht_self)) {
if (what_to_collect == t_contiguous)
mark_contblock((char *)(x->ht.ht_self),j * sizeof(struct htent));
} else if (SGC_RELBLOCK_P(x->ht.ht_self) && COLLECT_RELBLOCK_P)
x->ht.ht_self=(void *)copy_relblock((char *)x->ht.ht_self,j*sizeof(struct htent));
if ((x->a.a_displaced) != Cnil)
sgc_mark_displaced_field(x);
if (x->a.a_dims != NULL) {
if (inheap(x->a.a_dims)) {
if (what_to_collect == t_contiguous)
mark_contblock((char *)(x->a.a_dims),sizeof(int)*x->a.a_rank);
} else if (SGC_RELBLOCK_P(x->a.a_dims) && COLLECT_RELBLOCK_P)
x->a.a_dims = (int *) copy_relblock((char *)(x->a.a_dims),sizeof(int)*x->a.a_rank);
if ((enum aelttype)x->a.a_elttype == aet_ch)
if ((enum aelttype)x->a.a_elttype == aet_bit)
if ((enum aelttype)x->a.a_elttype == aet_object)
cp = (char *)(x->fixa.fixa_self);
/* set j to the size in bytes of the body of the array */
switch((enum aelttype)x->a.a_elttype){
j= sizeof(longfloat)*x->lfa.lfa_dim;
if ((COLLECT_RELBLOCK_P) && !(inheap(cp)) && SGC_RELBLOCK_P(x->a.a_self))
ROUND_RB_POINTERS_DOUBLE;/*FIXME gc space violation*/
j=sizeof(char)*x->a.a_dim;
j=sizeof(short)*x->a.a_dim;
j=sizeof(fixnum)*x->fixa.fixa_dim;}
|| (char *)p >= core_end
if (x->a.a_displaced->c.c_car == Cnil)
for (i = 0, j = x->a.a_dim; i < j; i++)
if (ON_WRITABLE_PAGE(&p[i]))
sgc_mark_object(p[i]);
if (what_to_collect == t_contiguous)
mark_contblock(cp, j);
} else if (SGC_RELBLOCK_P(cp) && COLLECT_RELBLOCK_P) {
if (x->a.a_displaced == Cnil) {
if (!NULL_OR_ON_C_STACK(cp)) /* only if body of array not on C stack */
x->a.a_self = (object *)copy_relblock(cp, j);
} else if (x->a.a_displaced->c.c_car == Cnil) {
i = (long)(object *)copy_relblock(cp, j) - (long)(x->a.a_self);
adjust_displaced(x, i);
if ((x->v.v_displaced) != Cnil)
sgc_mark_displaced_field(x);
if ((enum aelttype)x->v.v_elttype == aet_object)
if (TYPE_MAP(page(x->big.big_self)) < t_contiguous)
printf("bad body for %p (%p)\n",x,cp);
#ifndef GMP_USE_MALLOC
cp = (char *)MP_SELF(x);
j = j * MP_LIMB_SIZE;
if (what_to_collect == t_contiguous)
mark_contblock(cp, j);
} else if (SGC_RELBLOCK_P(cp) && COLLECT_RELBLOCK_P)
MP_SELF(x) = (void *) copy_relblock(cp, j);
#endif /* not GMP_USE_MALLOC */
if ((x->st.st_displaced) != Cnil)
sgc_mark_displaced_field(x);
if (what_to_collect == t_contiguous)
mark_contblock(cp, j);
} else if (SGC_RELBLOCK_P(cp) && COLLECT_RELBLOCK_P) {
if (x->st.st_displaced == Cnil)
x->st.st_self = copy_relblock(cp, j);
else if (x->st.st_displaced->c.c_car == Cnil) {
i = copy_relblock(cp, j) - cp;
adjust_displaced(x, i);
if ((x->bv.bv_displaced) != Cnil)
sgc_mark_displaced_field(x);
/* We make bitvectors a multiple of sizeof(int) in allocated size.
   Assume 8 bits per char. */
#define W_SIZE (8*sizeof(fixnum))
((BV_OFFSET(x) + x->bv.bv_dim + W_SIZE -1)/W_SIZE);
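/* Worked example (illustrative): with 64-bit fixnums W_SIZE is 64, so a
   bitvector with bv_dim 130 whose data starts at bit offset 3 occupies
   (3 + 130 + 63)/64 = 3 fixnum-sized words. */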
sgc_mark_object(x->str.str_def);
object def=x->str.str_def;
unsigned char *s_type = &SLOT_TYPE(def,0);
unsigned short *s_pos = &SLOT_POS (def,0);
for (i = 0, j = S_DATA(def)->length; i < j; i++)
if (s_type[i]==0 && ON_WRITABLE_PAGE(&STREF(object,x,s_pos[i])))
sgc_mark_object(STREF(object,x,s_pos[i]));
if (inheap(x->str.str_self)) {
if (what_to_collect == t_contiguous)
mark_contblock((char *)p,S_DATA(def)->size);
} else if (SGC_RELBLOCK_P(p) && (COLLECT_RELBLOCK_P))
x->str.str_self = (object *) copy_relblock((char *)p, S_DATA(def)->size);
switch (x->sm.sm_mode) {
sgc_mark_object(x->sm.sm_object0);
sgc_mark_object(x->sm.sm_object1);
FILE *fp = x->sm.sm_fp;
if (fp != 0 && fp != stdin && fp != stdout) {
if (what_to_collect == t_contiguous &&
mark_contblock(x->sm.sm_buffer, BUFSIZ);
sgc_mark_object(x->sm.sm_object0);
case smm_concatenated:
sgc_mark_object(x->sm.sm_object0);
sgc_mark_object(x->sm.sm_object0);
sgc_mark_object(x->sm.sm_object1);
case smm_string_input:
case smm_string_output:
sgc_mark_object(x->sm.sm_object0);
#ifdef USER_DEFINED_STREAMS
case smm_user_defined:
sgc_mark_object(x->sm.sm_object0);
sgc_mark_object(x->sm.sm_object1);
error("mark stream botch");
#define SGC_MARK_CP(a_,b_) {fixnum _t=(b_);if (inheap((a_))) {\
if (what_to_collect == t_contiguous) mark_contblock((void *)(a_),_t); \
} else if (SGC_RELBLOCK_P((a_)) && COLLECT_RELBLOCK_P) (a_)=(void *)copy_relblock((void *)(a_),_t);}
#define SGC_MARK_MP(a_) {if ((a_)->_mp_d) SGC_MARK_CP((a_)->_mp_d,(a_)->_mp_alloc*MP_LIMB_SIZE);}
SGC_MARK_MP(x->rnd.rnd_state._mp_seed);
#if __GNU_MP_VERSION < 4 || (__GNU_MP_VERSION == 4 && __GNU_MP_VERSION_MINOR < 2)
if (x->rnd.rnd_state._mp_algdata._mp_lc) {
SGC_MARK_MP(x->rnd.rnd_state._mp_algdata._mp_lc->_mp_a);
if (!x->rnd.rnd_state._mp_algdata._mp_lc->_mp_m2exp) SGC_MARK_MP(x->rnd.rnd_state._mp_algdata._mp_lc->_mp_m);
SGC_MARK_CP(x->rnd.rnd_state._mp_algdata._mp_lc,sizeof(*x->rnd.rnd_state._mp_algdata._mp_lc));
if (x->rt.rt_self == NULL)
if (what_to_collect == t_contiguous)
mark_contblock((char *)(x->rt.rt_self),RTABSIZE*sizeof(struct rtent));
for (i = 0; i < RTABSIZE; i++) {
sgc_mark_object(x->rt.rt_self[i].rte_macro);
if (x->rt.rt_self[i].rte_dtab != NULL) {
if (what_to_collect == t_contiguous)
mark_contblock((char *)(x->rt.rt_self[i].rte_dtab),RTABSIZE*sizeof(object));
for (j = 0; j < RTABSIZE; j++)
sgc_mark_object(x->rt.rt_self[i].rte_dtab[j]);
sgc_mark_object(x->pn.pn_host);
sgc_mark_object(x->pn.pn_device);
sgc_mark_object(x->pn.pn_directory);
sgc_mark_object(x->pn.pn_name);
sgc_mark_object(x->pn.pn_type);
sgc_mark_object(x->pn.pn_version);
if (what_to_collect == t_contiguous)
mark_contblock(x->cc.cc_turbo,x->cc.cc_envdim);
for (i= 0 ; i < x->cc.cc_envdim ; i++)
sgc_mark_object(x->cc.cc_turbo[i]);
sgc_mark_object(x->cf.cf_name);
sgc_mark_object(x->cf.cf_data);
if (x->cfd.cfd_self != NULL) {
int i=x->cfd.cfd_fillp;
sgc_mark_object(x->cfd.cfd_self[i]);
if (what_to_collect == t_contiguous) {
mark_contblock(x->cfd.cfd_start, x->cfd.cfd_size);
mark_link_array(x->cfd.cfd_start,x->cfd.cfd_start+x->cfd.cfd_size);
sgc_mark_object(x->cc.cc_name);
sgc_mark_object(x->cc.cc_env);
sgc_mark_object(x->cc.cc_data);
if (x->cc.cc_turbo!=NULL) sgc_mark_object(*(x->cc.cc_turbo-1));
if (what_to_collect == t_contiguous) {
if (x->cc.cc_turbo != NULL)
mark_contblock((char *)(x->cc.cc_turbo-1),(1+fix(*(x->cc.cc_turbo-1)))*sizeof(object));
549
printf("\ttype = %d\n", type_of(x));
557
sgc_mark_phase(void) {
560
STATIC struct package *pp;
562
STATIC frame_ptr frp;
564
STATIC struct pageinfo *v;
566
sgc_mark_object(Cnil->s.s_plist);
567
sgc_mark_object(Ct->s.s_plist);
569
/* mark all non recent data on writable pages */
571
long t,i=page(heap_end);
572
struct typemanager *tm;
575
for (v=cell_list_head;v;v=v->next) {
577
if (!WRITABLE_PAGE_P(i)) continue;
582
for (j = tm->tm_nppage; --j >= 0; p += tm->tm_size) {
583
object x = (object) p;
584
if (SGC_OR_M(x)) continue;
590
/* mark all non recent data on writable contiguous pages */
591
if (what_to_collect == t_contiguous)
592
for (v=contblock_list_head;v;v=v->next)
593
if (v->sgc_flags&SGC_PAGE_FLAG) {
594
void *s=CB_DATA_START(v),*e=CB_DATA_END(v),*p,*q;
595
bool z=get_sgc_bit(v,s);
599
set_mark_bits(v,p,q);
605
mark_stack_carefully(vs_top-1,vs_org,0);
606
mark_stack_carefully(MVloc+(sizeof(MVloc)/sizeof(object)),MVloc,0);
608
for (bdp = bds_org; bdp<=bds_top; bdp++) {
609
sgc_mark_object(bdp->bds_sym);
610
sgc_mark_object(bdp->bds_val);
613
for (frp = frs_org; frp <= frs_top; frp++)
614
sgc_mark_object(frp->frs_val);
616
for (ihsp = ihs_org; ihsp <= ihs_top; ihsp++)
617
sgc_mark_object(ihsp->ihs_function);
619
for (i = 0; i < mark_origin_max; i++)
620
sgc_mark_object(*mark_origin[i]);
621
for (i = 0; i < mark_origin_block_max; i++)
622
for (j = 0; j < mark_origin_block[i].mob_size; j++)
623
sgc_mark_object(mark_origin_block[i].mob_addr[j]);
625
for (pp = pack_pointer; pp != NULL; pp = pp->p_link)
626
sgc_mark_object((object)pp);
628
if (ovm_process_created)
629
sgc_mark_all_stacks();
634
printf("symbol navigation\n");
641
for (pp = pack_pointer; pp != NULL; pp = pp->p_link) {
642
size = pp->p_internal_size;
643
if (pp->p_internal != NULL)
644
for (i = 0; i < size; i++)
645
sgc_mark_pack_list(pp->p_internal[i]);
646
size = pp->p_external_size;
647
if (pp->p_external != NULL)
648
for (i = 0; i < size; i++)
649
sgc_mark_pack_list(pp->p_external[i]);
653
mark_c_stack(0,N_RECURSION_REQD,mark_stack_carefully);
658
sgc_sweep_phase(void) {
662
STATIC struct typemanager *tm;
665
STATIC struct pageinfo *v;
667
for (v=cell_list_head;v;v=v->next) {
669
tm = tm_of((enum type)v->type);
671
if (!WRITABLE_PAGE_P(page(v)))
674
p = pagetochar(page(v));
679
if (v->sgc_flags&SGC_PAGE_FLAG) {
681
for (j = tm->tm_nppage; --j >= 0; p += size) {
687
else if (is_marked(x)) {
692
if (TYPEWORD_TYPE_P(pageinfo(x)->type) && x->d.s == SGC_NORMAL)
695
/* it is ok to free x */
699
if (TYPEWORD_TYPE_P(v->type)) x->d.s = SGC_RECENT;
708
} else /*non sgc_page */
709
for (j = tm->tm_nppage; --j >= 0; p += size) {
711
if (is_marked(x) && !is_free(x)) {
721
sgc_contblock_sweep_phase(void) {
723
STATIC char *s, *e, *p, *q;
724
STATIC struct pageinfo *v;
728
for (v=contblock_list_head;v;v=v->next) {
731
if (!(v->sgc_flags&SGC_PAGE_FLAG)) continue;
738
q=get_mark_bits(v,p);
740
insert_contblock(p,q-p);
745
bzero(CB_MARK_START(v),CB_SGCF_START(v)-CB_MARK_START(v));
755
#define PAGE_ROUND_UP(adr) \
756
((char *)(PAGESIZE*(((long)(adr)+PAGESIZE -1) >> PAGEWIDTH)))
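/* Worked example (illustrative): with PAGESIZE 4096 and PAGEWIDTH 12,
   PAGE_ROUND_UP(0x5001) is 4096*((0x5001+0xfff)>>12) = 4096*6 = 0x6000,
   i.e. the address is rounded up to the next page boundary, while an
   address already on a boundary is returned unchanged. */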
/* char *old_rb_start; */
sgc_count(object yy) {
printf("[length %p = %d]",yy,count);
fixnum writable_pages=0;
/* count writable pages excluding the hole */
sgc_count_writable(void) {
return page(core_end)-page(rb_start)+writable_pages-(page(old_rb_start)-page(heap_end));
sgc_count_type(int t) {
if (t==t_relocatable)
return page(rb_limit)-page(rb_start);
return tm_of(t)->tm_npage-tm_of(t)->tm_alt_npage;
#ifdef SGC_CONT_DEBUG
pcb(struct contblock *p) {
for (;p;p=p->cb_link)
printf("%p %d\n",p,p->cb_size);
overlap_check(struct contblock *t1,struct contblock *t2) {
for (;t1;t1=t1->cb_link) {
fprintf(stderr,"%p not in heap\n",t1);
for (p=t2;p;p=p->cb_link) {
fprintf(stderr,"%p not in heap\n",p);
if ((p<=t1 && (void *)p+p->cb_size>(void *)t1) ||
(t1<=p && (void *)t1+t1->cb_size>(void *)p)) {
fprintf(stderr,"Overlap %u %p %u %p\n",t1->cb_size,t1,p->cb_size,p);
fprintf(stderr,"circle detected at %p\n",p);
if (t1==t1->cb_link) {
fprintf(stderr,"circle detected at %p\n",t1);
tcc(struct contblock *t) {
for (;t;t=t->cb_link) {
fprintf(stderr,"%p not in heap\n",t);
fprintf(stderr,"%u at %p\n",t->cb_size,t);
fprintf(stderr,"circle detected at %p\n",t);
typedef enum {memprotect_none,memprotect_cannot_protect,memprotect_sigaction,
memprotect_bad_return,memprotect_no_signal,
memprotect_multiple_invocations,memprotect_no_restart,
memprotect_bad_fault_address,memprotect_success} memprotect_enum;
static volatile memprotect_enum memprotect_result;
static int memprotect_handler_invocations,memprotect_print_enable;
static void *memprotect_test_address;
#define MEM_ERR_CASE(a_) \
  fprintf(stderr,"The SGC segfault recovery test failed with %s, SGC disabled\n",#a_); \
memprotect_print(void) {
if (!memprotect_print_enable)
switch(memprotect_result) {
case memprotect_none: case memprotect_success:
MEM_ERR_CASE(memprotect_cannot_protect);
MEM_ERR_CASE(memprotect_sigaction);
MEM_ERR_CASE(memprotect_bad_return);
MEM_ERR_CASE(memprotect_no_signal);
MEM_ERR_CASE(memprotect_no_restart);
MEM_ERR_CASE(memprotect_bad_fault_address);
MEM_ERR_CASE(memprotect_multiple_invocations);
memprotect_handler_test(int sig, long code, void *scp, char *addr) {
faddr=GET_FAULT_ADDR(sig,code,scp,addr);
if (memprotect_handler_invocations) {
memprotect_result=memprotect_multiple_invocations;
memprotect_handler_invocations=1;
if (faddr!=memprotect_test_address)
memprotect_result=memprotect_bad_fault_address;
memprotect_result=memprotect_none;
mprotect(memprotect_test_address,PAGESIZE,PROT_READ_WRITE_EXEC);
memprotect_test(void) {
unsigned long p=PAGESIZE;
struct sigaction sa,sao,saob;
if (memprotect_result!=memprotect_none)
return memprotect_result!=memprotect_success;
if (atexit(memprotect_print)) {
fprintf(stderr,"Cannot set up memprotect_print on exit\n");
if (!(b1=alloca(2*p))) {
memprotect_result=memprotect_cannot_protect;
if (!(b2=alloca(p))) {
memprotect_result=memprotect_cannot_protect;
memprotect_test_address=(void *)(((unsigned long)b1+p-1) & ~(p-1));
sa.sa_sigaction=(void *)memprotect_handler_test;
sa.sa_flags=MPROTECT_ACTION_FLAGS;
if (sigaction(SIGSEGV,&sa,&sao)) {
memprotect_result=memprotect_sigaction;
if (sigaction(SIGBUS,&sa,&saob)) {
sigaction(SIGSEGV,&sao,NULL);
memprotect_result=memprotect_sigaction;
{ /* mips kernel bug test -- SIGBUS with no faddr when floating point is emulated. */
float *f1=(void *)memprotect_test_address,*f2=(void *)b2,*f1e=f1+p/sizeof(*f1);
if (mprotect(memprotect_test_address,p,PROT_READ_EXEC)) {
memprotect_result=memprotect_cannot_protect;
memprotect_result=memprotect_bad_return;
for (;f1<f1e;) *f1++=*f2;
if (memprotect_result==memprotect_bad_return)
memprotect_result=memprotect_no_signal;
if (memprotect_result!=memprotect_none) {
sigaction(SIGSEGV,&sao,NULL);
sigaction(SIGBUS,&saob,NULL);
memprotect_handler_invocations=0;
if (mprotect(memprotect_test_address,p,PROT_READ_EXEC)) {
memprotect_result=memprotect_cannot_protect;
memprotect_result=memprotect_bad_return;
memset(memprotect_test_address,0,p);
if (memprotect_result==memprotect_bad_return)
memprotect_result=memprotect_no_signal;
if (memprotect_result!=memprotect_none) {
sigaction(SIGSEGV,&sao,NULL);
sigaction(SIGBUS,&saob,NULL);
if (memcmp(memprotect_test_address,b2,p)) {
memprotect_result=memprotect_no_restart;
sigaction(SIGSEGV,&sao,NULL);
sigaction(SIGBUS,&saob,NULL);
memprotect_result=memprotect_success;
sigaction(SIGSEGV,&sao,NULL);
sigaction(SIGBUS,&saob,NULL);
do_memprotect_test(void) {
memprotect_print_enable=1;
if (memprotect_test()) {
memprotect_print_enable=0;
memprotect_test_reset(void) {
memprotect_result=memprotect_none;
memprotect_handler_invocations=0;
memprotect_test_address=NULL;
do_memprotect_test();
#define MMIN(a,b) ({long _a=a,_b=b;_a<_b ? _a : _b;})
#define MMAX(a,b) ({long _a=a,_b=b;_a>_b ? _a : _b;})
/* If opt_maxpage is set, don't lose balancing information gained thus
   far if we are triggered 'artificially' via a hole overrun. FIXME --
   try to allocate a small working set with the right proportions
   later on. 20040804 CM*/
#define WSGC(tm) ({struct typemanager *_tm=tm;long _t=MMAX(MMIN(_tm->tm_opt_maxpage,_tm->tm_npage),_tm->tm_sgc);_t*scale;})
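/* Worked example (illustrative): with tm_opt_maxpage 100, tm_npage 40,
   tm_sgc 20 and scale 1.0, WSGC yields MMAX(MMIN(100,40),20)*1.0 = 40
   pages as the SGC working-set target for that type. */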
/* If opt_maxpage is set, add full pages to the sgc set if needed */
/* #define FSGC(tm) (tm->tm_type==t_cons ? tm->tm_nppage : (tm->tm_opt_maxpage ? 0 : tm->tm_sgc_minfree)) */
#define FSGC(tm) (!TYPEWORD_TYPE_P(tm->tm_type) ? tm->tm_nppage : tm->tm_sgc_minfree)
DEFVAR("*WRITABLE*",sSAwritableA,SI,Cnil,"");
unsigned char *wrimap=NULL;
long i,count,minfree,allocate_more_pages=!saving_system && 10*available_pages>2*(real_maxpage-first_data_page);
struct typemanager *tm;
object omp=sSAoptimize_maximum_pagesA->s.s_dbind;
sSAoptimize_maximum_pagesA->s.s_dbind=Cnil;
if (memprotect_result!=memprotect_success && do_memprotect_test())
/* Reset maxpage statistics if not invoked automatically on a hole
   overrun. 20040804 CM*/
/* if (!hole_overrun) { */
/* object *old_vs_base=vs_base; */
/* vs_base=vs_top; */
/* FFN(siLreset_gbc_count)(); */
/* vs_base=old_vs_base; */
for (i=t_start,scale=1.0,tmp=0.0;i<t_other;i++)
if (TM_BASE_TYPE_P(i))
tmp+=WSGC(tm_of(i));
tmp+=WSGC(tm_of(t_relocatable));
scale=tmp>available_pages/10 ? (float)available_pages/(10*tmp) : 1.0;
for (i= t_start; i < t_contiguous ; i++) {
if (!TM_BASE_TYPE_P(i) || !(np=(tm=tm_of(i))->tm_sgc)) continue;
minfree = FSGC(tm) > 0 ? FSGC(tm) : 1;
for (v=cell_list_head;v && (count<MMAX(tm->tm_sgc_max,WSGC(tm)));v=v->next) {
if (v->type!=i || tm->tm_nppage-v->in_use<minfree) continue;
v->sgc_flags|=SGC_PAGE_FLAG;
if (count<WSGC(tm) && !FSGC(tm))
for (v=cell_list_head;v && (count<MMAX(tm->tm_sgc_max,WSGC(tm)));v=v->next) {
if (v->type!=i || tm->tm_nppage!=v->in_use) continue;
v->sgc_flags|=SGC_PAGE_FLAG;
if (count >= MMAX(tm->tm_sgc_max,WSGC(tm)))
/* don't do any more allocations for this type if saving system */
if (!allocate_more_pages)
if (count < WSGC(tm)) {
/* try to get some more free pages of type i */
long n = WSGC(tm) - count;
long again=0,nfree = tm->tm_nfree;
char *p=alloc_page(n);
if (tm->tm_nfree > nfree) again=1; /* gc freed some objects */
if (tm->tm_npage+n>tm->tm_maxpage)
if (!set_tm_maxpage(tm,tm->tm_npage+n))
/* (sgc_enabled=1,add_page_to_freelist(p,tm),sgc_enabled=0); */
add_page_to_freelist(p,tm);
goto FIND_FREE_PAGES;
/* SGC cont pages: Here we implement the contblock page division into
   SGC and non-SGC types. Unlike the other types, we need *whole*
   free pages for contblock SGC, as there is no persistent data
   element (e.g. .m) on an allocated block itself which can indicate
   its live status. If anything on a page which is to be marked
   read-only points to a live object on an SGC cont page, it will
   never be marked and will be erroneously swept. It is also possible
   for dead objects to unnecessarily mark dead regions on SGC pages
   and delay sweeping until the pointing type is GC'ed if SGC is
   turned off for the pointing type, e.g. tm_sgc=0. (This was so by
   default for a number of types, including bignums, and has now been
   corrected in gcl_init_alloc in alloc.c.) We can't get around this
   AFAICT, as old data on (writable) SGC pages must be marked lest it
   be lost, and (old) data on now writable non-SGC pages might point
   to live regions on SGC pages, yet might not themselves be reachable
   from the mark origin through an unbroken chain of writable pages.
   In any case, the possibility of a lot of garbage marks on contblock
   pages, especially when the blocks are small as in bignums, makes
   necessary the sweeping of minimal contblocks to prevent leaks. CM */
struct pageinfo *pi;
struct contblock **cbpp;
tm=tm_of(t_contiguous);
for (pi=contblock_list_head;pi && count<WSGC(tm);pi=pi->next) {
p=CB_DATA_START(pi);
for (cbpp=&cb_pointer,j=0;*cbpp;cbpp=&(*cbpp)->cb_link)
if ((void*)*cbpp>=p && (void *)*cbpp<pe)
j+=(*cbpp)->cb_size;
if (j*tm->tm_nppage<FSGC(tm)*(CB_DATA_END(pi)-CB_DATA_START(pi))) continue;
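/* Illustrative reading of the test above (an interpretation, not code from
   this file): j is the number of free bytes found on the page, so the page
   is skipped unless its free fraction j/CB_DATA_SIZE is at least
   FSGC(tm)/tm_nppage, the same per-page free threshold used for cell pages. */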
pi->sgc_flags=SGC_PAGE_FLAG;
i=allocate_more_pages ? WSGC(tm) : (saving_system ? 1 : 0);
/* SGC cont pages: allocate more if necessary, dumping possible
   GBC freed pages onto the old contblock list. CM 20030827*/
unsigned long z=(i-count)+1;
void *old_contblock_list_tail=contblock_list_tail;
if (maxcbpage<ncbpage+z)
if (!set_tm_maxpage(tm_table+t_contiguous,ncbpage+z))
add_pages(tm_table+t_contiguous,z);
massert(old_contblock_list_tail!=contblock_list_tail);
contblock_list_tail->sgc_flags=SGC_PAGE_FLAG;
/* Now allocate the sgc relblock. We do this as the tail
   end of the ordinary rb. */
tm=tm_of(t_relocatable);
old_rb_start=rb_start;
if(((unsigned long)WSGC(tm)) && allocate_more_pages) {
new=alloc_relblock(((unsigned long)WSGC(tm))*PAGESIZE);
/* the above may cause a gc, shifting the relblock */
old_rb_start=rb_start;
new= PAGE_ROUND_UP(new);
} else new=PAGE_ROUND_UP(rb_pointer);
rb_start=rb_pointer=new;
/* the relblock has been allocated */
sSAwritableA->s.s_dbind=fSmake_vector1_1((page(rb_start)-first_data_page),aet_bit,Cnil);
wrimap=(void *)sSAwritableA->s.s_dbind->v.v_self;
/* now move the sgc free lists into place. alt_free should
   contain the others */
for (i= t_start; i < t_contiguous ; i++)
if (TM_BASE_TYPE_P(i) && (np=(tm=tm_of(i))->tm_sgc)) {
object f=tm->tm_free,x,y,next;
while (f!=OBJNULL) {
printf("Not FREE in freelist f=%p",f);
if (pageinfo(f)->sgc_flags&SGC_PAGE_FLAG) {
if (TYPEWORD_TYPE_P(pageinfo(f)->type)) f->d.s = SGC_RECENT;
if (TYPEWORD_TYPE_P(pageinfo(f)->type)) f->d.s = SGC_NORMAL;
tm->tm_alt_free = y;
tm->tm_alt_nfree = tm->tm_nfree - count;
struct pageinfo *pi;
struct contblock *new_cb_pointer=NULL,*tmp_cb_pointer=NULL,**cbpp;
struct pageinfo *pi;
for (pi=contblock_list_head;pi;pi=pi->next) {
if (pi->sgc_flags!=SGC_PAGE_FLAG) continue;
p=CB_DATA_START(pi);
pe=p+CB_DATA_SIZE(pi->in_use);
for (cbpp=&cb_pointer;*cbpp;)
if ((void *)*cbpp>=p && (void *)*cbpp<pe) {
void *s=*cbpp,*e=s+(*cbpp)->cb_size,*l=(*cbpp)->cb_link;
set_sgc_bits(pi,s,e);
tmp_cb_pointer=cb_pointer;
cb_pointer=new_cb_pointer;
insert_contblock(s,e-s);
new_cb_pointer=cb_pointer;
cb_pointer=tmp_cb_pointer;
cbpp=&(*cbpp)->cb_link;
/* SGC contblock pages: switch to new free SGC contblock list. CM */
old_cb_pointer=cb_pointer;
cb_pointer=new_cb_pointer;
#ifdef SGC_CONT_DEBUG
overlap_check(old_cb_pointer,cb_pointer);
for (i=t_start;i<t_other;i++)
tm_of(i)->tm_alt_npage=0;
for (pi=cell_list_head;pi;pi=pi->next) {
if (pi->sgc_flags&SGC_WRITABLE)
SET_WRITABLE(page(pi));
tm_of(pi->type)->tm_alt_npage++;
#ifndef NO_SETBUF /*FIXME, implement restartable getc with read in readc_stream*/
if (pi->type!=(tm=tm_of(t_stream))->tm_type) continue;
for (v=pagetochar(page(pi)),ve=v+tm->tm_nppage*tm->tm_size;v<ve;v+=tm->tm_size) {
if (type_of(x)!=t_stream || is_free(x)) continue;
if (x->sm.sm_buffer)
for (i=page(x->sm.sm_buffer);i<=page(x->sm.sm_buffer+BUFSIZ-1);i++)
for (pi=contblock_list_head;pi;pi=pi->next)/*FIXME*/
if (pi->sgc_flags&SGC_WRITABLE)
for (i=0;i<pi->in_use;i++)
SET_WRITABLE(page(pi)+i);
tm_of(t_contiguous)->tm_alt_npage+=pi->in_use;
extern object gprof_array;
if (gprof_array!=Cnil)
for (i=0;i<(gprof_array->st.st_fillp +PAGESIZE-1)/PAGESIZE;i++)
SET_WRITABLE(page(gprof_array->st.st_self)+i);
for (i=page(heap_end);i<page(old_rb_start);i++)
tm_of(t_relocatable)->tm_alt_npage=page(rb_start)-page(old_rb_start);
for (i=page(rb_start);i<page(core_end);i++)
/* Whew. We have now allocated the sgc space
   and modified the tm_table.
   Turn memory protection on for the pages which are writable. */
if (sSAnotify_gbcA->s.s_dbind != Cnil) {
sSAoptimize_maximum_pagesA->s.s_dbind=omp;
/* pdebug(void) { */
/* extern object malloc_list; */
/* object x=malloc_list; */
/* struct pageinfo *v; */
/* for (;x!=Cnil;x=x->c.c_cdr) */
/* printf("%p %d\n",x->c.c_car->st.st_self,x->c.c_car->st.st_dim); */
/* for (v=contblock_list_head;v;v=v->next) */
/* printf("%p %ld\n",v,v->in_use<<12); */
struct typemanager *tm;
struct contblock *tmp_cb_pointer,*next;
unsigned long i,j,np;
if(sSAnotify_gbcA->s.s_dbind != Cnil)
printf("[SGC off]"); fflush(stdout);
sSAwritableA->s.s_dbind=Cnil;
rb_start = old_rb_start;
/* SGC cont pages: restore contblocks, each tmp_cb_pointer coming
   from the new list is guaranteed not to be on the old. Need to
   grab 'next' before insert_contblock writes it. CM 20030827 */
if (old_cb_pointer) {
#ifdef SGC_CONT_DEBUG
overlap_check(old_cb_pointer,cb_pointer);
tmp_cb_pointer=cb_pointer;
cb_pointer=old_cb_pointer;
for (;tmp_cb_pointer; tmp_cb_pointer=next) {
next=tmp_cb_pointer->cb_link;
insert_contblock((void *)tmp_cb_pointer,tmp_cb_pointer->cb_size);
for (i= t_start; i < t_contiguous ; i++)
if (TM_BASE_TYPE_P(i) && (np=(tm=tm_of(i))->tm_sgc)) {
tm->tm_free=tm->tm_alt_free;
/* tack the alt_free onto the end of free */
while(y= (object) F_LINK(f)) {
if(y->d.s != SGC_RECENT)
printf("[bad %p]",y);
if (f==tm->tm_alt_free)
while(y= F_LINK(f)) {
if(y->d.s != SGC_NORMAL)
printf("[alt_bad %p]",y);
while((y= (object) F_LINK(f))!=OBJNULL)
F_LINK(f)= (long)(tm->tm_alt_free);
/* tm->tm_free has all of the free objects */
tm->tm_nfree += tm->tm_alt_nfree;
tm->tm_alt_nfree = 0;
tm->tm_alt_free = OBJNULL;
/* remove the recent flag from any objects on sgc pages */
for (v=cell_list_head;v;v=v->next)
if (v->type==(tm=tm_of(v->type))->tm_type && TYPEWORD_TYPE_P(v->type) && v->sgc_flags & SGC_PAGE_FLAG)
for (p=pagetochar(page(v)),j=tm->tm_nppage;j>0;--j,p+=tm->tm_size)
((object) p)->d.s=SGC_NORMAL;
for (v=contblock_list_head;v;v=v->next)
if (v->sgc_flags&SGC_PAGE_FLAG)
bzero(CB_SGCF_START(v),CB_DATA_START(v)-CB_SGCF_START(v));
struct pageinfo *pi;
for (pi=cell_list_head;pi;pi=pi->next)
pi->sgc_flags&=SGC_PERM_WRITABLE;
for (pi=contblock_list_head;pi;pi=pi->next)
pi->sgc_flags&=SGC_PERM_WRITABLE;
fixnum debug_fault = 0;
fixnum fault_count = 0;
memprotect_handler(int sig, long code, void *scp, char *addr) {
void *faddr; /* Needed because we must not modify signal handler
                arguments on the stack! */
#ifdef GET_FAULT_ADDR
faddr=GET_FAULT_ADDR(sig,code,scp,addr);
debug_fault = (long) faddr;
#ifdef DEBUG_MPROTECT
printf("fault:%p [%d] (%d) ",faddr,page(faddr),faddr >= core_end);
if (faddr >= (void *)core_end || faddr < data_start) {
if (fault_count > 300) error("fault count too high");
INSTALL_MPROTECT_HANDLER;
/* p = ROUND_DOWN_PAGE_NO(p); */
if (p >= first_protectable_page
&& faddr < (void *)core_end
&& !(WRITABLE_PAGE_P(p))) {
/* CHECK_RANGE(p,1); */
#ifdef DEBUG_MPROTECT
printf("mprotect(%p,0x%x,%p)\n",
pagetoinfo(p),PAGESIZE, sbrk(0));
INSTALL_MPROTECT_HANDLER;
mprotect(pagetoinfo(p),PAGESIZE,PROT_READ_WRITE_EXEC);
INSTALL_MPROTECT_HANDLER;
segmentation_catcher(0);
sgc_mprotect(long pbeg, long n, int writable) {
/* CHECK_RANGE(pbeg,n); */
#ifdef DEBUG_MPROTECT
printf("prot[%ld,%ld,(%d),%s]\n",pbeg,pbeg+n,writable & SGC_WRITABLE,
(writable & SGC_WRITABLE ? "writable" : "not writable"));
printf("mprotect(%p,0x%lx), sbrk(0)=%p\n",
pagetoinfo(pbeg), n * PAGESIZE, sbrk(0));
if(mprotect(pagetoinfo(pbeg),n*PAGESIZE,
(writable & SGC_WRITABLE ? PROT_READ_WRITE_EXEC : PROT_READ_EXEC)))
FEerror("Couldn't protect",0);
memory_protect(int on) {
unsigned long i,beg,end= page(core_end);
extern void install_segmentation_catcher(void);
first_protectable_page=first_data_page;
/* turning it off */
sgc_mprotect(first_protectable_page,end-first_protectable_page,SGC_WRITABLE);
install_segmentation_catcher();
INSTALL_MPROTECT_HANDLER;
beg=first_protectable_page;
writable = IS_WRITABLE(beg);
for (i=beg ; ++i<= end; ) {
if (writable==IS_WRITABLE(i) && i<=end) continue;
sgc_mprotect(beg,i-beg,writable);
writable=1-writable;
FFN(siLsgc_on)(void) {
if (vs_base==vs_top) {
vs_base[0]=(sgc_enabled ? Ct : Cnil);
vs_top=vs_base+1; return;
if(vs_base[0]==Cnil)
vs_base[0]=sgc_start() ? Ct : Cnil;
system_error(void) {
FEerror("System error",0);