/* -*-C-*-

Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994,
    1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
    2006, 2007, 2008, 2009, 2010 Massachusetts Institute of Technology

This file is part of MIT/GNU Scheme.

MIT/GNU Scheme is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.

MIT/GNU Scheme is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public License
along with MIT/GNU Scheme; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
USA.

*/

/* Compiled code interface for AMD x86-64. */
33
extern void * tospace_to_newspace (void *);
36
read_cc_entry_type (cc_entry_type_t * cet, insn_t * address)
38
return (decode_old_style_format_word (cet, (((uint16_t *) address) [-2])));
42
write_cc_entry_type (cc_entry_type_t * cet, insn_t * address)
44
return (encode_old_style_format_word (cet, ((uint16_t *) address) - 2));
48
read_cc_entry_offset (cc_entry_offset_t * ceo, insn_t * address)
50
uint16_t n = (((uint16_t *) address) [-1]);
51
(ceo->offset) = (n >> 1);
52
(ceo->continued_p) = ((n & 1) != 0);
57
write_cc_entry_offset (cc_entry_offset_t * ceo, insn_t * address)
59
if (! ((ceo->offset) < 0x4000))
61
(((uint16_t *) address) [-1])
62
= (((ceo->offset) << 1) | ((ceo->continued_p) ? 1 : 0));
66
/* Compiled closures */
68
/* MOV RAX,imm64 has two bytes of opcode cruft before the imm64. */
71
read_compiled_closure_target (insn_t * start)
73
return (* ((insn_t **) (start + CC_ENTRY_HEADER_SIZE + 2)));
77
write_compiled_closure_target (insn_t * target, insn_t * start)
79
(* ((insn_t **) (start + CC_ENTRY_HEADER_SIZE + 2))) = target;
83
compiled_closure_count (SCHEME_OBJECT * block)
85
/* `block' is a pointer to the first object after the manifest. The
86
first object following it is the entry count. */
87
return ((unsigned long) (* ((uint32_t *) block)));
91
compiled_closure_start (SCHEME_OBJECT * block)
93
/* Skip the 32-bit entry count. */
94
return (((insn_t *) block) + 4);
98
compiled_closure_entry (insn_t * start)
100
return (start + CC_ENTRY_HEADER_SIZE);
104
compiled_closure_next (insn_t * start)
106
return (start + CC_ENTRY_HEADER_SIZE + 12);
110
skip_compiled_closure_padding (insn_t * start)
112
/* The padding is the same size as the entry header (format word). */
113
return ((SCHEME_OBJECT *) (start + CC_ENTRY_HEADER_SIZE));
117
compiled_closure_entry_to_target (insn_t * entry)
119
/* `entry' points to the start of the MOV RAX,imm64 instruction,
120
which has two bytes of opcode cruft before the imm64. */
121
return (MAKE_CC_ENTRY (* ((long *) (entry + 2))));
/* Execution caches (UUO links)

   An execution cache is a region of memory that lives in the
   constants section of a compiled-code block.  It is an indirection
   for calling external procedures that allows the linker to control
   the calling process without having to find and change all the
   places in the compiled code that refer to it.

   Prior to linking, the execution cache has two pieces of
   information: (1) the name of the procedure being called (a symbol),
   and (2) the number of arguments that will be passed to the
   procedure.  `saddr' points to the arity at the beginning of the
   execution cache.  */
139
read_uuo_symbol (SCHEME_OBJECT * saddr)
145
read_uuo_frame_size (SCHEME_OBJECT * saddr)
147
return (* ((uint16_t *) saddr));
151
read_uuo_target (SCHEME_OBJECT * saddr)
153
insn_t * mov_addr = ((insn_t *) (saddr + 1));
154
return (* ((insn_t **) (mov_addr + 2)));
158
read_uuo_target_no_reloc (SCHEME_OBJECT * saddr)
160
return (read_uuo_target (saddr));
164
write_uuo_target (insn_t * target, SCHEME_OBJECT * saddr)
166
/* Skip the arity. */
167
insn_t * addr = ((insn_t *) (saddr + 1));
168
(*addr++) = 0x48; /* REX.W (64-bit operand size prefix) */
169
(*addr++) = 0xB8; /* MOV RAX,imm64 */
170
(* ((insn_t **) addr)) = target;
172
(*addr++) = 0xFF; /* JMP reg/mem64 */
173
(*addr++) = 0xE0; /* ModR/M for RAX */
#define BYTES_PER_TRAMPOLINE_ENTRY_PADDING 4
#define OBJECTS_PER_TRAMPOLINE_ENTRY 2

/* Byte offset, relative to the register block in RSI, of the hook
   that trampolines use to reenter the C interface.  */
#define RSI_TRAMPOLINE_TO_INTERFACE_OFFSET				\
  ((COMPILER_REGBLOCK_N_FIXED + (2 * COMPILER_HOOK_SIZE))		\
   * SIZEOF_SCHEME_OBJECT)

/* Number of SCHEME_OBJECT words occupied by N_ENTRIES trampoline
   entries.  */
unsigned long
trampoline_entry_size (unsigned long n_entries)
{
  return (n_entries * OBJECTS_PER_TRAMPOLINE_ENTRY);
}
190
trampoline_entry_addr (SCHEME_OBJECT * block, unsigned long index)
192
return (((insn_t *) (block + 2 + (index * OBJECTS_PER_TRAMPOLINE_ENTRY)))
193
+ BYTES_PER_TRAMPOLINE_ENTRY_PADDING + CC_ENTRY_HEADER_SIZE);
197
store_trampoline_insns (insn_t * entry, byte_t code)
199
(*entry++) = 0xB0; /* MOV AL,code */
201
(*entry++) = 0xFF; /* CALL /2 disp32(RSI) */
203
(* ((uint32_t *) entry)) = RSI_TRAMPOLINE_TO_INTERFACE_OFFSET;
#ifdef _MACH_UNIX
   /* Protection bits Scheme needs on its heap: it writes and then
      executes compiled code there.  NOTE(review): the #ifdef/#include
      scaffolding around this define was lost in extraction and has
      been reconstructed -- confirm.  */
#  include <mach.h>
#  define VM_PROT_SCHEME (VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)
#endif
/* Store HOOK's address at the current offset into the register block
   (addressed via rsi_value), advance the offset by one hook slot, and
   register the hook with the debugger/builtin table.  Relies on
   `rsi_value' and `offset' locals in the expansion context.  */
#define SETUP_REGISTER(hook) do						\
{									\
  (* ((unsigned long *) (rsi_value + offset)))				\
    = ((unsigned long) (hook));						\
  offset += (COMPILER_HOOK_SIZE * (sizeof (SCHEME_OBJECT)));		\
  declare_builtin (((unsigned long) hook), #hook);			\
} while (false)
221
x86_64_reset_hook (void)
223
int offset = (COMPILER_REGBLOCK_N_FIXED * (sizeof (SCHEME_OBJECT)));
224
unsigned char * rsi_value = ((unsigned char *) Registers);
226
/* These must match machines/x86-64/lapgen.scm */
228
SETUP_REGISTER (asm_scheme_to_interface); /* 0 */
229
SETUP_REGISTER (asm_scheme_to_interface_call); /* 1 */
231
if (offset != RSI_TRAMPOLINE_TO_INTERFACE_OFFSET)
233
outf_fatal ("\nx86_64_reset_hook: RSI_TRAMPOLINE_TO_INTERFACE_OFFSET\n");
234
Microcode_Termination (TERM_EXIT);
236
SETUP_REGISTER (asm_trampoline_to_interface); /* 2 */
238
SETUP_REGISTER (asm_interrupt_procedure); /* 3 */
239
SETUP_REGISTER (asm_interrupt_continuation); /* 4 */
240
SETUP_REGISTER (asm_interrupt_closure); /* 5 */
241
SETUP_REGISTER (asm_interrupt_dlink); /* 6 */
242
SETUP_REGISTER (asm_primitive_apply); /* 7 */
243
SETUP_REGISTER (asm_primitive_lexpr_apply); /* 8 */
244
SETUP_REGISTER (asm_assignment_trap); /* 9 */
245
SETUP_REGISTER (asm_reference_trap); /* 10 */
246
SETUP_REGISTER (asm_safe_reference_trap); /* 11 */
247
SETUP_REGISTER (asm_link); /* 12 */
248
SETUP_REGISTER (asm_error); /* 13 */
249
SETUP_REGISTER (asm_primitive_error); /* 14 */
250
SETUP_REGISTER (asm_generic_add); /* 15 */
251
SETUP_REGISTER (asm_generic_subtract); /* 16 */
252
SETUP_REGISTER (asm_generic_multiply); /* 17 */
253
SETUP_REGISTER (asm_generic_divide); /* 18 */
254
SETUP_REGISTER (asm_generic_equal); /* 19 */
255
SETUP_REGISTER (asm_generic_less); /* 20 */
256
SETUP_REGISTER (asm_generic_greater); /* 21 */
257
SETUP_REGISTER (asm_generic_increment); /* 22 */
258
SETUP_REGISTER (asm_generic_decrement); /* 23 */
259
SETUP_REGISTER (asm_generic_zero); /* 24 */
260
SETUP_REGISTER (asm_generic_positive); /* 25 */
261
SETUP_REGISTER (asm_generic_negative); /* 26 */
262
SETUP_REGISTER (asm_generic_quotient); /* 27 */
263
SETUP_REGISTER (asm_generic_remainder); /* 28 */
264
SETUP_REGISTER (asm_generic_modulo); /* 29 */
265
SETUP_REGISTER (asm_sc_apply); /* 30 */
266
SETUP_REGISTER (asm_sc_apply_size_1); /* 31 */
267
SETUP_REGISTER (asm_sc_apply_size_2); /* 32 */
268
SETUP_REGISTER (asm_sc_apply_size_3); /* 33 */
269
SETUP_REGISTER (asm_sc_apply_size_4); /* 34 */
270
SETUP_REGISTER (asm_sc_apply_size_5); /* 35 */
271
SETUP_REGISTER (asm_sc_apply_size_6); /* 36 */
272
SETUP_REGISTER (asm_sc_apply_size_7); /* 37 */
273
SETUP_REGISTER (asm_sc_apply_size_8); /* 38 */
275
/* Logically, this should be up by the other interrupt routines, but
276
I just wrote all those numbers above by hand and am too exhausted
277
by that gruelling effort to be inclined to go to the trouble to
278
renumber them now. */
279
SETUP_REGISTER (asm_interrupt_continuation_2); /* 39 */
281
SETUP_REGISTER (asm_fixnum_shift); /* 40 */
289
vm_inherit_t inheritance;
294
addr = ((vm_address_t) Heap);
295
if ((vm_region ((task_self ()), &addr, &size, &prot, &max_prot,
296
&inheritance, &shared, &object, &offset))
299
outf_fatal ( "compiler_reset: vm_region() failed.\n");
300
Microcode_Termination (TERM_EXIT);
303
if ((prot & VM_PROT_SCHEME) != VM_PROT_SCHEME)
305
if ((max_prot & VM_PROT_SCHEME) != VM_PROT_SCHEME)
308
"compiler_reset: inadequate protection for Heap.\n");
309
outf_fatal ( "maximum = 0x%lx; desired = 0x%lx\n",
310
((unsigned long) (max_prot & VM_PROT_SCHEME)),
311
((unsigned long) VM_PROT_SCHEME));
312
Microcode_Termination (TERM_EXIT);
315
if ((vm_protect ((task_self ()), ((vm_address_t) Heap),
316
(((char *) constant_end) - ((char *) Heap)),
320
outf_fatal ("Unable to change protection for Heap.\n");
321
outf_fatal ("actual = 0x%lx; desired = 0x%lx\n",
322
((unsigned long) (prot & VM_PROT_SCHEME)),
323
((unsigned long) VM_PROT_SCHEME));
324
Microcode_Termination (TERM_EXIT);
329
#endif /* _MACH_UNIX */