/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
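
/* This header is included from dyngen-generated code and historically
   relied on the including file for its environment; the includes below
   are an addition so that it is self-contained. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <alloca.h>

/* Dummy symbols: dyngen scans the compiled micro-ops for references to
   __op_param*, __op_gen_label* and __op_jmp* in order to locate the spots
   that must be patched when an op is copied into a translated block. */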
int __op_param1, __op_param2, __op_param3;
#if defined(__sparc__) || defined(__arm__)
void __op_gen_label1(){}
void __op_gen_label2(){}
void __op_gen_label3(){}
#else
int __op_gen_label1, __op_gen_label2, __op_gen_label3;
#endif
int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
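
/* flush_icache_range(): generated code is written through the data cache,
   so before it can be executed the data cache must be written back and
   the instruction cache invalidated over the same range.  Each host gets
   its minimal sequence below; hosts with coherent I/D caches do nothing. */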
#if defined(__i386__) || defined(__x86_64__) || defined(__s390__)
/* instruction and data caches are coherent on these hosts */
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
}
#elif defined(__ia64__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    while (start < stop) {
        asm volatile ("fc %0" :: "r"(start));
        start += 32;    /* fc flushes at least a 32-byte line */
    }
    asm volatile (";;sync.i;;srlz.i;;");
}
#elif defined(__powerpc__)

#define MIN_CACHE_LINE_SIZE 8 /* conservative value */

static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long p;

    start &= ~(MIN_CACHE_LINE_SIZE - 1);
    stop = (stop + MIN_CACHE_LINE_SIZE - 1) & ~(MIN_CACHE_LINE_SIZE - 1);

    /* write the data cache back, then invalidate the icache lines */
    for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
        asm volatile ("dcbst 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    for (p = start; p < stop; p += MIN_CACHE_LINE_SIZE) {
        asm volatile ("icbi 0,%0" : : "r"(p) : "memory");
    }
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__alpha__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    /* the IMB PALcall invalidates the whole instruction cache */
    asm ("imb");
}
#elif defined(__sparc__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long p;

    p = start & ~(8UL - 1UL);
    stop = (stop + (8UL - 1UL)) & ~(8UL - 1UL);

    for (; p < stop; p += 8)
        __asm__ __volatile__("flush\t%0" : : "r" (p));
}
#elif defined(__arm__)
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    register unsigned long _beg __asm ("a1") = start;
    register unsigned long _end __asm ("a2") = stop;
    register unsigned long _flg __asm ("a3") = 0;
    /* swi 0x9f0002 is the ARM Linux cacheflush system call */
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
}
#elif defined(__mc68000)

#include <asm/cachectl.h>
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    cacheflush(start, FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, stop - start + 16);
}
#elif defined(__mips__)

#include <sys/cachectl.h>
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    _flush_cache((void *)start, stop - start, BCACHE);
}
#else
#error unsupported CPU
#endif
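
/* Usage sketch (illustrative, not part of the original interface):
 *
 *     uint8_t *end = gen_block(buf);               // hypothetical emitter
 *     flush_icache_range((unsigned long)buf, (unsigned long)end);
 *     ((void (*)(void))buf)();                     // now safe to execute
 */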

#ifdef __alpha__

register int gp asm("$29");

/* Patch the high half of an ldah/lda pair so the pair materializes "val";
   the carry adjustment compensates for lda sign-extending its 16-bit
   displacement, and the base register field is forced to $31 (zero). */
static inline void immediate_ldah(void *p, int val) {
    uint32_t *dest = p;
    long high = ((val >> 16) + ((val >> 15) & 1)) & 0xffff;

    *dest &= ~0xffff;
    *dest |= high;
    *dest |= 31 << 16;
}

static inline void immediate_lda(void *dest, int val) {
    *(uint16_t *) dest = val;
}

/* Rewrite the 21-bit word displacement of a bsr instruction. */
void fix_bsr(void *p, int offset) {
    uint32_t *dest = p;
    *dest &= ~((1 << 21) - 1);
    *dest |= (offset >> 2) & ((1 << 21) - 1);
}

#endif /* __alpha__ */

#ifdef __arm__

#define ARM_LDR_TABLE_SIZE 1024

/* One entry per pc-relative load recorded while copying micro-ops; the
   field types are a reconstruction consistent with the uses below. */
typedef struct LDREntry {
    uint8_t *ptr;        /* address of the instruction to patch */
    uint32_t *data_ptr;  /* address of its constant-pool slot */
    unsigned type:2;     /* 0 = ldr, 1 = ldc, 2 = add */
} LDREntry;

static LDREntry arm_ldr_table[ARM_LDR_TABLE_SIZE];
static uint32_t arm_data_table[ARM_LDR_TABLE_SIZE];

extern char exec_loop;

/* Patch the 24-bit offset field of an ARM branch at *ptr so it reaches
   "val"; offsets are in words, relative to pc+8, and the bias is carried
   by the low bits of the branch template passed in "insn". */
static inline void arm_reloc_pc24(uint32_t *ptr, uint32_t insn, int val)
{
    *ptr = (insn & ~0xffffff) | ((insn + ((val - (int)ptr) >> 2)) & 0xffffff);
}
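
/* arm_flush_ldr(): micro-ops load runtime constants with pc-relative
   instructions against a small constant pool.  When ops are glued into a
   translated block, the pool is copied behind the generated code
   (optionally jumping over it) and every recorded instruction is
   re-pointed at the copied data. */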
static uint8_t *arm_flush_ldr(uint8_t *gen_code_ptr,
                              LDREntry *ldr_start, LDREntry *ldr_end,
                              uint32_t *data_start, uint32_t *data_end,
                              int gen_jmp)
{
    LDREntry *le;
    uint32_t *ptr;
    int offset, data_size, target;
    uint8_t *data_ptr;
    uint32_t insn;

    data_size = (data_end - data_start) << 2;

    if (gen_jmp) {
        /* generate branch to skip the data */
        if (data_size == 0)
            return gen_code_ptr;
        target = (long)gen_code_ptr + data_size + 4;
        arm_reloc_pc24((uint32_t *)gen_code_ptr, 0xeafffffe, target);
        gen_code_ptr += 4;
    }

    /* copy the data */
    data_ptr = gen_code_ptr;
    memcpy(gen_code_ptr, data_start, data_size);
    gen_code_ptr += data_size;

    /* patch the ldr to point to the data */
    for (le = ldr_start; le < ldr_end; le++) {
        ptr = (uint32_t *)le->ptr;
        offset = ((unsigned long)(le->data_ptr) - (unsigned long)data_start) +
            (unsigned long)data_ptr -
            (unsigned long)ptr - 8;
        if (offset < 0) {
            fprintf(stderr, "Negative constant pool offset\n");
            abort();
        }
        switch (le->type) {
        case 0: /* ldr: 12-bit byte offset */
            if (offset >= 4096) {
                fprintf(stderr, "Bad ldr offset\n");
                abort();
            }
            break;
        case 1: /* ldc: 8-bit word offset */
            if (offset >= 1024) {
                fprintf(stderr, "Bad ldc offset\n");
                abort();
            }
            break;
        case 2: /* add: 8-bit rotated immediate */
            if (offset >= 1024) {
                fprintf(stderr, "Bad add offset\n");
                abort();
            }
            break;
        default:
            fprintf(stderr, "Bad pc relative fixup\n");
            abort();
        }
        /* The per-case clearing of the offset bits below is a
           reconstruction; the source only preserved the OR lines. */
        insn = *ptr;
        switch (le->type) {
        case 0: /* clear the offset field, set the U (add-offset) bit */
            insn &= ~0x00800fff;
            insn |= offset | 0x00800000;
            break;
        case 1: /* ldc offsets are in words */
            insn &= ~0x008000ff;
            insn |= (offset >> 2) | 0x00800000;
            break;
        case 2: /* rotate field 0xf rotates the 8-bit immediate left by 2 */
            insn &= ~0x00000fff;
            insn |= (offset >> 2) | 0xf00;
            break;
        }
        *ptr = insn;
    }
    return gen_code_ptr;
}

#endif /* __arm__ */
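
/* IA-64 packs three 41-bit instruction slots plus a 5-bit template into
   each 16-byte bundle, so patching an immediate means bit surgery across
   one or two 64-bit words.  The helpers below take "addresses" whose low
   bits select the slot: bundle_base + 0, 1 or 2. */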

#ifdef __ia64__

/* Patch instruction with "val" where "mask" has 1 bits. */
static inline void ia64_patch (uint64_t insn_addr, uint64_t mask, uint64_t val)
{
    uint64_t m0, m1, v0, v1, b0, b1, *b = (uint64_t *) (insn_addr & -16);
#   define insn_mask ((1UL << 41) - 1)
    unsigned long shift;

    b0 = b[0]; b1 = b[1];
    shift = 5 + 41 * (insn_addr % 16); /* 5 template, 3 x 41-bit insns */
    if (shift >= 64) {
        m1 = mask << (shift - 64);
        v1 = val << (shift - 64);
    } else {
        m0 = mask << shift; m1 = mask >> (64 - shift);
        v0 = val << shift; v1 = val >> (64 - shift);
        b[0] = (b0 & ~m0) | (v0 & m0);
    }
    b[1] = (b1 & ~m1) | (v1 & m1);
}
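
/* Worked example of the shift arithmetic above: for slot 2, insn_addr % 16
   is 2, so shift = 5 + 41 * 2 = 87 >= 64 and the slot lives entirely in
   b[1] starting at bit 87 - 64 = 23.  For slot 1, shift = 46: the low 18
   bits of the slot land in b[0] (bits 46..63) and the remaining 23 bits in
   b[1] (bits 0..22). */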

static inline void ia64_patch_imm60 (uint64_t insn_addr, uint64_t val)
{
    ia64_patch(insn_addr,
               0x011ffffe000UL, /* mask reconstructed from the Linux ia64 helpers */
               (  ((val & 0x0800000000000000UL) >> 23) /* bit 59 -> 36 */
                | ((val & 0x00000000000fffffUL) << 13) /* bit 0 -> 13 */));
    ia64_patch(insn_addr - 1, 0x1fffffffffcUL, val >> 18);
}

static inline void ia64_imm64 (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC). */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    ia64_patch(insn_addr + 2,
               0x01fffefe000UL, /* mask reconstructed from the Linux ia64 helpers */
               (  ((val & 0x8000000000000000UL) >> 27) /* bit 63 -> 36 */
                | ((val & 0x0000000000200000UL) <<  0) /* bit 21 -> 21 */
                | ((val & 0x00000000001f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x000000000000ff80UL) << 20) /* bit 7 -> 27 */
                | ((val & 0x000000000000007fUL) << 13) /* bit 0 -> 13 */));
    ia64_patch(insn_addr + 1, 0x1ffffffffffUL, val >> 22);
}

static inline void ia64_imm60b (void *insn, uint64_t val)
{
    /* Ignore the slot number of the relocation; GCC and Intel
       toolchains differed for some time on whether IMM64 relocs are
       against slot 1 (Intel) or slot 2 (GCC). */
    uint64_t insn_addr = (uint64_t) insn & ~3UL;

    if (val + ((uint64_t) 1 << 59) >= (1UL << 60))
        fprintf(stderr, "%s: value %ld out of IMM60 range\n",
                __FUNCTION__, (int64_t) val);
    ia64_patch_imm60(insn_addr + 2, val);
}

static inline void ia64_imm22 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL,
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
}

/* Like ia64_imm22(), but also clear bits 20-21.  For addl, this has the
   effect of turning "addl rX=imm22,rY" into "addl rX=imm22,r0". */
static inline void ia64_imm22_r0 (void *insn, uint64_t val)
{
    if (val + (1 << 21) >= (1 << 22))
        fprintf(stderr, "%s: value %li out of IMM22 range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x01fffcfe000UL | (0x3UL << 20),
               (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
                | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
                | ((val & 0x00ff80UL) << 20) /* bit 7 -> 27 */
                | ((val & 0x00007fUL) << 13) /* bit 0 -> 13 */));
}

static inline void ia64_imm21b (void *insn, uint64_t val)
{
    if (val + (1 << 20) >= (1 << 21))
        fprintf(stderr, "%s: value %li out of IMM21b range\n",
                __FUNCTION__, (int64_t)val);
    ia64_patch((uint64_t) insn, 0x11ffffe000UL,
               (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
                | ((val & 0x0fffffUL) << 13) /* bit 0 -> 13 */));
}

/* Replace the entire 41-bit slot with a B-unit nop. */
static inline void ia64_nop_b (void *insn)
{
    ia64_patch((uint64_t) insn, (1UL << 41) - 1, 2UL << 37);
}

/* If "val" fits in 22 bits, relax the ld8 of an @ltoff address into an
   add-immediate (major opcode 8); otherwise leave the load alone. */
static inline void ia64_ldxmov(void *insn, uint64_t val)
{
    if (val + (1 << 21) < (1 << 22))
        ia64_patch((uint64_t) insn, 0x1fff80fe000UL, 8UL << 37);
}

static inline int ia64_patch_ltoff(void *insn, uint64_t val,
                                   int relaxable)
{
    if (relaxable && (val + (1 << 21) < (1 << 22))) {
        ia64_imm22_r0(insn, val);
        return 0;   /* relaxed in place; no GOT entry needed */
    }
    return 1;       /* caller must record a fix-up for a GOT entry
                       (return values reconstructed to match IA64_LTOFF) */
}
354
struct ia64_fixup *next;
355
void *addr; /* address that needs to be patched */
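
/* The two macros below run inside the generating function: each records a
   patch site in a node obtained with alloca(), so the fix-up lists live
   exactly as long as that function's stack frame.  "plt_fixes",
   "ltoff_fixes" and "plt_offset" are expected to be locals of the
   expanding function. */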

#define IA64_PLT(insn, plt_index)                       \
do {                                                    \
    struct ia64_fixup *fixup = alloca(sizeof(*fixup));  \
    fixup->next = plt_fixes;                            \
    plt_fixes = fixup;                                  \
    fixup->addr = (insn);                               \
    fixup->value = (plt_index);                         \
    plt_offset[(plt_index)] = 1;                        \
} while (0)

#define IA64_LTOFF(insn, val, relaxable)                        \
do {                                                            \
    if (ia64_patch_ltoff(insn, val, relaxable)) {               \
        struct ia64_fixup *fixup = alloca(sizeof(*fixup));      \
        fixup->next = ltoff_fixes;                              \
        ltoff_fixes = fixup;                                    \
        fixup->addr = (insn);                                   \
        fixup->value = (val);                                   \
    }                                                           \
} while (0)
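
/* ia64_apply_fixes(): called once all ops have been copied.  It appends a
   two-bundle PLT stub (movl r1=GP; brl IP) for every helper that was
   actually referenced plus an inline GOT, then resolves the recorded
   branch and ltoff fix-ups against them. */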

static inline void ia64_apply_fixes (uint8_t **gen_code_pp,
                                     struct ia64_fixup *ltoff_fixes,
                                     uint64_t gp,
                                     struct ia64_fixup *plt_fixes,
                                     int num_plts,
                                     unsigned long *plt_target,
                                     unsigned int *plt_offset)
{
    static const uint8_t plt_bundle[] = {
        0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; movl r1=GP */
        0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x60,

        0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, /* nop 0; brl IP */
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0
    };
    uint8_t *gen_code_ptr = *gen_code_pp, *plt_start, *got_start;
    uint64_t *vp;
    struct ia64_fixup *fixup;
    unsigned int offset = 0;
    struct fdesc {           /* ia64 function descriptor: entry IP + GP */
        long ip;
        long gp;
    } *fdesc;
    int i;

    if (plt_fixes) {
        plt_start = gen_code_ptr;

        /* emit one stub per helper that is actually referenced */
        for (i = 0; i < num_plts; ++i) {
            if (plt_offset[i]) {
                plt_offset[i] = offset;
                offset += sizeof(plt_bundle);

                fdesc = (struct fdesc *) plt_target[i];
                memcpy(gen_code_ptr, plt_bundle, sizeof(plt_bundle));
                ia64_imm64 (gen_code_ptr + 0x02, fdesc->gp);
                ia64_imm60b(gen_code_ptr + 0x12,
                            (fdesc->ip - (long) (gen_code_ptr + 0x10)) >> 4);
                gen_code_ptr += sizeof(plt_bundle);
            }
        }

        /* point each recorded branch at its stub */
        for (fixup = plt_fixes; fixup; fixup = fixup->next)
            ia64_imm21b(fixup->addr,
                        ((long) plt_start + plt_offset[fixup->value]
                         - ((long) fixup->addr & ~0xf)) >> 4);
    }

    got_start = gen_code_ptr;

    /* First, create the GOT: */
    for (fixup = ltoff_fixes; fixup; fixup = fixup->next) {
        /* first check if we already have this value in the GOT: */
        for (vp = (uint64_t *) got_start; vp < (uint64_t *) gen_code_ptr; ++vp)
            if (*vp == fixup->value)
                break;
        if (vp == (uint64_t *) gen_code_ptr) {
            /* Nope, we need to put the value in the GOT: */
            *vp = fixup->value;
            gen_code_ptr += 8;
        }
        ia64_imm22(fixup->addr, (long) vp - gp);
    }
    /* Keep code ptr aligned. */
    if ((long) gen_code_ptr & 15)
        gen_code_ptr += 8;
    *gen_code_pp = gen_code_ptr;
}

#endif /* __ia64__ */
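
/* Overall IA-64 flow (illustrative sketch; everything except the helpers
 * defined above is hypothetical):
 *
 *     struct ia64_fixup *plt_fixes = NULL, *ltoff_fixes = NULL;
 *     unsigned int plt_offset[MAX_PLTS] = { 0 };       // MAX_PLTS assumed
 *     ... emit ops, invoking IA64_PLT()/IA64_LTOFF() per relocation ...
 *     ia64_apply_fixes(&code_ptr, ltoff_fixes, gp, plt_fixes,
 *                      MAX_PLTS, plt_target, plt_offset);
 *     flush_icache_range((unsigned long)code_buf, (unsigned long)code_ptr);
 */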