/* This is the Linux kernel elf-loading code, ported into user space */
/* from personality.h */
 * Flags for bug emulation.
 * These occupy the top three bytes.
ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to descriptors */
MMAP_PAGE_ZERO = 0x0100000,
ADDR_COMPAT_LAYOUT = 0x0200000,
READ_IMPLIES_EXEC = 0x0400000,
ADDR_LIMIT_32BIT = 0x0800000,
SHORT_INODE = 0x1000000,
WHOLE_SECONDS = 0x2000000,
STICKY_TIMEOUTS = 0x4000000,
ADDR_LIMIT_3GB = 0x8000000,
 * These go in the low byte. Avoid using the top bit, it will
 * conflict with error returns.
PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS |
WHOLE_SECONDS | SHORT_INODE,
PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS, /* IRIX5 32-bit */
PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS, /* IRIX6 new 32-bit */
PER_IRIX64 = 0x000b | STICKY_TIMEOUTS, /* IRIX6 64-bit */
PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
PER_OSF4 = 0x000f, /* OSF/1 v4 */
 * Return the base personality without flags.
#define personality(pers) (pers & PER_MASK)
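/* For example, personality(PER_SVR4) masks off STICKY_TIMEOUTS and
   MMAP_PAGE_ZERO and yields the base value 0x0001 (assuming PER_MASK is
   the conventional low-byte mask, 0x00ff). */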
/* this flag is ineffective under Linux too, should be deleted */
#define MAP_DENYWRITE 0
/* should probably go in elf.h */
#define ELF_PLATFORM get_elf_platform()
static const char *get_elf_platform(void)
static char elf_platform[] = "i386";
int family = (thread_env->cpuid_version >> 8) & 0xff;
elf_platform[1] = '0' + family;
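/* Overwriting the second character turns "i386" into "i486"/"i586"/"i686"
   etc., according to the CPUID family reported by the guest CPU model. */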
#define ELF_HWCAP get_elf_hwcap()
static uint32_t get_elf_hwcap(void)
return thread_env->cpuid_features;
#define ELF_START_MMAP 0x2aaaaab000ULL
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_X86_64
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
regs->rsp = infop->start_stack;
regs->rip = infop->entry;
#define ELF_START_MMAP 0x80000000
* This is used to ensure we don't load something for the wrong architecture.
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
* These are used to set parameters in the core dumps.
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_386
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
regs->esp = infop->start_stack;
regs->eip = infop->entry;
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
starts %edx contains a pointer to a function which might be
registered using `atexit'. This provides a means for the
dynamic linker to call DT_FINI functions for shared libraries
that have been loaded before the code runs.
A value of 0 tells us we have no such handler. */
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000
#define elf_check_arch(x) ( (x) == EM_ARM )
#define ELF_CLASS ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA ELFDATA2MSB
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_ARM
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
abi_long stack = infop->start_stack;
memset(regs, 0, sizeof(*regs));
regs->ARM_cpsr = 0x10;
if (infop->entry & 1)
regs->ARM_cpsr |= CPSR_T;
regs->ARM_pc = infop->entry & 0xfffffffe;
regs->ARM_sp = infop->start_stack;
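/* Bit 0 of the ELF entry point selects Thumb state: it is folded into
   CPSR_T above and cleared from the PC so execution starts at an even
   address. */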
/* FIXME - what to do for failure of get_user()? */
get_user_ual(regs->ARM_r2, stack + 8); /* envp */
get_user_ual(regs->ARM_r1, stack + 4); /* argv */
/* XXX: it seems that r0 is zeroed afterwards! */
/* For uClinux PIC binaries. */
/* XXX: Linux does this only on ARM with no MMU (do we care?) */
regs->ARM_r10 = infop->start_data;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
ARM_HWCAP_ARM_SWP = 1 << 0,
ARM_HWCAP_ARM_HALF = 1 << 1,
ARM_HWCAP_ARM_THUMB = 1 << 2,
ARM_HWCAP_ARM_26BIT = 1 << 3,
ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
ARM_HWCAP_ARM_FPA = 1 << 5,
ARM_HWCAP_ARM_VFP = 1 << 6,
ARM_HWCAP_ARM_EDSP = 1 << 7,
#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
| ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
| ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)
#ifdef TARGET_SPARC64
#define ELF_START_MMAP 0x80000000
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_SPARCV9
#define STACK_BIAS 2047
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
regs->pc = infop->entry;
regs->npc = regs->pc + 4;
regs->u_regs[14] = infop->start_stack - 16 * 4;
if (personality(infop->personality) == PER_LINUX32)
regs->u_regs[14] = infop->start_stack - 16 * 4;
regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
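/* The subtractions reserve the 16-word register window save area that the
   SPARC ABI requires below %sp (%o6 == u_regs[14]); on 64-bit targets the
   2047-byte STACK_BIAS is applied as well. */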
#define ELF_START_MMAP 0x80000000
#define elf_check_arch(x) ( (x) == EM_SPARC )
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_SPARC
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
regs->pc = infop->entry;
regs->npc = regs->pc + 4;
regs->u_regs[14] = infop->start_stack - 16 * 4;
#define ELF_START_MMAP 0x80000000
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
#define elf_check_arch(x) ( (x) == EM_PPC64 )
#define ELF_CLASS ELFCLASS64
#define elf_check_arch(x) ( (x) == EM_PPC )
#define ELF_CLASS ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA ELFDATA2MSB
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_PPC
* We need to put in some extra aux table entries to tell glibc what
* the cache block size is, so it can use the dcbz instruction safely.
#define AT_DCACHEBSIZE 19
#define AT_ICACHEBSIZE 20
#define AT_UCACHEBSIZE 21
/* A special ignored type value for PPC, for glibc compatibility. */
#define AT_IGNOREPPC 22
* The requirements here are:
* - keep the final alignment of sp (sp & 0xf)
* - make sure the 32-bit value at the first 16 byte aligned position of
* AUXV is greater than 16 for glibc compatibility.
* AT_IGNOREPPC is used for that.
* - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
* even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
#define DLINFO_ARCH_ITEMS 5
#define ARCH_DLINFO \
NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
* Now handle glibc compatibility. \
NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
abi_ulong pos = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
abi_ulong entry, toc;
_regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
entry = ldq_raw(infop->entry) + infop->load_addr;
toc = ldq_raw(infop->entry + 8) + infop->load_addr;
infop->entry = entry;
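/* On 64-bit PPC (the ELFv1 ABI), e_entry points at a function descriptor:
   the first doubleword is the actual code address and the second is the
   TOC pointer, hence the two ldq_raw() loads above. */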
_regs->nip = infop->entry;
/* Note that this isn't exactly what the regular kernel does
* but this is what the ABI wants and is needed to allow
* execution of PPC BSD programs. */
/* FIXME - what to do for failure of get_user()? */
get_user_ual(_regs->gpr[3], pos);
pos += sizeof(abi_ulong);
for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
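/* This loop skips past the argv pointers (up to and including their NULL
   terminator), leaving pos pointing at the environment vector. */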
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000
#define elf_check_arch(x) ( (x) == EM_MIPS )
#define ELF_CLASS ELFCLASS64
#define ELF_CLASS ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA ELFDATA2MSB
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_MIPS
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
regs->cp0_status = 2 << CP0St_KSU;
regs->cp0_epc = infop->entry;
regs->regs[29] = infop->start_stack;
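/* KSU = 2 in the CP0 status register starts the CPU in user mode;
   register $29 is the MIPS stack pointer. */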
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#endif /* TARGET_MIPS */
#define ELF_START_MMAP 0x80000000
#define elf_check_arch(x) ( (x) == EM_SH )
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_SH
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
/* Check other registers XXXXX */
regs->pc = infop->entry;
regs->regs[15] = infop->start_stack;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
#define ELF_START_MMAP 0x80000000
#define elf_check_arch(x) ( (x) == EM_CRIS )
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_CRIS
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
regs->erp = infop->entry;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP 0x80000000
#define elf_check_arch(x) ( (x) == EM_68K )
#define ELF_CLASS ELFCLASS32
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_68K
/* ??? Does this need to do anything?
#define ELF_PLAT_INIT(_r) */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
regs->usp = infop->start_stack;
regs->pc = infop->entry;
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#define ELF_START_MMAP (0x30000000000ULL)
#define elf_check_arch(x) ( (x) == ELF_ARCH )
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2MSB
#define ELF_ARCH EM_ALPHA
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
regs->pc = infop->entry;
regs->usp = infop->start_stack;
regs->unique = infop->start_data; /* ? */
printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
regs->unique, infop->start_data);
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192
#endif /* TARGET_ALPHA */
#define ELF_PLATFORM (NULL)
#define ELF_CLASS ELFCLASS32
#define bswaptls(ptr) bswap32s(ptr)
unsigned int a_info; /* Use macros N_MAGIC, etc for access */
unsigned int a_text; /* length of text, in bytes */
unsigned int a_data; /* length of data, in bytes */
unsigned int a_bss; /* length of uninitialized data area, in bytes */
unsigned int a_syms; /* length of symbol table data in file, in bytes */
unsigned int a_entry; /* start address */
unsigned int a_trsize; /* length of relocation info for text, in bytes */
unsigned int a_drsize; /* length of relocation info for data, in bytes */
#define N_MAGIC(exec) ((exec).a_info & 0xffff)
/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)
/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
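/* Example: with a 4 KiB target page size, TARGET_ELF_PAGESTART(0x1234)
   is 0x1000 and TARGET_ELF_PAGEOFFSET(0x1234) is 0x234. */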
#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2
#define DLINFO_ITEMS 12
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
extern unsigned long x86_stack_size;
static int load_aout_interp(void * exptr, int interp_fd);
static void bswap_ehdr(struct elfhdr *ehdr)
bswap16s(&ehdr->e_type); /* Object file type */
bswap16s(&ehdr->e_machine); /* Architecture */
bswap32s(&ehdr->e_version); /* Object file version */
bswaptls(&ehdr->e_entry); /* Entry point virtual address */
bswaptls(&ehdr->e_phoff); /* Program header table file offset */
bswaptls(&ehdr->e_shoff); /* Section header table file offset */
bswap32s(&ehdr->e_flags); /* Processor-specific flags */
bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
bswap16s(&ehdr->e_phnum); /* Program header table entry count */
bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
bswap16s(&ehdr->e_shnum); /* Section header table entry count */
bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
static void bswap_phdr(struct elf_phdr *phdr)
bswap32s(&phdr->p_type); /* Segment type */
bswaptls(&phdr->p_offset); /* Segment file offset */
bswaptls(&phdr->p_vaddr); /* Segment virtual address */
bswaptls(&phdr->p_paddr); /* Segment physical address */
bswaptls(&phdr->p_filesz); /* Segment size in file */
bswaptls(&phdr->p_memsz); /* Segment size in memory */
bswap32s(&phdr->p_flags); /* Segment flags */
bswaptls(&phdr->p_align); /* Segment alignment */
static void bswap_shdr(struct elf_shdr *shdr)
bswap32s(&shdr->sh_name);
bswap32s(&shdr->sh_type);
bswaptls(&shdr->sh_flags);
bswaptls(&shdr->sh_addr);
bswaptls(&shdr->sh_offset);
bswaptls(&shdr->sh_size);
bswap32s(&shdr->sh_link);
bswap32s(&shdr->sh_info);
bswaptls(&shdr->sh_addralign);
bswaptls(&shdr->sh_entsize);
static void bswap_sym(struct elf_sym *sym)
bswap32s(&sym->st_name);
bswaptls(&sym->st_value);
bswaptls(&sym->st_size);
bswap16s(&sym->st_shndx);
* 'copy_elf_strings()' copies argument/environment strings from user
* memory to free pages in kernel mem. These are in a format ready
* to be put directly into the top of new user memory.
static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
char *tmp, *tmp1, *pag = NULL;
return 0; /* bullet-proofing */
fprintf(stderr, "VFS: argc is wrong");
if (p < len) { /* this shouldn't happen - 128kB */
offset = p % TARGET_PAGE_SIZE;
pag = (char *)page[p/TARGET_PAGE_SIZE];
pag = (char *)malloc(TARGET_PAGE_SIZE);
memset(pag, 0, TARGET_PAGE_SIZE);
page[p/TARGET_PAGE_SIZE] = pag;
if (len == 0 || offset == 0) {
*(pag + offset) = *tmp;
int bytes_to_copy = (len > offset) ? offset : len;
tmp -= bytes_to_copy;
offset -= bytes_to_copy;
len -= bytes_to_copy;
memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
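/* The strings are laid down from the top of the argument area downwards:
   p is a byte offset that only ever decreases, and each string is copied
   into its page ending at the current offset, so the caller gets back the
   new (lower) value of p. */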
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
struct image_info *info)
abi_ulong stack_base, size, error;
/* Create enough stack to hold everything. If we don't use
* it for args, we'll use it for something else... */
size = x86_stack_size;
if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
error = target_mmap(0,
size + qemu_host_page_size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON,
/* we reserve one extra page at the top of the stack as guard */
target_mprotect(error + size, qemu_host_page_size, PROT_NONE);
stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
/* FIXME - check return value of memcpy_to_target() for failure */
memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
stack_base += TARGET_PAGE_SIZE;
static void set_brk(abi_ulong start, abi_ulong end)
/* page-align the start and end addresses... */
start = HOST_PAGE_ALIGN(start);
end = HOST_PAGE_ALIGN(end);
if (target_mmap(start, end - start,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
perror("cannot mmap brk");
/* We need to explicitly zero any fractional pages after the data
section (i.e. bss). These would otherwise contain junk from the file
that should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
if (elf_bss >= last_bss)
/* XXX: this is really a hack: if the real host page size is
smaller than the target page size, some pages after the end
of the file may not be mapped. A better fix would be to
patch target_mmap(), but it is more complicated as the file
size must be known */
if (qemu_real_host_page_size < qemu_host_page_size) {
abi_ulong end_addr, end_addr1;
end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
~(qemu_real_host_page_size - 1);
end_addr = HOST_PAGE_ALIGN(elf_bss);
if (end_addr1 < end_addr) {
mmap((void *)g2h(end_addr1), end_addr - end_addr1,
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
nbyte = elf_bss & (qemu_host_page_size-1);
nbyte = qemu_host_page_size - nbyte;
/* FIXME - what to do if put_user() fails? */
put_user_u8(0, elf_bss);
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
struct elfhdr * exec,
abi_ulong interp_load_addr, int ibcs,
struct image_info *info)
abi_ulong u_platform;
const char *k_platform;
const int n = sizeof(elf_addr_t);
k_platform = ELF_PLATFORM;
size_t len = strlen(k_platform) + 1;
sp -= (len + n - 1) & ~(n - 1);
/* FIXME - check return value of memcpy_to_target() for failure */
memcpy_to_target(sp, k_platform, len);
* Force 16 byte _final_ alignment here for generality.
sp = sp &~ (abi_ulong)15;
size = (DLINFO_ITEMS + 1) * 2;
#ifdef DLINFO_ARCH_ITEMS
size += DLINFO_ARCH_ITEMS * 2;
size += envc + argc + 2;
size += (!ibcs ? 3 : 1); /* argc itself */
sp -= 16 - (size & 15);
/* This is correct because Linux defines
* elf_addr_t as Elf32_Off / Elf64_Off */
#define NEW_AUX_ENT(id, val) do { \
sp -= n; put_user_ual(val, sp); \
sp -= n; put_user_ual(id, sp); \
NEW_AUX_ENT (AT_NULL, 0);
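/* The table is built downwards in memory, so AT_NULL is written first and
   ends up as the terminating entry at the highest address, with every
   subsequent NEW_AUX_ENT() landing below it. */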
/* There must be exactly DLINFO_ITEMS entries here. */
NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
NEW_AUX_ENT(AT_PLATFORM, u_platform);
* ARCH_DLINFO must come last so platform specific code can enforce
* special alignment requirements on the AUXV if necessary (eg. PPC).
sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
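/* loader_build_argptr() then lays out argc, the argv pointer array, a NULL
   terminator, the envp pointer array and another NULL below the auxv just
   written, which is the initial stack layout the ELF ABI promises to the
   guest program. */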
static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
abi_ulong *interp_load_addr)
struct elf_phdr *elf_phdata = NULL;
struct elf_phdr *eppnt;
abi_ulong load_addr = 0;
int load_addr_set = 0;
abi_ulong last_bss, elf_bss;
bswap_ehdr(interp_elf_ex);
/* First of all, some simple consistency checks */
if ((interp_elf_ex->e_type != ET_EXEC &&
interp_elf_ex->e_type != ET_DYN) ||
!elf_check_arch(interp_elf_ex->e_machine)) {
return ~((abi_ulong)0UL);
/* Now read in all of the header information */
if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
return ~(abi_ulong)0UL;
elf_phdata = (struct elf_phdr *)
malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
return ~((abi_ulong)0UL);
* If the size of this structure has changed, then punt, since
* we will be doing the wrong thing.
if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
return ~((abi_ulong)0UL);
retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
retval = read(interpreter_fd,
sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
perror("load_elf_interp");
for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
if (interp_elf_ex->e_type == ET_DYN) {
/* in order to avoid hardcoding the interpreter load
address in qemu, we allocate a big enough memory zone */
error = target_mmap(0, INTERP_MAP_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANON,
for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
if (eppnt->p_type == PT_LOAD) {
int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
elf_type |= MAP_FIXED;
vaddr = eppnt->p_vaddr;
error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
close(interpreter_fd);
return ~((abi_ulong)0UL);
if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
* Find the end of the file mapping for this phdr, and keep
* track of the largest address we see for this.
k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
if (k > elf_bss) elf_bss = k;
* Do the same thing for the memory mapping - between
* elf_bss and last_bss is the bss section.
k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
if (k > last_bss) last_bss = k;
/* Now use mmap to map the library into memory. */
close(interpreter_fd);
* Now fill out the bss section. First pad the last page up
* to the page boundary, and then perform a mmap to make sure
* that there are zeromapped pages up to and including the last bss page.
padzero(elf_bss, last_bss);
elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */
/* Map the last of the bss segment */
if (last_bss > elf_bss) {
target_mmap(elf_bss, last_bss-elf_bss,
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
*interp_load_addr = load_addr;
return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
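/* bsearch() comparator used by lookup_symbolxx(): the looked-up address
   "matches" a symbol when it falls inside [st_value, st_value + st_size]. */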
static int symfind(const void *s0, const void *s1)
struct elf_sym *key = (struct elf_sym *)s0;
struct elf_sym *sym = (struct elf_sym *)s1;
if (key->st_value < sym->st_value) {
} else if (key->st_value > sym->st_value + sym->st_size) {
static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
#if ELF_CLASS == ELFCLASS32
struct elf_sym *syms = s->disas_symtab.elf32;
struct elf_sym *syms = s->disas_symtab.elf64;
struct elf_sym *sym;
key.st_value = orig_addr;
sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
return s->disas_strtab + sym->st_name;
/* FIXME: This should use elf_ops.h */
static int symcmp(const void *s0, const void *s1)
struct elf_sym *sym0 = (struct elf_sym *)s0;
struct elf_sym *sym1 = (struct elf_sym *)s1;
return (sym0->st_value < sym1->st_value)
: ((sym0->st_value > sym1->st_value) ? 1 : 0);
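/* symcmp() is the qsort() comparator that orders the table by st_value,
   which is what makes the bsearch() in lookup_symbolxx() valid. */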
/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
unsigned int i, nsyms;
struct elf_shdr sechdr, symtab, strtab;
struct elf_sym *syms;
lseek(fd, hdr->e_shoff, SEEK_SET);
for (i = 0; i < hdr->e_shnum; i++) {
if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
bswap_shdr(&sechdr);
if (sechdr.sh_type == SHT_SYMTAB) {
lseek(fd, hdr->e_shoff
+ sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
if (read(fd, &strtab, sizeof(strtab))
bswap_shdr(&strtab);
return; /* Shouldn't happen... */
/* Now we know where the strtab and symtab are. Snarf them. */
s = malloc(sizeof(*s));
syms = malloc(symtab.sh_size);
s->disas_strtab = strings = malloc(strtab.sh_size);
if (!s->disas_strtab)
lseek(fd, symtab.sh_offset, SEEK_SET);
if (read(fd, syms, symtab.sh_size) != symtab.sh_size)
nsyms = symtab.sh_size / sizeof(struct elf_sym);
bswap_sym(syms + i);
/* Throw away entries which we do not need. */
if (syms[i].st_shndx == SHN_UNDEF ||
syms[i].st_shndx >= SHN_LORESERVE ||
ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
syms[i] = syms[nsyms];
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
/* The bottom address bit marks a Thumb or MIPS16 symbol. */
syms[i].st_value &= ~(target_ulong)1;
syms = realloc(syms, nsyms * sizeof(*syms));
qsort(syms, nsyms, sizeof(*syms), symcmp);
lseek(fd, strtab.sh_offset, SEEK_SET);
if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
s->disas_symtab.elf32 = syms;
s->lookup_symbol = lookup_symbolxx;
s->disas_symtab.elf64 = syms;
s->lookup_symbol = lookup_symbolxx;
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
struct image_info * info)
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
struct exec interp_ex;
int interpreter_fd = -1; /* avoid warning */
abi_ulong load_addr, load_bias;
int load_addr_set = 0;
unsigned int interpreter_type = INTERPRETER_NONE;
unsigned char ibcs2_interpreter;
abi_ulong mapped_addr;
struct elf_phdr * elf_ppnt;
struct elf_phdr *elf_phdata;
abi_ulong elf_bss, k, elf_brk;
char * elf_interpreter;
abi_ulong elf_entry, interp_load_addr = 0;
abi_ulong start_code, end_code, start_data, end_data;
abi_ulong reloc_func_desc = 0;
abi_ulong elf_stack;
char passed_fileno[6];
ibcs2_interpreter = 0;
elf_ex = *((struct elfhdr *) bprm->buf); /* exec-header */
bswap_ehdr(&elf_ex);
/* First of all, some simple consistency checks */
if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
(! elf_check_arch(elf_ex.e_machine))) {
bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
/* Now read in all of the header information */
elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
if (elf_phdata == NULL) {
retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
retval = read(bprm->fd, (char *) elf_phdata,
elf_ex.e_phentsize * elf_ex.e_phnum);
perror("load_elf_binary");
elf_ppnt = elf_phdata;
for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
bswap_phdr(elf_ppnt);
elf_ppnt = elf_phdata;
elf_stack = ~((abi_ulong)0UL);
elf_interpreter = NULL;
start_code = ~((abi_ulong)0UL);
interp_ex.a_info = 0;
for(i=0;i < elf_ex.e_phnum; i++) {
if (elf_ppnt->p_type == PT_INTERP) {
if ( elf_interpreter != NULL )
free(elf_interpreter);
/* This is the program interpreter used for
* shared libraries - for now assume that this
* is an a.out format binary */
elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
if (elf_interpreter == NULL) {
retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
perror("load_elf_binary2");
/* If the program interpreter is one of these two,
then assume an iBCS2 image. Otherwise assume
a native Linux image. */
/* JRP - Need to add X86 lib dir stuff here... */
if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
ibcs2_interpreter = 1;
printf("Using ELF interpreter %s\n", elf_interpreter);
retval = open(path(elf_interpreter), O_RDONLY);
interpreter_fd = retval;
perror(elf_interpreter);
/* retval = -errno; */
retval = lseek(interpreter_fd, 0, SEEK_SET);
retval = read(interpreter_fd,bprm->buf,128);
interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
perror("load_elf_binary3");
free(elf_interpreter);
/* Some simple consistency checks for the interpreter */
if (elf_interpreter){
interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
/* Now figure out which format our binary is */
if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
(N_MAGIC(interp_ex) != QMAGIC)) {
interpreter_type = INTERPRETER_ELF;
if (interp_elf_ex.e_ident[0] != 0x7f ||
strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
interpreter_type &= ~INTERPRETER_ELF;
if (!interpreter_type) {
free(elf_interpreter);
/* OK, we are done with that, now set up the arg stuff,
and then start this sucker up */
if (interpreter_type == INTERPRETER_AOUT) {
snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
passed_p = passed_fileno;
if (elf_interpreter) {
bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
if (elf_interpreter) {
free(elf_interpreter);
/* OK, this is the point of no return */
info->start_mmap = (abi_ulong)ELF_START_MMAP;
elf_entry = (abi_ulong) elf_ex.e_entry;
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
bprm->p = setup_arg_pages(bprm->p, bprm, info);
info->start_stack = bprm->p;
/* Now we do a little grungy work by mmaping the ELF image into
* the correct location in memory. At this point, we assume that
* the image should be loaded at a fixed address, not at a variable address. */
for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
if (elf_ppnt->p_type != PT_LOAD)
if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
if (elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (elf_ex.e_type == ET_DYN) {
/* Try and get dynamic programs out of the way of the default mmap
base, as well as whatever program they might try to exec. This
is because the brk will follow the loader, and is not movable. */
/* NOTE: for qemu, we do a big mmap to get enough space
without hardcoding any address */
error = target_mmap(0, ET_DYN_MAP_SIZE,
PROT_NONE, MAP_PRIVATE | MAP_ANON,
load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
(elf_ppnt->p_filesz +
TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
(MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
(elf_ppnt->p_offset -
TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
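/* mmap() needs page-aligned addresses and file offsets, so the mapping is
   widened back to the start of the containing page and the file offset is
   moved back by the same amount (TARGET_ELF_PAGEOFFSET of p_vaddr). */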
#ifdef LOW_ELF_STACK
if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
if (!load_addr_set) {
load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
if (elf_ex.e_type == ET_DYN) {
load_bias += error -
TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
load_addr += load_bias;
reloc_func_desc = load_bias;
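/* For ET_DYN binaries load_bias records where the big PROT_NONE
   reservation actually landed relative to the segment's nominal p_vaddr;
   it is added to every address taken from the ELF headers from here on. */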
k = elf_ppnt->p_vaddr;
k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
if ((elf_ppnt->p_flags & PF_X) && end_code < k)
k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
if (k > elf_brk) elf_brk = k;
elf_entry += load_bias;
elf_bss += load_bias;
elf_brk += load_bias;
start_code += load_bias;
end_code += load_bias;
start_data += load_bias;
end_data += load_bias;
if (elf_interpreter) {
if (interpreter_type & 1) {
elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
else if (interpreter_type & 2) {
elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
reloc_func_desc = interp_load_addr;
close(interpreter_fd);
free(elf_interpreter);
if (elf_entry == ~((abi_ulong)0UL)) {
printf("Unable to load interpreter\n");
load_symbols(&elf_ex, bprm->fd);
if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);
#ifdef LOW_ELF_STACK
info->start_stack = bprm->p = elf_stack - 4;
bprm->p = create_elf_tables(bprm->p,
load_addr, load_bias,
(interpreter_type == INTERPRETER_AOUT ? 0 : 1),
info->load_addr = reloc_func_desc;
info->start_brk = info->brk = elf_brk;
info->end_code = end_code;
info->start_code = start_code;
info->start_data = start_data;
info->end_data = end_data;
info->start_stack = bprm->p;
/* Calling set_brk effectively mmaps the pages that we need for the bss and break
sections */
set_brk(elf_bss, elf_brk);
padzero(elf_bss, elf_brk);
printf("(start_brk) %x\n" , info->start_brk);
printf("(end_code) %x\n" , info->end_code);
printf("(start_code) %x\n" , info->start_code);
printf("(end_data) %x\n" , info->end_data);
printf("(start_stack) %x\n" , info->start_stack);
printf("(brk) %x\n" , info->brk);
if ( info->personality == PER_SVR4 )
/* Why this, you ask??? Well SVr4 maps page 0 as read-only,
and some applications "depend" upon this behavior.
Since we do not have the power to recompile these, we
emulate the SVr4 behavior. Sigh. */
mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE, -1, 0);
info->entry = elf_entry;
static int load_aout_interp(void * exptr, int interp_fd)
printf("a.out interpreter not yet supported\n");
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
init_thread(regs, infop);