280
void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
285
uint32_t *TargetPtr = reinterpret_cast<uint32_t*>(Section.Address + Offset);
286
uint64_t FinalAddress = Section.LoadAddress + Offset;
288
DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
289
<< format("%llx", Section.Address + Offset)
290
<< " FinalAddress: 0x" << format("%llx",FinalAddress)
291
<< " Value: 0x" << format("%llx",Value)
292
<< " Type: 0x" << format("%x",Type)
293
<< " Addend: 0x" << format("%llx",Addend)
298
llvm_unreachable("Relocation type not implemented yet!");
300
case ELF::R_AARCH64_ABS64: {
301
uint64_t *TargetPtr = reinterpret_cast<uint64_t*>(Section.Address + Offset);
302
*TargetPtr = Value + Addend;
305
case ELF::R_AARCH64_PREL32: {
306
uint64_t Result = Value + Addend - FinalAddress;
307
assert(static_cast<int64_t>(Result) >= INT32_MIN &&
308
static_cast<int64_t>(Result) <= UINT32_MAX);
309
*TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU);
312
case ELF::R_AARCH64_CALL26: // fallthrough
313
case ELF::R_AARCH64_JUMP26: {
314
// Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
316
uint64_t BranchImm = Value + Addend - FinalAddress;
318
// "Check that -2^27 <= result < 2^27".
319
assert(-(1LL << 27) <= static_cast<int64_t>(BranchImm) &&
320
static_cast<int64_t>(BranchImm) < (1LL << 27));
322
// AArch64 code is emitted with .rela relocations. The data already in any
323
// bits affected by the relocation on entry is garbage.
324
*TargetPtr &= 0xfc000000U;
325
// Immediate goes in bits 25:0 of B and BL.
326
*TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2;
329
case ELF::R_AARCH64_MOVW_UABS_G3: {
330
uint64_t Result = Value + Addend;
332
// AArch64 code is emitted with .rela relocations. The data already in any
333
// bits affected by the relocation on entry is garbage.
334
*TargetPtr &= 0xff80001fU;
335
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
336
*TargetPtr |= Result >> (48 - 5);
337
// Shift is "lsl #48", in bits 22:21
338
*TargetPtr |= 3 << 21;
341
case ELF::R_AARCH64_MOVW_UABS_G2_NC: {
342
uint64_t Result = Value + Addend;
345
// AArch64 code is emitted with .rela relocations. The data already in any
346
// bits affected by the relocation on entry is garbage.
347
*TargetPtr &= 0xff80001fU;
348
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
349
*TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5));
350
// Shift is "lsl #32", in bits 22:21
351
*TargetPtr |= 2 << 21;
354
case ELF::R_AARCH64_MOVW_UABS_G1_NC: {
355
uint64_t Result = Value + Addend;
357
// AArch64 code is emitted with .rela relocations. The data already in any
358
// bits affected by the relocation on entry is garbage.
359
*TargetPtr &= 0xff80001fU;
360
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
361
*TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5));
362
// Shift is "lsl #16", in bits 22:21
363
*TargetPtr |= 1 << 21;
366
case ELF::R_AARCH64_MOVW_UABS_G0_NC: {
367
uint64_t Result = Value + Addend;
369
// AArch64 code is emitted with .rela relocations. The data already in any
370
// bits affected by the relocation on entry is garbage.
371
*TargetPtr &= 0xff80001fU;
372
// Immediate goes in bits 20:5 of MOVZ/MOVK instruction
373
*TargetPtr |= ((Result & 0xffffU) << 5);
374
// Shift is "lsl #0", in bits 22:21. No action needed.
380
// FIXME: PR16013: this routine needs modification to handle repeated relocations.
272
381
void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
677
void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
682
uint8_t *LocalAddress = Section.Address + Offset;
685
llvm_unreachable("Relocation type not implemented yet!");
687
case ELF::R_390_PC16DBL:
688
case ELF::R_390_PLT16DBL: {
689
int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
690
assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
691
writeInt16BE(LocalAddress, Delta / 2);
694
case ELF::R_390_PC32DBL:
695
case ELF::R_390_PLT32DBL: {
696
int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
697
assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
698
writeInt32BE(LocalAddress, Delta / 2);
701
case ELF::R_390_PC32: {
702
int64_t Delta = (Value + Addend) - (Section.LoadAddress + Offset);
703
assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
704
writeInt32BE(LocalAddress, Delta);
708
writeInt64BE(LocalAddress, Value + Addend);
713
void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
715
const SectionEntry &Section = Sections[RE.SectionID];
716
return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend);
563
719
// Architecture dispatcher: forwards one relocation to the resolver for the
// current target Arch.
// NOTE(review): this span is corrupted extraction residue -- the bare numbers
// below are stray line markers, not code, and the switch header plus the
// intermediate arch cases (x86/ARM/MIPS/...) are missing from this view.
// Only the ppc64, systemz and default arms survive; do not treat this span
// as the complete dispatcher.
void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
589
748
// Dispatch to the PPC64 resolver (64-bit Value/Addend passed through as-is).
case Triple::ppc64:
590
749
resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
751
// Dispatch to the SystemZ resolver (64-bit Value/Addend passed through as-is).
case Triple::systemz:
752
resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
592
754
// Any architecture without a resolver is a hard programmer error.
default: llvm_unreachable("Unsupported CPU type!");
596
void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
758
void RuntimeDyldELF::processRelocationRef(unsigned SectionID,
597
760
ObjectImage &Obj,
598
761
ObjSectionToIDMap &ObjSectionToID,
599
762
const SymbolTableMap &Symbols,
600
763
StubMap &Stubs) {
602
uint32_t RelType = (uint32_t)(Rel.Type & 0xffffffffL);
603
intptr_t Addend = (intptr_t)Rel.AdditionalInfo;
604
const SymbolRef &Symbol = Rel.Symbol;
765
Check(RelI.getType(RelType));
767
Check(getELFRelocationAddend(RelI, Addend));
769
Check(RelI.getSymbol(Symbol));
606
771
// Obtain the symbol name which is referenced in the relocation
607
772
StringRef TargetName;
660
DEBUG(dbgs() << "\t\tRel.SectionID: " << Rel.SectionID
661
<< " Rel.Offset: " << Rel.Offset
826
Check(RelI.getOffset(Offset));
828
DEBUG(dbgs() << "\t\tSectionID: " << SectionID
829
<< " Offset: " << Offset
663
if (Arch == Triple::arm &&
831
if (Arch == Triple::aarch64 &&
832
(RelType == ELF::R_AARCH64_CALL26 ||
833
RelType == ELF::R_AARCH64_JUMP26)) {
834
// This is an AArch64 branch relocation, need to use a stub function.
835
DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
836
SectionEntry &Section = Sections[SectionID];
838
// Look for an existing stub.
839
StubMap::const_iterator i = Stubs.find(Value);
840
if (i != Stubs.end()) {
841
resolveRelocation(Section, Offset,
842
(uint64_t)Section.Address + i->second, RelType, 0);
843
DEBUG(dbgs() << " Stub function found\n");
845
// Create a new stub function.
846
DEBUG(dbgs() << " Create a new stub function\n");
847
Stubs[Value] = Section.StubOffset;
848
uint8_t *StubTargetAddr = createStubFunction(Section.Address +
851
RelocationEntry REmovz_g3(SectionID,
852
StubTargetAddr - Section.Address,
853
ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
854
RelocationEntry REmovk_g2(SectionID,
855
StubTargetAddr - Section.Address + 4,
856
ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
857
RelocationEntry REmovk_g1(SectionID,
858
StubTargetAddr - Section.Address + 8,
859
ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
860
RelocationEntry REmovk_g0(SectionID,
861
StubTargetAddr - Section.Address + 12,
862
ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
864
if (Value.SymbolName) {
865
addRelocationForSymbol(REmovz_g3, Value.SymbolName);
866
addRelocationForSymbol(REmovk_g2, Value.SymbolName);
867
addRelocationForSymbol(REmovk_g1, Value.SymbolName);
868
addRelocationForSymbol(REmovk_g0, Value.SymbolName);
870
addRelocationForSection(REmovz_g3, Value.SectionID);
871
addRelocationForSection(REmovk_g2, Value.SectionID);
872
addRelocationForSection(REmovk_g1, Value.SectionID);
873
addRelocationForSection(REmovk_g0, Value.SectionID);
875
resolveRelocation(Section, Offset,
876
(uint64_t)Section.Address + Section.StubOffset,
878
Section.StubOffset += getMaxStubSize();
880
} else if (Arch == Triple::arm &&
664
881
(RelType == ELF::R_ARM_PC24 ||
665
882
RelType == ELF::R_ARM_CALL ||
666
883
RelType == ELF::R_ARM_JUMP24)) {
667
884
// This is an ARM branch relocation, need to use a stub function.
668
885
DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.");
669
SectionEntry &Section = Sections[Rel.SectionID];
886
SectionEntry &Section = Sections[SectionID];
671
888
// Look for an existing stub.
672
889
StubMap::const_iterator i = Stubs.find(Value);
673
890
if (i != Stubs.end()) {
674
resolveRelocation(Section, Rel.Offset,
891
resolveRelocation(Section, Offset,
675
892
(uint64_t)Section.Address + i->second, RelType, 0);
676
893
DEBUG(dbgs() << " Stub function found\n");
680
897
Stubs[Value] = Section.StubOffset;
681
898
uint8_t *StubTargetAddr = createStubFunction(Section.Address +
682
899
Section.StubOffset);
683
RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address,
900
RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
684
901
ELF::R_ARM_ABS32, Value.Addend);
685
902
if (Value.SymbolName)
686
903
addRelocationForSymbol(RE, Value.SymbolName);
688
905
addRelocationForSection(RE, Value.SectionID);
690
resolveRelocation(Section, Rel.Offset,
907
resolveRelocation(Section, Offset,
691
908
(uint64_t)Section.Address + Section.StubOffset,
693
910
Section.StubOffset += getMaxStubSize();
719
936
Section.StubOffset);
721
938
// Creating Hi and Lo relocations for the filled stub instructions.
722
RelocationEntry REHi(Rel.SectionID,
939
RelocationEntry REHi(SectionID,
723
940
StubTargetAddr - Section.Address,
724
941
ELF::R_MIPS_HI16, Value.Addend);
725
RelocationEntry RELo(Rel.SectionID,
942
RelocationEntry RELo(SectionID,
726
943
StubTargetAddr - Section.Address + 4,
727
944
ELF::R_MIPS_LO16, Value.Addend);
779
996
Stubs[Value] = Section.StubOffset;
780
997
uint8_t *StubTargetAddr = createStubFunction(Section.Address +
781
998
Section.StubOffset);
782
RelocationEntry RE(Rel.SectionID, StubTargetAddr - Section.Address,
999
RelocationEntry RE(SectionID, StubTargetAddr - Section.Address,
783
1000
ELF::R_PPC64_ADDR64, Value.Addend);
785
1002
// Generates the 64-bits address loads as exemplified in section
786
1003
// 4.5.1 in PPC64 ELF ABI.
787
RelocationEntry REhst(Rel.SectionID,
1004
RelocationEntry REhst(SectionID,
788
1005
StubTargetAddr - Section.Address + 2,
789
1006
ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
790
RelocationEntry REhr(Rel.SectionID,
1007
RelocationEntry REhr(SectionID,
791
1008
StubTargetAddr - Section.Address + 6,
792
1009
ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
793
RelocationEntry REh(Rel.SectionID,
1010
RelocationEntry REh(SectionID,
794
1011
StubTargetAddr - Section.Address + 14,
795
1012
ELF::R_PPC64_ADDR16_HI, Value.Addend);
796
RelocationEntry REl(Rel.SectionID,
1013
RelocationEntry REl(SectionID,
797
1014
StubTargetAddr - Section.Address + 18,
798
1015
ELF::R_PPC64_ADDR16_LO, Value.Addend);
828
1045
addRelocationForSection(RE, Value.SectionID);
1047
} else if (Arch == Triple::systemz &&
1048
(RelType == ELF::R_390_PLT32DBL ||
1049
RelType == ELF::R_390_GOTENT)) {
1050
// Create function stubs for both PLT and GOT references, regardless of
1051
// whether the GOT reference is to data or code. The stub contains the
1052
// full address of the symbol, as needed by GOT references, and the
1053
// executable part only adds an overhead of 8 bytes.
1055
// We could try to conserve space by allocating the code and data
1056
// parts of the stub separately. However, as things stand, we allocate
1057
// a stub for every relocation, so using a GOT in JIT code should be
1058
// no less space efficient than using an explicit constant pool.
1059
DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
1060
SectionEntry &Section = Sections[SectionID];
1062
// Look for an existing stub.
1063
StubMap::const_iterator i = Stubs.find(Value);
1064
uintptr_t StubAddress;
1065
if (i != Stubs.end()) {
1066
StubAddress = uintptr_t(Section.Address) + i->second;
1067
DEBUG(dbgs() << " Stub function found\n");
1069
// Create a new stub function.
1070
DEBUG(dbgs() << " Create a new stub function\n");
1072
uintptr_t BaseAddress = uintptr_t(Section.Address);
1073
uintptr_t StubAlignment = getStubAlignment();
1074
StubAddress = (BaseAddress + Section.StubOffset +
1075
StubAlignment - 1) & -StubAlignment;
1076
unsigned StubOffset = StubAddress - BaseAddress;
1078
Stubs[Value] = StubOffset;
1079
createStubFunction((uint8_t *)StubAddress);
1080
RelocationEntry RE(SectionID, StubOffset + 8,
1081
ELF::R_390_64, Value.Addend - Addend);
1082
if (Value.SymbolName)
1083
addRelocationForSymbol(RE, Value.SymbolName);
1085
addRelocationForSection(RE, Value.SectionID);
1086
Section.StubOffset = StubOffset + getMaxStubSize();
1089
if (RelType == ELF::R_390_GOTENT)
1090
resolveRelocation(Section, Offset, StubAddress + 8,
1091
ELF::R_390_PC32DBL, Addend);
1093
resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
831
RelocationEntry RE(Rel.SectionID, Rel.Offset, RelType, Value.Addend);
1095
RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
832
1096
if (Value.SymbolName)
833
1097
addRelocationForSymbol(RE, Value.SymbolName);