#include "zasm_GOOS_GOARCH.h"
#include "../../cmd/ld/textflag.h"
// The following thunks allow calling the gcc-compiled race runtime directly
// from Go code without going all the way through cgo.
// First, it's much faster (up to 50% speedup for real Go programs).
// Second, it eliminates race-related special cases from cgocall and scheduler.
// Third, in long-term it will allow to remove cyclic runtime/race dependency on cmd/go.

// A brief recap of the amd64 calling convention.
// Arguments are passed in DI, SI, DX, CX, R8, R9, the rest is on stack.
// Callee-saved registers are: BX, BP, R12-R15.
// SP must be 16-byte aligned.
// On Windows:
// Arguments are passed in CX, DX, R8, R9, the rest is on stack.
// Callee-saved registers are: BX, BP, DI, SI, R12-R15.
// SP must be 16-byte aligned. Windows also requires "stack-backing" for the 4 register arguments:
// http://msdn.microsoft.com/en-us/library/ms235286.aspx
// We do not do this, because it seems to be intended for vararg/unprototyped functions.
// Gcc-compiled race runtime does not try to use that space.
// func runtime·raceread(addr uintptr)
// Called from instrumented code.
TEXT	runtime·raceread(SB), NOSPLIT, $0-8
	MOVQ	addr+0(FP), RARG1
	MOVQ	(SP), RARG2		// pc = caller's return address (this is why callers must tail-call)
	// void __tsan_read(ThreadState *thr, void *addr, void *pc);
	MOVQ	$__tsan_read(SB), AX
	JMP	racecalladdr<>(SB)
// func runtime·RaceRead(addr uintptr)
TEXT	runtime·RaceRead(SB), NOSPLIT, $0-8
	// This needs to be a tail call, because raceread reads caller pc.
	JMP	runtime·raceread(SB)
// void runtime·racereadpc(void *addr, void *callpc, void *pc)
TEXT	runtime·racereadpc(SB), NOSPLIT, $0-24
	MOVQ	addr+0(FP), RARG1
	MOVQ	callpc+8(FP), RARG2
	MOVQ	pc+16(FP), RARG3	// third arg was missing; __tsan_read_pc takes both callpc and pc
	// void __tsan_read_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVQ	$__tsan_read_pc(SB), AX
	JMP	racecalladdr<>(SB)
// func runtime·racewrite(addr uintptr)
// Called from instrumented code.
TEXT	runtime·racewrite(SB), NOSPLIT, $0-8
	MOVQ	addr+0(FP), RARG1
	MOVQ	(SP), RARG2		// pc = caller's return address (this is why callers must tail-call)
	// void __tsan_write(ThreadState *thr, void *addr, void *pc);
	MOVQ	$__tsan_write(SB), AX
	JMP	racecalladdr<>(SB)
// func runtime·RaceWrite(addr uintptr)
TEXT	runtime·RaceWrite(SB), NOSPLIT, $0-8
	// This needs to be a tail call, because racewrite reads caller pc.
	JMP	runtime·racewrite(SB)
// void runtime·racewritepc(void *addr, void *callpc, void *pc)
TEXT	runtime·racewritepc(SB), NOSPLIT, $0-24
	MOVQ	addr+0(FP), RARG1
	MOVQ	callpc+8(FP), RARG2
	MOVQ	pc+16(FP), RARG3	// third arg was missing; __tsan_write_pc takes both callpc and pc
	// void __tsan_write_pc(ThreadState *thr, void *addr, void *callpc, void *pc);
	MOVQ	$__tsan_write_pc(SB), AX
	JMP	racecalladdr<>(SB)
// func runtime·racereadrange(addr, size uintptr)
// Called from instrumented code.
TEXT	runtime·racereadrange(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG1
	MOVQ	size+8(FP), RARG2
	MOVQ	(SP), RARG3		// pc = caller's return address (this is why callers must tail-call)
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVQ	$__tsan_read_range(SB), AX
	JMP	racecalladdr<>(SB)
// func runtime·RaceReadRange(addr, size uintptr)
TEXT	runtime·RaceReadRange(SB), NOSPLIT, $0-16
	// This needs to be a tail call, because racereadrange reads caller pc.
	JMP	runtime·racereadrange(SB)
// void runtime·racereadrangepc1(void *addr, uintptr sz, void *pc)
// Like racereadrange, but the pc is supplied explicitly instead of read from the stack.
TEXT	runtime·racereadrangepc1(SB), NOSPLIT, $0-24
	MOVQ	addr+0(FP), RARG1
	MOVQ	size+8(FP), RARG2
	MOVQ	pc+16(FP), RARG3
	// void __tsan_read_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVQ	$__tsan_read_range(SB), AX
	JMP	racecalladdr<>(SB)
// func runtime·racewriterange(addr, size uintptr)
// Called from instrumented code.
TEXT	runtime·racewriterange(SB), NOSPLIT, $0-16
	MOVQ	addr+0(FP), RARG1
	MOVQ	size+8(FP), RARG2
	MOVQ	(SP), RARG3		// pc = caller's return address (this is why callers must tail-call)
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVQ	$__tsan_write_range(SB), AX
	JMP	racecalladdr<>(SB)
// func runtime·RaceWriteRange(addr, size uintptr)
TEXT	runtime·RaceWriteRange(SB), NOSPLIT, $0-16
	// This needs to be a tail call, because racewriterange reads caller pc.
	JMP	runtime·racewriterange(SB)
// void runtime·racewriterangepc1(void *addr, uintptr sz, void *pc)
// Like racewriterange, but the pc is supplied explicitly instead of read from the stack.
TEXT	runtime·racewriterangepc1(SB), NOSPLIT, $0-24
	MOVQ	addr+0(FP), RARG1
	MOVQ	size+8(FP), RARG2
	MOVQ	pc+16(FP), RARG3
	// void __tsan_write_range(ThreadState *thr, void *addr, uintptr size, void *pc);
	MOVQ	$__tsan_write_range(SB), AX
	JMP	racecalladdr<>(SB)
// If addr (RARG1) is out of range, do nothing.
// Otherwise, setup goroutine context and invoke racecall. Other arguments already set.
TEXT	racecalladdr<>(SB), NOSPLIT, $0-0
	get_tls(R12)
	MOVQ	g(R12), R14
	MOVQ	g_racectx(R14), RARG0	// goroutine context
	// Check that addr is within [arenastart, arenaend) or within [noptrdata, enoptrbss).
	CMPQ	RARG1, runtime·racearenastart(SB)
	JB	racecalladdr_data	// below the heap arena; may still be global data
	CMPQ	RARG1, runtime·racearenaend(SB)
	JB	racecalladdr_call	// inside the heap arena
racecalladdr_data:
	CMPQ	RARG1, $noptrdata(SB)
	JB	racecalladdr_ret	// below the data segment; not instrumented memory
	CMPQ	RARG1, $enoptrbss(SB)
	JAE	racecalladdr_ret	// above bss; not instrumented memory
racecalladdr_call:
	MOVQ	AX, AX			// w/o this 6a miscompiles this function
	CALL	racecall<>(SB)
racecalladdr_ret:
	RET
// func runtime·racefuncenter(pc uintptr)
// Called from instrumented code.
TEXT	runtime·racefuncenter(SB), NOSPLIT, $0-8
	MOVQ	DX, R15			// save function entry context (for closures)
	get_tls(R12)
	MOVQ	g(R12), R14
	MOVQ	g_racectx(R14), RARG0	// goroutine context
	MOVQ	callpc+0(FP), RARG1
	// void __tsan_func_enter(ThreadState *thr, void *pc);
	MOVQ	$__tsan_func_enter(SB), AX
	CALL	racecall<>(SB)
	MOVQ	R15, DX			// restore function entry context
	RET
// func runtime·racefuncexit()
// Called from instrumented code.
TEXT	runtime·racefuncexit(SB), NOSPLIT, $0-0
	get_tls(R12)
	MOVQ	g(R12), R14
	MOVQ	g_racectx(R14), RARG0	// goroutine context
	// void __tsan_func_exit(ThreadState *thr);
	MOVQ	$__tsan_func_exit(SB), AX
	JMP	racecall<>(SB)
// void runtime·racecall(void(*f)(...), ...)
// Calls C function f from race runtime and passes up to 4 arguments to it.
// The arguments are never heap-object-preserving pointers, so we pretend there are no arguments.
TEXT	runtime·racecall(SB), NOSPLIT, $0-0
	MOVQ	fn+0(FP), AX		// target C function; racecall<> invokes (AX)
	MOVQ	arg0+8(FP), RARG0
	MOVQ	arg1+16(FP), RARG1
	MOVQ	arg2+24(FP), RARG2
	MOVQ	arg3+32(FP), RARG3
	JMP	racecall<>(SB)
// Switches SP to g0 stack and calls (AX). Arguments already set.
TEXT	racecall<>(SB), NOSPLIT, $0-0
	get_tls(R12)
	MOVQ	g(R12), R14
	MOVQ	g_m(R14), R13
	// Switch to g0 stack.
	MOVQ	SP, R12			// callee-saved, preserved across the CALL
	MOVQ	m_g0(R13), R10
	CMPQ	R10, R14
	JE	racecall_cont		// already on g0
	MOVQ	(g_sched+gobuf_sp)(R10), SP
racecall_cont:
	ANDQ	$~15, SP		// alignment for gcc ABI
	CALL	AX
	MOVQ	R12, SP			// switch back to the original goroutine stack
	RET
// C->Go callback thunk that allows to call runtime·racesymbolize from C code.
// Direct Go->C race call has only switched SP, finish g->g0 switch by setting correct g.
// The overall effect of Go->C->Go call chain is similar to that of mcall.
TEXT runtime·racesymbolizethunk(SB), NOSPLIT, $56-8
213
// Save callee-saved registers (Go code won't respect that).
214
// This is superset of darwin/linux/windows registers.
227
MOVQ R14, g(R12) // g = m->g0
228
MOVQ RARG0, 0(SP) // func arg
229
CALL runtime·racesymbolize(SB)
230
// All registers are smashed after Go code, reload.
233
MOVQ m_curg(R13), R14
234
MOVQ R14, g(R12) // g = m->curg
235
// Restore callee-saved registers.