/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/uasm.h>

#include <asm-generic/sections.h>

22
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
23
#define MCOUNT_OFFSET_INSNS 5
25
#define MCOUNT_OFFSET_INSNS 4
/*
 * Check if the address is in kernel space
 *
 * Clone of core_kernel_text() from kernel/extable.c, but doesn't call
 * init_kernel_text(), since ftrace doesn't trace functions in init sections.
 */
static inline int in_kernel_space(unsigned long ip)
{
	if (ip >= (unsigned long)_stext &&
	    ip <= (unsigned long)_etext)
		return 1;
	return 0;
}
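
/*
 * Editorial note (an observation, not from the original source): module
 * code is allocated outside of [_stext, _etext], so in_kernel_space()
 * returns 0 for module call sites, which selects the long-call patching
 * paths below.
 */
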
#ifdef CONFIG_DYNAMIC_FTRACE

#define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff	/* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000	/* nop */
#define INSN_JAL(addr)	\
	((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
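
/*
 * Worked example (values are illustrative, not from the build): for a
 * call target at 0x80123458, ((0x80123458 >> 2) & ADDR_MASK) is
 * 0x00048d16, so INSN_JAL(0x80123458) encodes as 0x0c048d16. Since jal
 * can only reach targets inside the current 256 MB segment, the
 * destinations below are masked with JUMP_RANGE_MASK first.
 */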

static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_lui_v1_hi16_mcount __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
	u32 *buf;
	unsigned int v1;

	/* lui v1, hi16_mcount */
	v1 = 3;
	buf = (u32 *)&insn_lui_v1_hi16_mcount;
	UASM_i_LA_mostly(&buf, v1, MCOUNT_ADDR);

	/* jal (ftrace_caller + 8), jump over the first two instructions */
	buf = (u32 *)&insn_jal_ftrace_caller;
	uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* j ftrace_graph_caller */
	buf = (u32 *)&insn_j_ftrace_graph_caller;
	uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
	int faulted;

	/* *(unsigned int *)ip = new_code; */
	safe_store_code(new_code, ip, faulted);

	if (unlikely(faulted))
		return -EFAULT;

	flush_icache_range(ip, ip + 8);

	return 0;
}

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount		--> nop
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * move $12, ra_address
 * jalr v1
 * sub sp, sp, 8
 *					1: offset = 5 instructions
 *
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount		--> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount
 * move at, ra
 * jalr v1
 * nop | move $12, ra_address | sub sp, sp, 8
 *					1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
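
/*
 * Illustrative note: with MCOUNT_OFFSET_INSNS == 5, INSN_B_1F encodes
 * 0x10000005, a "beq zero, zero" whose 5-word offset branches to
 * PC + 4 + 5 * 4, i.e. to label 1 in case 2.1 above. The instruction
 * left in the delay slot (the addiu) still executes, but it only
 * clobbers v1, which is unused on this path.
 */
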
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	/*
	 * If ip is in kernel space, no long call is needed; otherwise a
	 * long call is required.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;

	return ftrace_modify_code(ip, new);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int new;
	unsigned long ip = rec->ip;

	new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
		insn_lui_v1_hi16_mcount;

	return ftrace_modify_code(ip, new);
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned int new;

	new = INSN_JAL((unsigned long)func);

	return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* Encode the instructions when booting */
	ftrace_dyn_arch_init_insns();

	/* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
	ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
			insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif	/* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP	(0xafbf << 16)	/* s{d,w} ra, offset(sp) */
#define S_R_SP	(0xafb0 << 16)	/* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
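
/*
 * Illustrative example (hypothetical instruction, not from this file):
 * "sw ra, 20(sp)" encodes as 0xafbf0014, so it satisfies
 * (code & S_RA_SP) == S_RA_SP, and (code & OFFSET_MASK) recovers the
 * stack offset 0x14 of the saved ra.
 */
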
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
		old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
{
	unsigned long sp, ip, tmp;
	unsigned int code;
	int faulted;

	/*
	 * For a module, move ip back past the instruction
	 * "lui v1, hi_16bit_of_mcount" (offset is 24); for the kernel,
	 * move back past the instruction "move at, ra" (offset is 16).
	 */
	ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);

	/*
	 * Search the text until we find either a non-store instruction or
	 * the "s{d,w} ra, offset(sp)" instruction.
	 */
	do {
		/* get the code at "ip": code = *(unsigned int *)ip; */
		safe_load_code(code, ip, faulted);

		if (unlikely(faulted))
			return 0;
		/*
		 * If we hit a non-store instruction before finding where
		 * the ra is stored, then this is a leaf function and it
		 * does not store the ra on the stack.
		 */
		if ((code & S_R_SP) != S_R_SP)
			return parent_ra_addr;

		/* Move to the next instruction */
		ip -= 4;
	} while ((code & S_RA_SP) != S_RA_SP);

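	/*
	 * Illustrative trace (hypothetical caller, for commentary only):
	 * scanning backwards from the call site, a prologue store such as
	 * "sw s0, 16(sp)" (0xafb00010) keeps the loop going,
	 * "sw ra, 28(sp)" (0xafbf001c) terminates it, and any non-store
	 * instruction means a leaf function whose ra was never spilled.
	 */
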
	sp = fp + (code & OFFSET_MASK);

	/* tmp = *(unsigned long *)sp; */
	safe_load_stack(tmp, sp, faulted);
	if (unlikely(faulted))
		return 0;

	if (tmp == old_parent_ra)
		return sp;
	return 0;
}

#endif /* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
			   unsigned long fp)
{
	unsigned long old_parent_ra;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
	    &return_to_handler;
	int faulted, insns;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * "parent_ra_addr" is the stack address where the return address
	 * of the caller of _mcount is saved.
	 *
	 * If gcc < 4.5, a leaf function does not save the return address
	 * on the stack, so we "emulate" one in _mcount's stack space and
	 * hijack it directly; but a non-leaf function saves the return
	 * address in its own stack space, so we cannot hijack it directly
	 * and need to find the real stack address, which
	 * ftrace_get_parent_ra_addr() does!
	 *
	 * If gcc >= 4.5, with the new -mmcount-ra-address option, the
	 * location of the return address of a non-leaf function is saved
	 * to $12 for us, and for a leaf function only a zero is put into
	 * $12; we do that in ftrace_graph_caller of mcount.S.
	 */

	/* old_parent_ra = *parent_ra_addr; */
	safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
	parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
			old_parent_ra, (unsigned long)parent_ra_addr, fp);
	/*
	 * If we fail to get the stack address of the non-leaf function's
	 * ra, stop the function graph tracer and return.
	 */
	if (parent_ra_addr == 0)
		goto out;
#endif
	/* *parent_ra_addr = return_hooker; */
	safe_store_stack(return_hooker, parent_ra_addr, faulted);
	if (unlikely(faulted))
		goto out;

	if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
	    == -EBUSY) {
		*parent_ra_addr = old_parent_ra;
		return;
	}

	/*
	 * Get the recorded ip of the current mcount calling site in the
	 * __mcount_loc section, which will be used to filter the function
	 * entries configured through the tracing/set_graph_function interface.
	 */

	insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
	trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);

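	/*
	 * Worked example (addresses are hypothetical): for an in-kernel
	 * call site, insns == 2, so with MCOUNT_INSN_SIZE == 4 and
	 * self_ra == 0x80123460, trace.func == 0x80123458, the address of
	 * the "jal _mcount" recorded in __mcount_loc.
	 */
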
	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent_ra_addr = old_parent_ra;
	}
	return;
out:
	ftrace_graph_stop();
	WARN_ON(1);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */