#ifndef _LINUX_PTRACE_H
#define _LINUX_PTRACE_H

/* structs and defines to help the user use the ptrace system call. */

/* has the defines to get at the registers. */

#define PTRACE_TRACEME 0
#define PTRACE_PEEKTEXT 1
#define PTRACE_PEEKDATA 2
#define PTRACE_PEEKUSR 3
#define PTRACE_POKETEXT 4
#define PTRACE_POKEDATA 5
#define PTRACE_POKEUSR 6
#define PTRACE_CONT 7
#define PTRACE_KILL 8
#define PTRACE_SINGLESTEP 9

#define PTRACE_ATTACH 16
#define PTRACE_DETACH 17

#define PTRACE_SYSCALL 24
/* 0x4200-0x4300 are reserved for architecture-independent additions. */
#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201
#define PTRACE_GETSIGINFO 0x4202
#define PTRACE_SETSIGINFO 0x4203
/*
 * Generic ptrace interface that exports the architecture specific regsets
 * using the corresponding NT_* types (which are also used in the core dump).
 * Please note that the NT_PRSTATUS note type in a core dump contains a full
 * 'struct elf_prstatus'. But the user_regset for NT_PRSTATUS contains just the
 * elf_gregset_t that is the pr_reg field of 'struct elf_prstatus'. For all the
 * other user_regset flavors, the user_regset layout and the ELF core dump note
 * payload are exactly the same layout.
 *
 * This interface is used as follows:
 *	struct iovec iov = { buf, len };
 *
 *	ret = ptrace(PTRACE_GETREGSET/PTRACE_SETREGSET, pid, NT_XXX_TYPE, &iov);
 *
 * On successful completion, iov.len will be updated by the kernel,
 * specifying how much the kernel has written/read to/from the user's iov.buf.
 */
#define PTRACE_GETREGSET 0x4204
#define PTRACE_SETREGSET 0x4205
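
/*
 * A minimal user-space sketch of the regset interface described above,
 * assuming a tracee "pid" that is already stopped under ptrace.
 * NT_PRSTATUS (from <elf.h>) selects the elf_gregset_t regset:
 *
 *	#include <elf.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *	#include <sys/user.h>
 *
 *	struct user_regs_struct regs;
 *	struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };
 *
 *	long ret = ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
 *
 * On success, iov.iov_len is updated to the number of bytes the kernel
 * actually filled in.
 */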

#define PTRACE_SEIZE 0x4206
#define PTRACE_INTERRUPT 0x4207
#define PTRACE_LISTEN 0x4208

/* flags in @data for PTRACE_SEIZE */
#define PTRACE_SEIZE_DEVEL 0x80000000 /* temp flag for development */
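
/*
 * A minimal user-space sketch of attaching with PTRACE_SEIZE, assuming the
 * development flag above still has to be passed in @data while the
 * interface is marked temporary ("pid" is an assumed target process):
 *
 *	ptrace(PTRACE_SEIZE, pid, 0, PTRACE_SEIZE_DEVEL);
 *	ptrace(PTRACE_INTERRUPT, pid, 0, 0);	stop the tracee without SIGSTOP
 */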

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

#define PTRACE_O_MASK 0x0000007f

/* Wait extended result codes for the above trace options. */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6
#define PTRACE_EVENT_STOP 7
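
/*
 * A minimal tracer-side sketch of how these event codes come back through
 * waitpid(), assuming the corresponding PTRACE_O_TRACE* options have been
 * set on a tracee "pid" ("handle_exec" is a hypothetical helper):
 *
 *	#include <signal.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *
 *	int status;
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP) {
 *		int event = (status >> 16) & 0xffff;
 *		if (event == PTRACE_EVENT_EXEC)
 *			handle_exec(pid);
 *	}
 */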

#include <asm/ptrace.h>

#ifdef __KERNEL__
/*
 * The ownership rules for task->ptrace, which holds the ptrace flags, are
 * simple: when a task is running it owns its task->ptrace flags; when a
 * task is stopped the ptracer owns task->ptrace.
 */
#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD 0x00000004
#define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */

/* PT_TRACE_* event enable flags */
#define PT_EVENT_FLAG_SHIFT 4
#define PT_EVENT_FLAG(event) (1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))

#define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK)
#define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
#define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
#define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)

#define PT_TRACE_MASK 0x000003f4
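
/*
 * For reference: PT_EVENT_FLAG() maps PTRACE_EVENT_FORK..PTRACE_EVENT_EXIT
 * (values 1..6) onto bits 4..9, so PT_TRACE_FORK is 0x10 and PT_TRACE_EXIT
 * is 0x200.  PT_TRACE_MASK (0x3f4) is the OR of those six event bits plus
 * PT_TRACESYSGOOD (0x4).
 */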

/* single stepping state bits (used on ARM and PA-RISC) */
#define PT_SINGLESTEP_BIT 31
#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT)
#define PT_BLOCKSTEP_BIT 30
#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT)

#include <linux/compiler.h> /* For unlikely. */
#include <linux/sched.h> /* For struct task_struct. */

extern long arch_ptrace(struct task_struct *child, long request,
			unsigned long addr, unsigned long data);
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
extern int ptrace_request(struct task_struct *child, long request,
			  unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
			  struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
#define PTRACE_MODE_READ   1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
/* Returns true on success, false on denial. */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);

static inline int ptrace_reparented(struct task_struct *child)
{
	return !same_thread_group(child->real_parent, child->parent);
}

static inline void ptrace_unlink(struct task_struct *child)
{
	if (unlikely(child->ptrace))
		__ptrace_unlink(child);
}

int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);
int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
			    unsigned long data);

/**
 * ptrace_parent - return the task that is tracing the given task
 * @task: task to consider
 *
 * Returns %NULL if no one is tracing @task, or the &struct task_struct
 * pointer to its tracer.
 *
 * Must be called under rcu_read_lock().  The pointer returned might be kept
 * live only by RCU.  During exec, this may be called with task_lock() held
 * on @task, still held from when check_unsafe_exec() was called.
 */
static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
	if (unlikely(task->ptrace))
		return rcu_dereference(task->parent);
	return NULL;
}
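
/*
 * A minimal caller-side sketch: the returned pointer is only guaranteed to
 * stay valid inside the RCU read-side critical section.
 *
 *	struct task_struct *tracer;
 *
 *	rcu_read_lock();
 *	tracer = ptrace_parent(task);
 *	if (tracer)
 *		pr_debug("traced by %d\n", task_pid_nr(tracer));
 *	rcu_read_unlock();
 */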

/**
 * ptrace_event_enabled - test whether a ptrace event is enabled
 * @task: ptracee of interest
 * @event: %PTRACE_EVENT_* to test
 *
 * Test whether @event is enabled for ptracee @task.
 *
 * Returns %true if @event is enabled, %false otherwise.
 */
static inline bool ptrace_event_enabled(struct task_struct *task, int event)
{
	return task->ptrace & PT_EVENT_FLAG(event);
}

/**
 * ptrace_event - possibly stop for a ptrace event notification
 * @event: %PTRACE_EVENT_* value to report
 * @message: value for %PTRACE_GETEVENTMSG to return
 *
 * Check whether @event is enabled and, if so, report @event and @message
 * to the ptrace parent.
 *
 * Called without locks.
 */
static inline void ptrace_event(int event, unsigned long message)
{
	if (unlikely(ptrace_event_enabled(current, event))) {
		current->ptrace_message = message;
		ptrace_notify((event << 8) | SIGTRAP);
	} else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
		/* legacy EXEC report via SIGTRAP */
		send_sig(SIGTRAP, current, 0);
	}
}
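
/*
 * A minimal tracer-side sketch: after one of these event stops, the
 * @message value can be read back with PTRACE_GETEVENTMSG; for the
 * fork/vfork/clone events it is the new child's pid ("pid" is an assumed
 * stopped tracee):
 *
 *	unsigned long msg;
 *	pid_t new_child;
 *
 *	if (ptrace(PTRACE_GETEVENTMSG, pid, 0, &msg) == 0)
 *		new_child = (pid_t)msg;
 */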

/**
 * ptrace_init_task - initialize ptrace state for a new child
 * @child: new child task
 * @ptrace: true if child should be ptrace'd by parent's tracer
 *
 * This is called immediately after adding @child to its parent's children
 * list.  @ptrace is false in the normal case, and true to ptrace @child.
 *
 * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
 */
static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	atomic_set(&child->ptrace_bp_refcnt, 1);
#endif
	child->jobctl = 0;
	child->ptrace = 0;
	child->parent = child->real_parent;

	if (unlikely(ptrace) && current->ptrace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent);

		if (child->ptrace & PT_SEIZED)
			task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
		else
			sigaddset(&child->pending.signal, SIGSTOP);

		set_tsk_thread_flag(child, TIF_SIGPENDING);
	}
}

/**
 * ptrace_release_task - final ptrace-related cleanup of a zombie being reaped
 * @task: task in %EXIT_DEAD state
 *
 * Called with write_lock(&tasklist_lock) held.
 */
static inline void ptrace_release_task(struct task_struct *task)
{
	BUG_ON(!list_empty(&task->ptraced));
	ptrace_unlink(task);
	BUG_ON(!list_empty(&task->ptrace_entry));
}

#ifndef force_successful_syscall_return
/*
 * System call handlers that, upon successful completion, need to return a
 * negative value should call force_successful_syscall_return() right before
 * returning.  On architectures where the syscall convention provides for a
 * separate error flag (e.g., alpha, ia64, ppc{,64}, sparc{,64}, possibly
 * others), this macro can be used to ensure that the error flag will not get
 * set.  On architectures which do not support a separate error flag, the macro
 * is a no-op and the spurious error condition needs to be filtered out by some
 * other means (e.g., in user-level, by passing an extra argument to the
 * syscall handler, or something along those lines).
 */
#define force_successful_syscall_return() do { } while (0)
#endif
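
/*
 * A minimal sketch of the intended usage described above ("sys_example" and
 * "compute_result" are hypothetical; the result may legitimately be
 * negative even on success):
 *
 *	long sys_example(void)
 *	{
 *		long ret = compute_result();
 *
 *		force_successful_syscall_return();
 *		return ret;
 *	}
 */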

/*
 * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
 *
 * These do-nothing inlines are used when the arch does not
 * implement single-step.  The kerneldoc comments are here
 * to document the interface for all arch definitions.
 */

#ifndef arch_has_single_step
/**
 * arch_has_single_step - does this CPU support user-mode single-step?
 *
 * If this is defined, then there must be function declarations or
 * inlines for user_enable_single_step() and user_disable_single_step().
 * arch_has_single_step() should evaluate to nonzero iff the machine
 * supports instruction single-step for user mode.
 * It can be a constant or it can test a CPU feature bit.
 */
#define arch_has_single_step() (0)

/**
 * user_enable_single_step - single-step in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_single_step() has returned nonzero.
 * Set @task so that when it returns to user mode, it will trap after the
 * next single instruction executes.  If arch_has_block_step() is defined,
 * this must clear the effects of user_enable_block_step() too.
 */
static inline void user_enable_single_step(struct task_struct *task)
{
	BUG(); /* This can never be called. */
}

/**
 * user_disable_single_step - cancel user-mode single-step
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * Clear @task of the effects of user_enable_single_step() and
 * user_enable_block_step().  This can be called whether or not either
 * of those was ever called on @task, and even if arch_has_single_step()
 * returned zero.
 */
static inline void user_disable_single_step(struct task_struct *task)
{
}
#else
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#endif /* arch_has_single_step */

#ifndef arch_has_block_step
/**
 * arch_has_block_step - does this CPU support user-mode block-step?
 *
 * If this is defined, then there must be a function declaration or inline
 * for user_enable_block_step(), and arch_has_single_step() must be defined
 * too.  arch_has_block_step() should evaluate to nonzero iff the machine
 * supports step-until-branch for user mode.  It can be a constant or it
 * can test a CPU feature bit.
 */
#define arch_has_block_step() (0)

/**
 * user_enable_block_step - step until branch in user-mode task
 * @task: either current or a task stopped in %TASK_TRACED
 *
 * This can only be called when arch_has_block_step() has returned nonzero,
 * and will never be called when single-instruction stepping is being used.
 * Set @task so that when it returns to user mode, it will trap after the
 * next branch or trap taken.
 */
static inline void user_enable_block_step(struct task_struct *task)
{
	BUG(); /* This can never be called. */
}
#else
extern void user_enable_block_step(struct task_struct *);
#endif /* arch_has_block_step */

#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
extern void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info);
#else
static inline void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
}
#endif

#ifndef arch_ptrace_stop_needed
/**
 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with the siglock held, to decide whether or not it's
 * necessary to release the siglock and call arch_ptrace_stop() with the
 * same @code and @info arguments.  It can be defined to a constant if
 * arch_ptrace_stop() is never required, or always is.  On machines where
 * this makes sense, it should be defined to a quick test to optimize out
 * calling arch_ptrace_stop() when it would be superfluous.  For example,
 * if the thread has not been back to user mode since the last stop, the
 * thread state might indicate that nothing needs to be done.
 */
#define arch_ptrace_stop_needed(code, info) (0)
#endif

#ifndef arch_ptrace_stop
/**
 * arch_ptrace_stop - Do machine-specific work before stopping for ptrace
 * @code: current->exit_code value ptrace will stop with
 * @info: siginfo_t pointer (or %NULL) for signal ptrace will stop with
 *
 * This is called with no locks held when arch_ptrace_stop_needed() has
 * just returned nonzero.  It is allowed to block, e.g. for user memory
 * access.  The arch can have machine-specific work to be done before
 * ptrace stops.  On ia64, register backing store gets written back to user
 * memory here.  Since this can be costly (requires dropping the siglock),
 * we only do it when the arch requires it for this particular stop, as
 * indicated by arch_ptrace_stop_needed().
 */
#define arch_ptrace_stop(code, info) do { } while (0)
#endif

extern int task_current_syscall(struct task_struct *target, long *callno,
				unsigned long args[6], unsigned int maxargs,
				unsigned long *sp, unsigned long *pc);

#ifdef CONFIG_HAVE_HW_BREAKPOINT
extern int ptrace_get_breakpoints(struct task_struct *tsk);
extern void ptrace_put_breakpoints(struct task_struct *tsk);
#else
static inline void ptrace_put_breakpoints(struct task_struct *tsk) { }
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

#endif /* __KERNEL__ */

#endif /* _LINUX_PTRACE_H */