/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 *
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */
#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>
/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);
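/*
 * Illustrative example (numbers made up): if, on a given cpu, two tasks
 * each pin one data breakpoint and a third task pins three, that cpu's
 * nr_task_bp_pinned[TYPE_DATA] array reads {2, 0, 1, 0, ...}: entry n
 * counts the tasks owning n + 1 pinned breakpoints, so max_task_bp_pinned()
 * below reports 3 for this cpu.
 */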
/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;
/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
	unsigned int pinned;
	unsigned int flexible;
};
/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);
__weak int hw_breakpoint_weight(struct perf_event *bp)

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
	if (bp->attr.bp_type & HW_BREAKPOINT_RW)
/*
 * Report the maximum number of pinned breakpoints a task
 * has in this cpu.
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

	for (i = nr_slots[type] - 1; i >= 0; i--) {
		if (tsk_pinned[i] > 0)

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
	struct task_struct *tsk = bp->hw.bp_target;
	struct perf_event *iter;

	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
		if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
			count += hw_breakpoint_weight(iter);
/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
		    enum bp_type_idx type)
	struct task_struct *tsk = bp->hw.bp_target;

		slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
			slots->pinned += max_task_bp_pinned(cpu, type);
			slots->pinned += task_bp_pinned(bp, type);
		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

	for_each_online_cpu(cpu) {
		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
			nr += max_task_bp_pinned(cpu, type);
			nr += task_bp_pinned(bp, type);

		if (nr > slots->pinned)

		nr = per_cpu(nr_bp_flexible[type], cpu);

		if (nr > slots->flexible)
			slots->flexible = nr;
/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * in the same cpu.
 */
fetch_this_slot(struct bp_busy_slots *slots, int weight)
	slots->pinned += weight;
/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
				enum bp_type_idx type, int weight)
	unsigned int *tsk_pinned;

	old_count = task_bp_pinned(bp, type);
	old_idx = old_count - 1;
	idx = old_idx + weight;

	/* tsk_pinned[n] is the number of tasks having n + 1 breakpoints */
	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
			tsk_pinned[old_idx]--;
			tsk_pinned[old_idx]++;
/*
 * Add/remove the given breakpoint in our constraint table
 */
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
	struct task_struct *tsk = bp->hw.bp_target;

	/* Pinned counter cpu profiling */
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;

	/* Pinned counter task profiling */
		list_del(&bp->hw.bp_list);

		toggle_bp_task_slot(bp, cpu, enable, type, weight);
		for_each_online_cpu(cpu)
			toggle_bp_task_slot(bp, cpu, enable, type, weight);

		list_add_tail(&bp->hw.bp_list, &bp_task_head);
/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
	/*
	 * A weak stub function here for those archs that don't define
	 * it inside arch/.../kernel/hw_breakpoint.c
	 */
/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per-task
 *          breakpoints (for this cpu) plus the number of per-cpu breakpoints
 *          (for this cpu) doesn't cover every register.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per-cpu
 *          breakpoints for every cpu and we keep the max one. Same for the
 *          per-task breakpoints.
 *
 *  == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must
 *          keep at least one register (or the flexible events will never be
 *          scheduled).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
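/*
 * A rough worked example of the check above, assuming an arch with 4 slots
 * of the relevant type (the numbers are purely illustrative): if a cpu
 * already has 2 pinned per-cpu breakpoints and its busiest task pins 1
 * more, then pinned = 3. Simulating a new breakpoint of weight 1 gives
 * pinned = 4: with at least one flexible counter around, 4 + 1 > 4 and
 * __reserve_bp_slot() refuses the request; with no flexible counter,
 * 4 + 0 <= 4 and the slot is granted.
 */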
static int __reserve_bp_slot(struct perf_event *bp)
	struct bp_busy_slots slots = {0};
	enum bp_type_idx type;

	/* We couldn't initialize breakpoint constraints on boot */
	if (!constraints_initialized)

	if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
	    bp->attr.bp_type == HW_BREAKPOINT_INVALID)

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);

	fetch_bp_busy_slots(&slots, bp, type);
	/*
	 * Simulate the addition of this breakpoint to the constraints
	 * and see the result.
	 */
	fetch_this_slot(&slots, weight);

	/* Flexible counters need to keep at least one slot */
	if (slots.pinned + (!!slots.flexible) > nr_slots[type])

	toggle_bp_slot(bp, true, type, weight);
int reserve_bp_slot(struct perf_event *bp)
	mutex_lock(&nr_bp_mutex);

	ret = __reserve_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);

static void __release_bp_slot(struct perf_event *bp)
	enum bp_type_idx type;

	type = find_slot_idx(bp);
	weight = hw_breakpoint_weight(bp);
	toggle_bp_slot(bp, false, type, weight);

void release_bp_slot(struct perf_event *bp)
	mutex_lock(&nr_bp_mutex);

	arch_unregister_hw_breakpoint(bp);
	__release_bp_slot(bp);

	mutex_unlock(&nr_bp_mutex);
/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
	if (mutex_is_locked(&nr_bp_mutex))

	return __reserve_bp_slot(bp);

int dbg_release_bp_slot(struct perf_event *bp)
	if (mutex_is_locked(&nr_bp_mutex))

	__release_bp_slot(bp);
static int validate_hw_breakpoint(struct perf_event *bp)
	ret = arch_validate_hwbkpt_settings(bp);

	if (arch_check_bp_in_kernelspace(bp)) {
		if (bp->attr.exclude_kernel)
		/*
		 * Don't let unprivileged users set a breakpoint in the trap
		 * path to avoid trap recursion attacks.
		 */
		if (!capable(CAP_SYS_ADMIN))

int register_perf_hw_breakpoint(struct perf_event *bp)
	ret = reserve_bp_slot(bp);

	ret = validate_hw_breakpoint(bp);

	/* if arch_validate_hwbkpt_settings() fails then release bp slot */
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data, passed back to the @triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context,
			    struct task_struct *tsk)
	return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
						context);
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
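/*
 * A minimal usage sketch (error handling trimmed; my_triggered() stands in
 * for a caller-supplied perf_overflow_handler_t, and the bp_len/bp_type
 * values depend on the architecture):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event *bp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = addr;
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	bp = register_user_hw_breakpoint(&attr, my_triggered, NULL, tsk);
 *	if (IS_ERR(bp))
 *		return PTR_ERR(bp);
 *
 * See the ptrace breakpoint code in arch/x86/kernel/ptrace.c for an
 * in-tree user.
 */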
/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	err = validate_hw_breakpoint(bp);
		perf_event_enable(bp);

		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

	bp->attr.disabled = attr->disabled;

EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
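/*
 * Sketch of a typical call (illustrative): to move an existing breakpoint
 * to a new address, start from the current attributes and only change
 * bp_addr:
 *
 *	struct perf_event_attr attr = bp->attr;
 *
 *	attr.bp_addr = new_addr;
 *	err = modify_user_hw_breakpoint(bp, &attr);
 */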
/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
	perf_event_release_kernel(bp);
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data, passed back to the @triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    void *context)
	struct perf_event * __percpu *cpu_events, **pevent, *bp;

	cpu_events = alloc_percpu(typeof(*cpu_events));
		return (void __percpu __force *)ERR_PTR(-ENOMEM);
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, NULL,
						      triggered, context);

	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
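/*
 * A rough usage sketch, modelled on samples/hw_breakpoint/data_breakpoint.c
 * (the symbol name and my_wide_handler() are illustrative):
 *
 *	struct perf_event_attr attr;
 *	struct perf_event * __percpu *wp;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *	wp = register_wide_hw_breakpoint(&attr, my_wide_handler, NULL);
 *	if (IS_ERR((void __force *)wp))
 *		return PTR_ERR((void __force *)wp);
 *	...
 *	unregister_wide_hw_breakpoint(wp);
 */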
/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
	struct perf_event **pevent;

	for_each_possible_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		unregister_hw_breakpoint(*pevent);
	free_percpu(cpu_events);
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
static struct notifier_block hw_breakpoint_exceptions_nb = {
	.notifier_call = hw_breakpoint_exceptions_notify,
	/* we need to be notified first */
	.priority = 0x7fffffff
static void bp_perf_event_destroy(struct perf_event *event)
	release_bp_slot(event);

static int hw_breakpoint_event_init(struct perf_event *bp)
	if (bp->attr.type != PERF_TYPE_BREAKPOINT)

	err = register_perf_hw_breakpoint(bp);

	bp->destroy = bp_perf_event_destroy;

static int hw_breakpoint_add(struct perf_event *bp, int flags)
	if (!(flags & PERF_EF_START))
		bp->hw.state = PERF_HES_STOPPED;

	return arch_install_hw_breakpoint(bp);

static void hw_breakpoint_del(struct perf_event *bp, int flags)
	arch_uninstall_hw_breakpoint(bp);

static void hw_breakpoint_start(struct perf_event *bp, int flags)

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
	bp->hw.state = PERF_HES_STOPPED;
static struct pmu perf_breakpoint = {
	.task_ctx_nr	= perf_sw_context, /* could eventually get its own */

	.event_init	= hw_breakpoint_event_init,
	.add		= hw_breakpoint_add,
	.del		= hw_breakpoint_del,
	.start		= hw_breakpoint_start,
	.stop		= hw_breakpoint_stop,
	.read		= hw_breakpoint_pmu_read,
int __init init_hw_breakpoint(void)
	unsigned int **task_bp_pinned;

	for (i = 0; i < TYPE_MAX; i++)
		nr_slots[i] = hw_breakpoint_slots(i);

	for_each_possible_cpu(cpu) {
		for (i = 0; i < TYPE_MAX; i++) {
			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
						  GFP_KERNEL);
			if (!*task_bp_pinned)
	constraints_initialized = 1;

	perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

	return register_die_notifier(&hw_breakpoint_exceptions_nb);
	for_each_possible_cpu(err_cpu) {
		/* free each cpu's arrays; entries never allocated are NULL */
		for (i = 0; i < TYPE_MAX; i++)
			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));