/*
 * Blackfin performance counters
 *
 * Copyright 2011 Analog Devices Inc.
 *
 * Ripped from SuperH version:
 *
 *  Copyright (C) 2009 Paul Mundt
 *
 * Heavily based on the x86 and PowerPC implementations.
 *
 * x86:
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
 *  Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/kernel.h>
27
#include <linux/export.h>
28
#include <linux/init.h>
29
#include <linux/perf_event.h>
30
#include <asm/bfin_pfmon.h>

/*
 * We have two counters, and each counter can support an event type.
 * The 'o' events require PFCNTx=1 and the 's' events PFCNTx=0.
 *
 * 0x04 o pc invariant branches
 * 0x06 o mispredicted branches
 * 0x09 o predicted branches taken
 * 0x0B o EXCPT insn
 * 0x0C o CSYNC/SSYNC insn
 * 0x0D o Insns committed
 * 0x0E o Interrupts taken
 * 0x0F o Misaligned address exceptions
 * 0x80 o Code memory fetches stalled due to DMA
 * 0x83 o 64bit insn fetches delivered
 * 0x9A o data cache fills (bank a)
 * 0x9B o data cache fills (bank b)
 * 0x9C o data cache lines evicted (bank a)
 * 0x9D o data cache lines evicted (bank b)
 * 0x9E o data cache high priority fills
 * 0x9F o data cache low priority fills
 * 0x00 s loop 0 iterations
 * 0x01 s loop 1 iterations
 * 0x0A s CSYNC/SSYNC stalls
 * 0x10 s DAG read-after-write hazards
 * 0x13 s RAW data hazards
 * 0x81 s code TAG stalls
 * 0x82 s code fill stalls
 * 0x90 s processor to memory stalls
 * 0x91 s data memory stalls not hidden by 0x90
 * 0x92 s data store buffer full stalls
 * 0x93 s data memory write buffer full stalls due to high->low priority
 * 0x95 s data memory fill buffer stalls
 * 0x96 s data TAG collision stalls
 * 0x97 s data collision stalls
 * 0x98 s data stalls
 * 0x99 s data stalls sent to processor
 */
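
/*
 * Raw event encoding, as consumed by bfin_pmu_event_init() below: the
 * low bits carry the PFMON event number and bit 8 selects the 's'
 * (PFCNTx=0) events from the table above.  A sketch, assuming
 * PFMON_MASK covers the event number field:
 *
 *	perf stat -e r00D ...	# 0x0D, Insns committed  (PFCNTx = 1)
 *	perf stat -e r110 ...	# 0x10, DAG hazards      (PFCNTx = 0)
 */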

static const int event_map[] = {
	/* use CYCLES cpu register */
	[PERF_COUNT_HW_CPU_CYCLES]          = -1,
	[PERF_COUNT_HW_INSTRUCTIONS]        = 0x0D,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]        = 0x83,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x09,
	[PERF_COUNT_HW_BRANCH_MISSES]       = 0x06,
	[PERF_COUNT_HW_BUS_CYCLES]          = -1,
};

#define C(x) PERF_COUNT_HW_CACHE_##x

static const int cache_events[PERF_COUNT_HW_CACHE_MAX]
                             [PERF_COUNT_HW_CACHE_OP_MAX]
                             [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[C(L1D)] = {	/* Data bank A */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0x9A,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0,
		},
	},

	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0x83,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0,
			[C(RESULT_MISS)  ] = 0,
		},
	},

	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
	},

	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
	},

	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
	},

	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)  ] = -1,
		},
	},
};

const char *perf_pmu_name(void)
{
	return "bfin";
}
EXPORT_SYMBOL(perf_pmu_name);

int perf_num_counters(void)
{
	return ARRAY_SIZE(event_map);
}
EXPORT_SYMBOL(perf_num_counters);

static u64 bfin_pfmon_read(int idx)
{
	return bfin_read32(PFCNTR0 + (idx * 4));
}

static void bfin_pfmon_disable(struct hw_perf_event *hwc, int idx)
{
	bfin_write_PFCTL(bfin_read_PFCTL() & ~PFCEN(idx, PFCEN_MASK));
}

static void bfin_pfmon_enable(struct hw_perf_event *hwc, int idx)
{
	u32 val, mask;

	val = PFPWR;
	if (idx) {
		mask = ~(PFCNT1 | PFMON1 | PFCEN1 | PEMUSW1);
		/* The packed config is for event0, so shift it to event1 slots */
		val |= (hwc->config << (PFMON1_P - PFMON0_P));
		val |= (hwc->config & PFCNT0) << (PFCNT1_P - PFCNT0_P);
		bfin_write_PFCNTR1(0);
	} else {
		mask = ~(PFCNT0 | PFMON0 | PFCEN0 | PEMUSW0);
		val |= hwc->config;
		bfin_write_PFCNTR0(0);
	}

	bfin_write_PFCTL((bfin_read_PFCTL() & mask) | val);
}
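
/*
 * Worked example for idx == 1: hwc->config is always packed for the
 * event0 field positions, e.g. PFMON(0, ev) | PFCNT(0, 1) | PFCEN(0, en),
 * so the whole value is shifted up by (PFMON1_P - PFMON0_P), and the
 * PFCNT0 bit is then re-placed by (PFCNT1_P - PFCNT0_P) separately,
 * which suggests the PFCNT bits sit at a different stride than the
 * PFMON/PFCEN fields.
 */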

static void bfin_pfmon_disable_all(void)
{
	bfin_write_PFCTL(bfin_read_PFCTL() & ~PFPWR);
}

static void bfin_pfmon_enable_all(void)
{
	bfin_write_PFCTL(bfin_read_PFCTL() | PFPWR);
}

struct cpu_hw_events {
	struct perf_event *events[MAX_HWEVENTS];
	unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static int hw_perf_cache_event(int config, int *evp)
{
	unsigned long type, op, result;
	int ev;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = cache_events[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*evp = ev;

	return 0;
}
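
/*
 * For example, an L1D read miss packs as
 *
 *	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 *	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *	PERF_COUNT_HW_CACHE_L1D
 *
 * which hw_perf_cache_event() resolves through cache_events[] to the
 * 0x9A "data cache fills (bank a)" event.
 */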

static void bfin_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;
	int shift = 0;

	/*
	 * Depending on the counter configuration, they may or may not
	 * be chained, in which case the previous counter value can be
	 * updated underneath us if the lower-half overflows.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically.
	 *
	 * As there is no interrupt associated with the overflow events,
	 * this is the simplest approach for maintaining consistency.
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = bfin_pfmon_read(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
}
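
/*
 * The cmpxchg loop above is what keeps concurrent updates safe, e.g.
 * a bfin_pmu_read() racing with a bfin_pmu_stop() on the same event:
 * whichever caller loses the cmpxchg simply rereads prev_count and
 * retries, so no delta is ever counted twice.
 */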

static void bfin_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		bfin_pfmon_disable(hwc, idx);
		cpuc->events[idx] = NULL;
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
		bfin_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void bfin_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	cpuc->events[idx] = event;
	event->hw.state = 0;
	bfin_pfmon_enable(hwc, idx);
}

static void bfin_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	bfin_pmu_stop(event, PERF_EF_UPDATE);
	__clear_bit(event->hw.idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static int bfin_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	int ret = -EAGAIN;

	perf_pmu_disable(event->pmu);

	if (__test_and_set_bit(idx, cpuc->used_mask)) {
		idx = find_first_zero_bit(cpuc->used_mask, MAX_HWEVENTS);
		if (idx == MAX_HWEVENTS)
			goto out;

		__set_bit(idx, cpuc->used_mask);
		hwc->idx = idx;
	}

	bfin_pfmon_disable(hwc, idx);

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (flags & PERF_EF_START)
		bfin_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);
	ret = 0;
out:
	perf_pmu_enable(event->pmu);

	return ret;
}
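
/*
 * Note the allocation strategy in bfin_pmu_add(): the event's previous
 * idx is tried first, and on collision used_mask is scanned for the
 * other free counter; with only two counters, a full mask simply fails
 * the add with -EAGAIN so the perf core can fall back to multiplexing.
 */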

static void bfin_pmu_read(struct perf_event *event)
{
	bfin_perf_event_update(event, &event->hw, event->hw.idx);
}

static int bfin_pmu_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int config = -1;
	int ret;

	if (attr->exclude_hv || attr->exclude_idle)
		return -EPERM;

	/*
	 * All of the on-chip counters are "limited", in that they have
	 * no interrupts, and are therefore unable to do sampling without
	 * further work and timer assistance.
	 */
	if (hwc->sample_period)
		return -EINVAL;

	ret = 0;
	switch (attr->type) {
	case PERF_TYPE_RAW:
		config = PFMON(0, attr->config & PFMON_MASK) |
			PFCNT(0, !(attr->config & 0x100));
		break;
	case PERF_TYPE_HW_CACHE:
		ret = hw_perf_cache_event(attr->config, &config);
		break;
	case PERF_TYPE_HARDWARE:
		if (attr->config >= ARRAY_SIZE(event_map))
			return -EINVAL;

		config = event_map[attr->config];
		break;
	}

	if (config == -1)
		return -EOPNOTSUPP;

	if (!attr->exclude_kernel)
		config |= PFCEN(0, PFCEN_ENABLE_SUPV);
	if (!attr->exclude_user)
		config |= PFCEN(0, PFCEN_ENABLE_USER);

	hwc->config |= config;

	return ret;
}
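
/*
 * A minimal user-space sketch of one attr->type path (PERF_TYPE_HARDWARE;
 * PERF_COUNT_HW_INSTRUCTIONS maps to 0x0D via event_map[] above).
 * Sampling is rejected here, so the count is pulled with read():
 *
 *	struct perf_event_attr attr = {
 *		.size = sizeof(attr),
 *		.type = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_INSTRUCTIONS,
 *		.exclude_kernel = 1,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *	...
 *	read(fd, &count, sizeof(count));
 */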

static void bfin_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i;

	for (i = 0; i < MAX_HWEVENTS; ++i) {
		event = cpuc->events[i];
		if (!event)
			continue;
		hwc = &event->hw;
		bfin_pfmon_enable(hwc, hwc->idx);
	}

	bfin_pfmon_enable_all();
}

static void bfin_pmu_disable(struct pmu *pmu)
{
	bfin_pfmon_disable_all();
}

static struct pmu pmu = {
	.pmu_enable	= bfin_pmu_enable,
	.pmu_disable	= bfin_pmu_disable,
	.event_init	= bfin_pmu_event_init,
	.add		= bfin_pmu_add,
	.del		= bfin_pmu_del,
	.start		= bfin_pmu_start,
	.stop		= bfin_pmu_stop,
	.read		= bfin_pmu_read,
};

static void bfin_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
}

static int
bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		bfin_write_PFCTL(0);
		bfin_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static int __init bfin_pmu_init(void)
{
	int ret;

	ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	if (!ret)
		perf_cpu_notifier(bfin_pmu_notifier);

	return ret;
}
early_initcall(bfin_pmu_init);