#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/apicdef.h>

#include "perf_event.h"

static __initconst const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0141, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
		[ C(RESULT_MISS) ] = 0x98e9, /* CPU Request to Memory, r */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};
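
/*
 * In the table above a value of 0 means no hardware event is available for
 * that cache op/result combination, while -1 marks combinations that are
 * not supported at all; the generic x86 cache-event setup code rejects
 * requests for either.
 */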

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
  [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
  [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
  [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
  [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = 0x00d1, /* "Dispatch stalls" event */
};
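
/*
 * For example, a generic "cycles" event requested from user space
 * (PERF_COUNT_HW_CPU_CYCLES) is programmed with event select 0x76, the
 * AMD "CPU clocks not halted" event.
 */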

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static int amd_pmu_hw_config(struct perf_event *event)
{
	int ret = x86_pmu_hw_config(event);

	if (ret)
		return ret;

	if (event->attr.exclude_host && event->attr.exclude_guest)
		/*
		 * When HO == GO == 1 the hardware treats that as GO == HO == 0
		 * and will count in both modes. We don't want to count in that
		 * case so we emulate no-counting by setting US = OS = 0.
		 */
		event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
				      ARCH_PERFMON_EVENTSEL_OS);
	else if (event->attr.exclude_host)
		event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY;
	else if (event->attr.exclude_guest)
		event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY;

	if (event->attr.type != PERF_TYPE_RAW)
		return 0;

	event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

	return 0;
}

/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
	return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}

static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
	return (hwc->config & 0xe0) == 0xe0;
}

static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
	struct amd_nb *nb = cpuc->amd_nb;

	return nb && nb->nb_id != -1;
}
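
/*
 * Note: in the AMD64 PERF_CTL layout the event select field is split, with
 * bits [7:0] of the event code in config bits [7:0] and bits [11:8] in
 * config bits [35:32]; amd_get_event_code() above reassembles the full
 * code, and amd_is_nb_event() tests the low byte for the 0xEx/0xFx
 * NorthBridge range.
 */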

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	int i;

	/*
	 * only care about NB events
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return;

	/*
	 * need to scan whole list because event may not have
	 * been assigned during scheduling
	 *
	 * no race condition possible because event can only
	 * be removed on one CPU at a time AND PMU is disabled
	 * when we come here
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (nb->owners[i] == event) {
			cmpxchg(nb->owners+i, event, NULL);
			break;
		}
	}
}

/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, HyperTransport
 * traffic. They are identified by an event code >= 0xe0.
 * They measure events on the NorthBridge which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events, this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling amd_put_event_constraints().
 *
 * Non NB events are not impacted by this restriction.
 */
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amd_nb *nb = cpuc->amd_nb;
	struct perf_event *old = NULL;
	int max = x86_pmu.num_counters;
	int i, j, k = -1;

	/*
	 * if not NB event or no NB, then no constraints
	 */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/*
	 * detect if already present, if so reuse
	 *
	 * cannot merge with actual allocation
	 * because of possible holes
	 *
	 * event can already be present yet not assigned (in hwc->idx)
	 * because of successive calls to x86_schedule_events() from
	 * hw_perf_group_sched_in() without hw_perf_enable()
	 */
	for (i = 0; i < max; i++) {
		/*
		 * keep track of first free slot
		 */
		if (k == -1 && !nb->owners[i])
			k = i;

		/* already present, reuse */
		if (nb->owners[i] == event)
			goto done;
	}
	/*
	 * not present, so grab a new slot
	 * starting either at:
	 */
	if (hwc->idx != -1) {
		/* previous assignment */
		i = hwc->idx;
	} else if (k != -1) {
		/* start from free slot found */
		i = k;
	} else {
		/*
		 * event not found, no slot found in
		 * first pass, try again from the
		 * beginning
		 */
		i = 0;
	}
	j = i;
	do {
		old = cmpxchg(nb->owners+i, NULL, event);
		if (!old)
			break;
		if (++i == max)
			i = 0;
	} while (i != j);

done:
	if (!old)
		return &nb->event_constraints[i];

	return &emptyconstraint;
}
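
/*
 * One amd_nb structure ends up shared, via cpuc->amd_nb, by all cores on
 * the same northbridge: amd_pmu_cpu_starting() below links a newly onlined
 * core to an already-registered structure (queueing its own pre-allocated
 * copy for freeing) and tracks users through the refcnt field.
 */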

static struct amd_nb *amd_alloc_nb(int cpu)
{
	struct amd_nb *nb;
	int i;

	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));
	if (!nb)
		return NULL;

	nb->nb_id = -1;

	/*
	 * initialize all possible NB constraints
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		__set_bit(i, nb->event_constraints[i].idxmsk);
		nb->event_constraints[i].weight = 1;
	}
	return nb;
}

static int amd_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	WARN_ON_ONCE(cpuc->amd_nb);

	if (boot_cpu_data.x86_max_cores < 2)
		return NOTIFY_OK;

	cpuc->amd_nb = amd_alloc_nb(cpu);
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static void amd_pmu_cpu_starting(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
	struct amd_nb *nb;
	int i, nb_id;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	nb_id = amd_get_nb_id(cpu);
	WARN_ON_ONCE(nb_id == BAD_APICID);

	for_each_online_cpu(i) {
		nb = per_cpu(cpu_hw_events, i).amd_nb;
		if (WARN_ON_ONCE(!nb))
			continue;

		if (nb->nb_id == nb_id) {
			cpuc->kfree_on_online = cpuc->amd_nb;
			cpuc->amd_nb = nb;
			break;
		}
	}

	cpuc->amd_nb->nb_id = nb_id;
	cpuc->amd_nb->refcnt++;
}

static void amd_pmu_cpu_dead(int cpu)
{
	struct cpu_hw_events *cpuhw;

	if (boot_cpu_data.x86_max_cores < 2)
		return;

	cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (cpuhw->amd_nb) {
		struct amd_nb *nb = cpuhw->amd_nb;

		if (nb->nb_id == -1 || --nb->refcnt == 0)
			kfree(nb);

		cpuhw->amd_nb = NULL;
	}
}

static __initconst const struct x86_pmu amd_pmu = {
	.name = "AMD",
	.handle_irq = x86_pmu_handle_irq,
	.disable_all = x86_pmu_disable_all,
	.enable_all = x86_pmu_enable_all,
	.enable = x86_pmu_enable_event,
	.disable = x86_pmu_disable_event,
	.hw_config = amd_pmu_hw_config,
	.schedule_events = x86_schedule_events,
	.eventsel = MSR_K7_EVNTSEL0,
	.perfctr = MSR_K7_PERFCTR0,
	.event_map = amd_pmu_event_map,
	.max_events = ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters = AMD64_NUM_COUNTERS,
	.cntval_bits = 48,
	.cntval_mask = (1ULL << 48) - 1,
	.apic = 1,
	/* use highest bit to detect overflow */
	.max_period = (1ULL << 47) - 1,
	.get_event_constraints = amd_get_event_constraints,
	.put_event_constraints = amd_put_event_constraints,

	.cpu_prepare = amd_pmu_cpu_prepare,
	.cpu_starting = amd_pmu_cpu_starting,
	.cpu_dead = amd_pmu_cpu_dead,
};

#define AMD_EVENT_TYPE_MASK	0x000000F0ULL

#define AMD_EVENT_FP		0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS		0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC		0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU		0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE		0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS		0x000000C0ULL
#define AMD_EVENT_DE		0x000000D0ULL
#define AMD_EVENT_NB		0x000000E0ULL ... 0x000000F0ULL
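
/*
 * The "..." in most of the definitions above is GCC's case-range
 * extension, so those macros are only valid as case labels in the switch
 * statements below, not as ordinary constant expressions.
 */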

/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000	FP	PERF_CTL[5:3]
 * 0x010	FP	PERF_CTL[5:3]
 * 0x020	LS	PERF_CTL[5:0]
 * 0x030	LS	PERF_CTL[5:0]
 * 0x040	DC	PERF_CTL[5:0]
 * 0x050	DC	PERF_CTL[5:0]
 * 0x060	CU	PERF_CTL[2:0]
 * 0x070	CU	PERF_CTL[2:0]
 * 0x080	IC/DE	PERF_CTL[2:0]
 * 0x090	IC/DE	PERF_CTL[2:0]
 * 0x0C0	EX/LS	PERF_CTL[5:0]
 * 0x0D0	DE	PERF_CTL[2:0]
 * 0x0E0	NB	NB_PERF_CTL[3:0]
 * 0x0F0	NB	NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003	FP	PERF_CTL[3]
 * 0x004	FP	PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B	FP	PERF_CTL[3]
 * 0x00D	FP	PERF_CTL[3]
 * 0x023	DE	PERF_CTL[2:0]
 * 0x02D	LS	PERF_CTL[3]
 * 0x02E	LS	PERF_CTL[3,0]
 * 0x043	CU	PERF_CTL[2:0]
 * 0x045	CU	PERF_CTL[2:0]
 * 0x046	CU	PERF_CTL[2:0]
 * 0x054	CU	PERF_CTL[2:0]
 * 0x055	CU	PERF_CTL[2:0]
 * 0x08F	IC	PERF_CTL[0]
 * 0x187	DE	PERF_CTL[0]
 * 0x188	DE	PERF_CTL[0]
 * 0x0DB	EX	PERF_CTL[5:0]
 * 0x0DC	LS	PERF_CTL[5:0]
 * 0x0DD	LS	PERF_CTL[5:0]
 * 0x0DE	LS	PERF_CTL[5:0]
 * 0x0DF	LS	PERF_CTL[5:0]
 * 0x1D6	EX	PERF_CTL[5:0]
 * 0x1D8	EX	PERF_CTL[5:0]
 *
 * (*) depending on the umask all FPU counters may be used
 */

static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
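
/*
 * The second argument to EVENT_CONSTRAINT() is the bitmask of usable
 * counters, matching the mapping table above: e.g. 0x09 allows PERF_CTL[3]
 * or PERF_CTL[0] (amd_f15_PMC30) and 0x38 allows PERF_CTL[5:3]
 * (amd_f15_PMC53).
 */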

static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	unsigned int event_code = amd_get_event_code(hwc);

	switch (event_code & AMD_EVENT_TYPE_MASK) {
	case AMD_EVENT_FP:
		switch (event_code) {
		case 0x000:
			if (!(hwc->config & 0x0000F000ULL))
				break;
			if (!(hwc->config & 0x00000F00ULL))
				break;
			return &amd_f15_PMC3;
		case 0x004:
			if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
				break;
			return &amd_f15_PMC3;
		case 0x003:
		case 0x00B:
		case 0x00D:
			return &amd_f15_PMC3;
		}
		return &amd_f15_PMC53;
	case AMD_EVENT_LS:
	case AMD_EVENT_DC:
	case AMD_EVENT_EX_LS:
		switch (event_code) {
		case 0x023:
		case 0x043:
		case 0x045:
		case 0x046:
		case 0x054:
		case 0x055:
			return &amd_f15_PMC20;
		case 0x02D:
			return &amd_f15_PMC3;
		case 0x02E:
			return &amd_f15_PMC30;
		default:
			return &amd_f15_PMC50;
		}
	case AMD_EVENT_CU:
	case AMD_EVENT_IC_DE:
	case AMD_EVENT_DE:
		switch (event_code) {
		case 0x08F:
		case 0x187:
		case 0x188:
			return &amd_f15_PMC0;
		case 0x0DB ... 0x0DF:
		case 0x1D6:
		case 0x1D8:
			return &amd_f15_PMC50;
		default:
			return &amd_f15_PMC20;
		}
	case AMD_EVENT_NB:
		/* not yet implemented */
		return &emptyconstraint;
	default:
		return &emptyconstraint;
	}
}

static __initconst const struct x86_pmu amd_pmu_f15h = {
	.name = "AMD Family 15h",
	.handle_irq = x86_pmu_handle_irq,
	.disable_all = x86_pmu_disable_all,
	.enable_all = x86_pmu_enable_all,
	.enable = x86_pmu_enable_event,
	.disable = x86_pmu_disable_event,
	.hw_config = amd_pmu_hw_config,
	.schedule_events = x86_schedule_events,
	.eventsel = MSR_F15H_PERF_CTL,
	.perfctr = MSR_F15H_PERF_CTR,
	.event_map = amd_pmu_event_map,
	.max_events = ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters = AMD64_NUM_COUNTERS_F15H,
	.cntval_bits = 48,
	.cntval_mask = (1ULL << 48) - 1,
	.apic = 1,
	/* use highest bit to detect overflow */
	.max_period = (1ULL << 47) - 1,
	.get_event_constraints = amd_get_event_constraints_f15h,
	/* northbridge counters not yet implemented: */
	.put_event_constraints = amd_put_event_constraints,

	.cpu_prepare = amd_pmu_cpu_prepare,
	.cpu_starting = amd_pmu_cpu_starting,
	.cpu_dead = amd_pmu_cpu_dead,
};

__init int amd_pmu_init(void)
{
	/* Performance-monitoring supported from K7 and later: */
	if (boot_cpu_data.x86 < 6)
		return -ENODEV;

	/*
	 * If core performance counter extensions exist, it must be
	 * family 15h, otherwise fail. See x86_pmu_addr_offset().
	 */
	switch (boot_cpu_data.x86) {
	case 0x15:
		if (!cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu_f15h;
		break;
	default:
		if (cpu_has_perfctr_core)
			return -ENODEV;
		x86_pmu = amd_pmu;
		break;
	}

	/* Events are common for all AMDs */
	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
	       sizeof(hw_cache_event_ids));

	return 0;
}