/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */
#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters, so
	 * different code may use this differently, or not use it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};
/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates that the indexes of the counters to be
	 * used are even numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
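	/*
	 * Worked example: CNTR_EVEN is 0b...01010101, i.e. bits 0, 2, 4,
	 * ... are set, so an event tagged CNTR_EVEN may only be scheduled
	 * on even-numbered counters; CNTR_ODD sets bits 1, 3, 5, ... and
	 * CNTR_ALL places no restriction.
	 */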
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x
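/* E.g. C(L1D) expands to PERF_COUNT_HW_CACHE_L1D. */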
struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;
#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1 << 0)
#define M_PERFCTL_KERNEL		(1 << 1)
#define M_PERFCTL_SUPERVISOR		(1 << 2)
#define M_PERFCTL_USER			(1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1 << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define	   M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define	   M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define	   M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1 << 30)
#define M_PERFCTL_MORE			(1 << 31)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					M_PERFCTL_KERNEL |		\
					M_PERFCTL_USER |		\
					M_PERFCTL_SUPERVISOR |		\
					M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0
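/*
 * Illustration of the control register layout encoded above: a value
 * that counts event 5 in user mode with the overflow interrupt on
 * would be built as
 *
 *	M_PERFCTL_EVENT(5) | M_PERFCTL_USER | M_PERFCTL_INTERRUPT_ENABLE
 *
 * i.e. the event number starting at bit 5, the mode bits in bits 0-3
 * and the interrupt enable in bit 4.
 */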
#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#if defined(CONFIG_HW_PERF_EVENTS)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : smp_processor_id())
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

static unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}

#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id()	0

#endif /* CONFIG_MIPS_MT_SMP */
static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}
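/*
 * With the swizzle above, VPE 1 sees its logical counters 0 and 1 on
 * physical counters 2 and 3 (idx 0 -> 2, idx 1 -> 3), so each VPE of
 * a dual-VPE core owns its own half of the four counters.
 */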
static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned, we must cast to truncate
		 * off any sign extension.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}
static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}
static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}
static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * Only the counter mask matters here; the range has already
	 * been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind occupies the
		 * counter that an event of the latter kind wants to use,
		 * the "counter allocation" for the latter event fails,
		 * even though dynamically swapping the two would satisfy
		 * both. We leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}
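/*
 * Allocation example: with four counters and an event restricted to
 * odd counters (cntr_mask bits 1 and 3 set), the loop above tries
 * counter 3 first, then counter 1, and fails with -EAGAIN only if
 * both are already claimed in used_mask.
 */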
static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}
static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
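/*
 * Arithmetic example: with 32-bit counters, mipspmu.overflow is
 * 1 << 31, so writing (overflow - left) starts the counter exactly
 * 'left' events below the overflow bit; bit 31 gets set (raising the
 * interrupt) after 'left' more events.
 */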
static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}
static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}
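/*
 * Note on the perf core state machine: add() below installs an event
 * in the stopped state (PERF_HES_STOPPED | PERF_HES_UPTODATE);
 * start() clears those flags, reprograms the period and enables the
 * counter; stop() disables the counter and folds the final count back
 * in via mipspmu_event_update().
 */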
static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* To look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}
static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}
static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_MT_SMP
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers
 * cannot be directly accessed across CPUs, so to do global control we
 * would need cross-CPU calls. on_each_cpu() could help, but we cannot
 * guarantee this function is called with interrupts enabled. So here
 * we pause the local counters, then grab a rwlock and leave the
 * counters on other CPUs alone. If a counter interrupt is raised while
 * we own the write lock, the handler simply pauses the local counters
 * on that CPU and spins. Also, we know we won't be switched to another
 * CPU after pausing the local counters and before grabbing the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	write_lock(&pmuint_rwlock);
#endif
}
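/*
 * A typical global-control sequence driven by the perf core is thus:
 *
 *	perf_pmu_disable(pmu);	-> mipspmu_disable(): pause + write_lock
 *	...reprogram counters...
 *	perf_pmu_enable(pmu);	-> mipspmu_enable(): write_unlock + resume
 */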
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING,
				  "mips_perf_pmu", NULL);
		if (err)
			pr_warning("Unable to request IRQ%d for MIPS performance counters!\n",
				   mipspmu.irq);
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}
/*
 * mipsxx/rm9000/loongson2 have different performance counters, so they
 * each have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}
static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
			atomic_dec(&active_events);
			return -ENOSPC;
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}
static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};
static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
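/*
 * Encoding example (with CONFIG_MIPS_MT_SMP): the 24K branch event
 * { 0x02, CNTR_EVEN, T } encodes to
 *
 *	(T << 24) | (CNTR_EVEN & 0xffff00) | 0x02 = 0x00555502
 *
 * since T is 0, CNTR_EVEN is 0x55555555 and the event id is 0x02.
 */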
static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	const struct mips_perf_event *pev;

	pev = ((*mipspmu.general_event_map)[idx].event_id ==
		UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
		&(*mipspmu.general_event_map)[idx]);

	return pev;
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	/* Decode the perf cache config: type, op and result byte fields. */
	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}
static int validate_event(struct cpu_hw_events *cpuc,
			  struct perf_event *event)
{
	struct hw_perf_event fake_hwc = event->hw;

	/* Allow mixed event groups, so return 1 to pass validation. */
	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return mipsxx_pmu_alloc_counter(cpuc, &fake_hwc) >= 0;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (!validate_event(&fake_cpuc, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_cpuc, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_cpuc, event))
		return -ENOSPC;

	return 0;
}
/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}
static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	/* Each case deliberately falls through to clear lower counters. */
	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}
/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* The 74K core has a different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};
/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};
/* The 74K core has a completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	/* The 74K core does not have specific DTLB events. */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};
static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; the same event is used
	 * for read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};
#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow maximum flexibility in how each individual counter
	 * shared by a single CPU operates (the mode exclusion and the
	 * range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period	= mipspmu.max_period;
		hwc->last_period	= hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	event->destroy = hw_perf_event_destroy;

	return err;
}
static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}
static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_MT_SMP
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}
/* 24K */
#define IS_UNSUPPORTED_24K_EVENT(r, b)					\
	((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 ||		\
	 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 ||		\
	 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 ||		\
	 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) ||		\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_UNSUPPORTED_34K_EVENT(r, b)					\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 ||		\
	 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) ||		\
	 ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_UNSUPPORTED_74K_EVENT(r, b)					\
	((r) == 5 || ((r) >= 135 && (r) <= 137) ||			\
	 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 ||		\
	 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) ||		\
	 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 ||		\
	 (b) == 61 || (r) == 62 || (r) == 191 ||			\
	 ((b) >= 64 && (b) <= 127))
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_UNSUPPORTED_1004K_EVENT(r, b)				\
	((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 ||		\
	 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif
/*
 * Users can specify raw events 0-255, where 0-127 select events on the
 * even counters and 128-255 select events on the odd counters; bit 7
 * indicates the parity. So, for example, to count event number 15 (from
 * the user manual) on an odd counter, add 128 to 15 and use 143 (0x8F)
 * as the event config.
 */
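/*
 * For instance, counting that odd-counter event 15 from user space
 * would look like (hypothetical workload name):
 *
 *	perf stat -e r8F ./workload
 */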
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
			return ERR_PTR(-EOPNOTSUPP);
		raw_event.event_id = base_id;
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	}

	return &raw_event;
}
static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	/* Reject the reserved event numbers. */
	switch (base_id) {
	case 0x00:
	case 0x15:
	case 0x25:
	case 0x2d:
	case 0x2e ... 0x34:
	case 0x1b ... 0x1f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}
static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
		if (cp0_perfcount_irq >= 0)
			irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
		else
			irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance counters, or not yet implemented.\n");
		return -ENODEV;
	}
	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU, irq %d%s\n",
		mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);