~ubuntu-branches/debian/experimental/linux-2.6/experimental


Viewing changes to arch/arm/kernel/perf_event.c

  • Committer: Package Import Robot
  • Author(s): Ben Hutchings
  • Date: 2012-03-21 03:08:36 UTC
  • mfrom: (1.2.34)
  • Revision ID: package-import@ubuntu.com-20120321030836-rvavg03lkz15wj2q
Tags: 3.3-1~experimental.1
* New upstream release: http://kernelnewbies.org/Linux_3.3

[ Ben Hutchings ]
* [x86] crypto: Enable CRYPTO_SERPENT_SSE2_586, CRYPTO_SERPENT_SSE2_X86_64
* aufs: Update to aufs3.x-rcN-20120312
* IB: Enable INFINIBAND_SRPT as module (Closes: #663041)

@@ -180,7 +180,7 @@
 u64
 armpmu_event_update(struct perf_event *event,
                     struct hw_perf_event *hwc,
-                    int idx, int overflow)
+                    int idx)
 {
         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
         u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@
                              new_raw_count) != prev_raw_count)
                 goto again;
 
-        new_raw_count &= armpmu->max_period;
-        prev_raw_count &= armpmu->max_period;
-
-        if (overflow)
-                delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-        else
-                delta = new_raw_count - prev_raw_count;
+        delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
         local64_add(delta, &event->count);
         local64_sub(delta, &hwc->period_left);
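
The masked subtraction above replaces the explicit overflow bookkeeping. As a quick sanity check, here is a minimal user-space sketch (not part of this patch), assuming a 32-bit counter so that armpmu->max_period is 0xffffffff: unsigned subtraction wraps modulo 2^64, and masking the difference with max_period reduces it modulo the counter width, so a wrapped counter yields the same delta the old overflow branch computed.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        const uint64_t max_period     = 0xffffffffULL; /* assumed 32-bit counter */
        const uint64_t prev_raw_count = 0xfffffff0ULL; /* value at previous read */
        const uint64_t new_raw_count  = 0x00000010ULL; /* counter wrapped since  */

        /* Old scheme: needs an overflow flag to pick this formula. */
        uint64_t with_flag = max_period - prev_raw_count + new_raw_count + 1;

        /* New scheme: mask the raw difference; no overflow flag required. */
        uint64_t masked = (new_raw_count - prev_raw_count) & max_period;

        printf("old formula: %llu\n", (unsigned long long)with_flag); /* 32 */
        printf("new formula: %llu\n", (unsigned long long)masked);    /* 32 */
        return 0;
}

Both printouts give 32 for these example values, which is why callers no longer need to pass an overflow argument.
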
@@ -216,7 +210,7 @@
         if (hwc->idx < 0)
                 return;
 
-        armpmu_event_update(event, hwc, hwc->idx, 0);
+        armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@
         if (!(hwc->state & PERF_HES_STOPPED)) {
                 armpmu->disable(hwc, hwc->idx);
                 barrier(); /* why? */
-                armpmu_event_update(event, hwc, hwc->idx, 0);
+                armpmu_event_update(event, hwc, hwc->idx);
                 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
         }
 }
@@ -518,7 +512,13 @@
         hwc->config_base            |= (unsigned long)mapping;
 
         if (!hwc->sample_period) {
-                hwc->sample_period  = armpmu->max_period;
+                /*
+                 * For non-sampling runs, limit the sample_period to half
+                 * of the counter width. That way, the new counter value
+                 * is far less likely to overtake the previous one unless
+                 * you have some serious IRQ latency issues.
+                 */
+                hwc->sample_period  = armpmu->max_period >> 1;
                 hwc->last_period    = hwc->sample_period;
                 local64_set(&hwc->period_left, hwc->sample_period);
         }
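
For the same assumed 32-bit counter, the new default halves the period programmed for non-sampling counters, leaving roughly 2^31 events of headroom before a freshly read value could lap the previous one. A rough sketch (not kernel code) of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t max_period = 0xffffffffULL; /* assumed 32-bit counter */

        /* Old default: the full counter range. */
        printf("old sample_period: 0x%llx\n", (unsigned long long)max_period);

        /* New default: half the counter range. */
        printf("new sample_period: 0x%llx\n", (unsigned long long)(max_period >> 1));
        return 0;
}
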
@@ -680,6 +680,28 @@
 }
 
 /*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+                                        unsigned long action, void *hcpu)
+{
+        if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+                return NOTIFY_DONE;
+
+        if (cpu_pmu && cpu_pmu->reset)
+                cpu_pmu->reset(NULL);
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+        .notifier_call = pmu_cpu_notify,
+};
+
+/*
  * CPU PMU identification and registration.
  */
 static int __init
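
The action check in pmu_cpu_notify masks off CPU_TASKS_FROZEN, so both the normal hotplug path (CPU_STARTING) and the suspend/resume path (CPU_STARTING_FROZEN) reach the PMU reset. A minimal user-space sketch of just that check; the constant values below are illustrative stand-ins for the real ones in include/linux/cpu.h:

#include <stdio.h>

#define CPU_ONLINE              0x0002  /* illustrative value */
#define CPU_STARTING            0x000a  /* illustrative value */
#define CPU_TASKS_FROZEN        0x0010  /* illustrative value */
#define CPU_STARTING_FROZEN     (CPU_STARTING | CPU_TASKS_FROZEN)

/* Mirrors the notifier's test: strip the "frozen" modifier, then compare. */
static int wants_pmu_reset(unsigned long action)
{
        return (action & ~CPU_TASKS_FROZEN) == CPU_STARTING;
}

int main(void)
{
        printf("CPU_STARTING        -> %d\n", wants_pmu_reset(CPU_STARTING));        /* 1 */
        printf("CPU_STARTING_FROZEN -> %d\n", wants_pmu_reset(CPU_STARTING_FROZEN)); /* 1 */
        printf("CPU_ONLINE          -> %d\n", wants_pmu_reset(CPU_ONLINE));          /* 0 */
        return 0;
}
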
@@ -730,6 +752,7 @@
                 pr_info("enabled with %s PMU driver, %d counters available\n",
                         cpu_pmu->name, cpu_pmu->num_events);
                 cpu_pmu_init(cpu_pmu);
+                register_cpu_notifier(&pmu_cpu_notifier);
                 armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
         } else {
                 pr_info("no hardware support available\n");