/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *  Copyright (C) 2011	Don Zickus Red Hat, Inc.
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <linux/mca.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <linux/atomic.h>
#include <asm/traps.h>
#include <asm/mach_traps.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#define NMI_MAX_NAMELEN 16
struct nmiaction {
	struct list_head list;
	nmi_handler_t handler;
	unsigned int flags;
	const char *name;
};

struct nmi_desc {
	spinlock_t lock;
	struct list_head head;
};

static struct nmi_desc nmi_desc[NMI_MAX] =
{
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock),
		.head = LIST_HEAD_INIT(nmi_desc[0].head),
	},
	{
		.lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock),
		.head = LIST_HEAD_INIT(nmi_desc[1].head),
	},
};
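
/*
 * Note: the two descriptors above correspond to the NMI types this file
 * uses, NMI_LOCAL and NMI_UNKNOWN (see the nmi type enum in <asm/nmi.h>).
 */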

struct nmi_stats {
	unsigned int normal;
	unsigned int unknown;
	unsigned int external;
	unsigned int swallow;
};

static DEFINE_PER_CPU(struct nmi_stats, nmi_stats);

static int ignore_nmis;

int unknown_nmi_panic;
/*
 * Prevent the NMI reason port (0x61) from being accessed simultaneously;
 * can only be used from the NMI handler.
 */
static DEFINE_RAW_SPINLOCK(nmi_reason_lock);

static int __init setup_unknown_nmi_panic(char *str)
{
	unknown_nmi_panic = 1;
	return 1;
}
__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
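
/*
 * Note: besides this boot-time parameter, unknown_nmi_panic can normally
 * also be toggled at runtime through the kernel.unknown_nmi_panic sysctl.
 */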

#define nmi_to_desc(type) (&nmi_desc[type])

static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *a;
	int handled = 0;

	rcu_read_lock();

	/*
	 * NMIs are edge-triggered, which means if you have enough
	 * of them concurrently, you can lose some because only one
	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
	list_for_each_entry_rcu(a, &desc->head, list)
		handled += a->handler(type, regs);

	rcu_read_unlock();

	/* return total number of NMI events handled */
	return handled;
}

static int __setup_nmi(unsigned int type, struct nmiaction *action)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	/*
	 * most handlers of type NMI_UNKNOWN never return because
	 * they just assume the NMI is theirs.  Just a sanity check
	 * to manage expectations
	 */
	WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));

	/*
	 * some handlers need to be executed first otherwise a fake
	 * event confuses some handlers (kdump uses this flag)
	 */
	if (action->flags & NMI_FLAG_FIRST)
		list_add_rcu(&action->list, &desc->head);
	else
		list_add_tail_rcu(&action->list, &desc->head);

	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

static struct nmiaction *__free_nmi(unsigned int type, const char *name)
{
	struct nmi_desc *desc = nmi_to_desc(type);
	struct nmiaction *n, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);

	list_for_each_entry_rcu(n, &desc->head, list) {
		/*
		 * the name passed in to describe the nmi handler
		 * is used as the lookup key
		 */
		if (!strcmp(n->name, name)) {
			WARN(in_nmi(),
				"Trying to free NMI (%s) from NMI context!\n", n->name);
			list_del_rcu(&n->list);
			found = n;
			break;
		}
	}

	spin_unlock_irqrestore(&desc->lock, flags);
	/* wait for any handler still running on another CPU to finish */
	synchronize_rcu();
	return found;
}

int register_nmi_handler(unsigned int type, nmi_handler_t handler,
			unsigned long nmiflags, const char *devname)
{
	struct nmiaction *action;
	int retval = -ENOMEM;

	if (!handler)
		return -EINVAL;

	action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL);
	if (!action)
		goto fail_action;

	action->handler = handler;
	action->flags = nmiflags;
	action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL);
	if (!action->name)
		goto fail_action_name;

	retval = __setup_nmi(type, action);
	if (retval)
		goto fail_setup_nmi;

	return retval;

fail_setup_nmi:
	kfree(action->name);
fail_action_name:
	kfree(action);
fail_action:
	return retval;
}
EXPORT_SYMBOL_GPL(register_nmi_handler);

void unregister_nmi_handler(unsigned int type, const char *name)
{
	struct nmiaction *a;

	a = __free_nmi(type, name);
	if (a) {
		kfree(a->name);
		kfree(a);
	}
}
EXPORT_SYMBOL_GPL(unregister_nmi_handler);
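
/*
 * Illustrative sketch only (not part of the original file): how a caller
 * might use the registration API above.  The handler and the "example"
 * name here are hypothetical; a handler returns non-zero when it handled
 * the NMI, and the same name keys the later unregistration.
 */
#if 0
static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
{
	/* Inspect hardware state; return 0 if this NMI is not ours. */
	return 0;
}

static int __init example_init(void)
{
	/* NMI_LOCAL: CPU-specific sources (e.g. perf counters). */
	return register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0,
				    "example");
}

static void example_teardown(void)
{
	unregister_nmi_handler(NMI_LOCAL, "example");
}
#endif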

static notrace __kprobes void
pci_serr_error(unsigned char reason, struct pt_regs *regs)
{
	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	/*
	 * On some machines, PCI SERR line is used to report memory
	 * errors. EDAC makes use of it.
	 */
#if defined(CONFIG_EDAC)
	if (edac_handler_set()) {
		edac_atomic_assert_error();
		return;
	}
#endif

	if (panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");

	/* Clear and disable the PCI SERR error line. */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
	outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
io_check_error(unsigned char reason, struct pt_regs *regs)
{
	unsigned long i;

	pr_emerg(
	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
		 reason, smp_processor_id());
	show_registers(regs);

	if (panic_on_io_nmi)
		panic("NMI IOCK error: Not continuing");

	/* Re-enable the IOCK line, wait for a few seconds */
	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);

	i = 20000;
	while (--i) {
		touch_nmi_watchdog();
		udelay(100);
	}

	reason &= ~NMI_REASON_CLEAR_IOCHK;
	outb(reason, NMI_REASON_PORT);
}

static notrace __kprobes void
unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
{
	int handled;

	/*
	 * Use 'false' as back-to-back NMIs are dealt with one level up.
	 * Of course this makes having multiple 'unknown' handlers useless
	 * as only the first one is ever run (unless it can actually determine
	 * if it caused the NMI)
	 */
	handled = nmi_handle(NMI_UNKNOWN, regs, false);
	if (handled) {
		__this_cpu_add(nmi_stats.unknown, handled);
		return;
	}

	__this_cpu_add(nmi_stats.unknown, 1);

#ifdef CONFIG_MCA
	/*
	 * Might actually be able to figure out what the guilty party
	 * is:
	 */
	if (MCA_bus) {
		mca_handle_nmi();
		return;
	}
#endif
	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
		 reason, smp_processor_id());

	pr_emerg("Do you have a strange power saving mode enabled?\n");
	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
		panic("NMI: Not continuing");

	pr_emerg("Dazed and confused, but trying to continue\n");
}

static DEFINE_PER_CPU(bool, swallow_nmi);
static DEFINE_PER_CPU(unsigned long, last_nmi_rip);
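
/*
 * last_nmi_rip records the interrupted instruction pointer of the most
 * recent NMI on this CPU; seeing the same rip again is treated below as
 * the second half of a back-to-back NMI pair.
 */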

static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
{
	unsigned char reason = 0;
	int handled;
	bool b2b = false;

	/*
	 * CPU-specific NMI must be processed before non-CPU-specific
	 * NMI, otherwise we may lose it, because the CPU-specific
	 * NMI can not be detected/processed on other CPUs.
	 */

	/*
	 * Back-to-back NMIs are interesting because they can either
	 * be two NMIs or more than two NMIs (anything over two is dropped
	 * due to NMI being edge-triggered).  If this is the second half
	 * of the back-to-back NMI, assume we dropped things and process
	 * more handlers.  Otherwise reset the 'swallow' NMI behaviour.
	 */
	if (regs->ip == __this_cpu_read(last_nmi_rip))
		b2b = true;
	else
		__this_cpu_write(swallow_nmi, false);

	__this_cpu_write(last_nmi_rip, regs->ip);

	handled = nmi_handle(NMI_LOCAL, regs, b2b);
	__this_cpu_add(nmi_stats.normal, handled);
	if (handled) {
		/*
		 * There are cases when an NMI handler handles multiple
		 * events in the current NMI.  One of these events may
		 * be queued for the next NMI.  Because the event is
		 * already handled, the next NMI will result in an unknown
		 * NMI.  Instead let's flag this for a potential NMI to
		 * swallow.
		 */
		if (handled > 1)
			__this_cpu_write(swallow_nmi, true);
		return;
	}

	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
	raw_spin_lock(&nmi_reason_lock);
	reason = x86_platform.get_nmi_reason();

	if (reason & NMI_REASON_MASK) {
		if (reason & NMI_REASON_SERR)
			pci_serr_error(reason, regs);
		else if (reason & NMI_REASON_IOCHK)
			io_check_error(reason, regs);
#ifdef CONFIG_X86_32
		/*
		 * Reassert NMI in case it became active
		 * meanwhile as it's edge-triggered:
		 */
		reassert_nmi();
#endif
		__this_cpu_add(nmi_stats.external, 1);
		raw_spin_unlock(&nmi_reason_lock);
		return;
	}
	raw_spin_unlock(&nmi_reason_lock);

	/*
	 * Only one NMI can be latched at a time.  To handle
	 * this we may process multiple nmi handlers at once to
	 * cover the case where an NMI is dropped.  The downside
	 * to this approach is we may process an NMI prematurely,
	 * while its real NMI is sitting latched.  This will cause
	 * an unknown NMI on the next run of the NMI processing.
	 *
	 * We tried to flag that condition above, by setting the
	 * swallow_nmi flag when we process more than one event.
	 * This condition is also only present on the second half
	 * of a back-to-back NMI, so we flag that condition too.
	 *
	 * If both are true, we assume we already processed this
	 * NMI previously and we swallow it.  Otherwise we reset
	 * the logic.
	 *
	 * There are scenarios where we may accidentally swallow
	 * a 'real' unknown NMI.  For example, while processing
	 * a perf NMI another perf NMI comes in along with a
	 * 'real' unknown NMI.  These two NMIs get combined into
	 * one (as described above).  When the next NMI gets
	 * processed, it will be flagged by perf as handled, but
	 * no one will know that a 'real' unknown NMI was sent
	 * as well.  As a result it gets swallowed.  Or if the first
	 * perf NMI returns two events handled then the second
	 * NMI will get eaten by the logic below, again losing a
	 * 'real' unknown NMI.  But this is the best we can do
	 * for now.
	 */
	if (b2b && __this_cpu_read(swallow_nmi))
		__this_cpu_add(nmi_stats.swallow, 1);
	else
		unknown_nmi_error(reason, regs);
}

dotraplinkage notrace __kprobes void
do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();

	inc_irq_stat(__nmi_count);

	if (!ignore_nmis)
		default_do_nmi(regs);

	nmi_exit();
}

void stop_nmi(void)
{
	ignore_nmis++;
}

void restart_nmi(void)
{
	ignore_nmis--;
}

/* reset the back-to-back NMI logic */
void local_touch_nmi(void)
{
	__this_cpu_write(last_nmi_rip, 0);
}