 	arch_local_irq_restore(flags);
 }

-static void mn10300_cpupic_mask(unsigned int irq)
+static void mn10300_cpupic_mask(struct irq_data *d)
 {
-	__mask_and_set_icr(irq, GxICR_LEVEL, 0);
+	__mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
 }

-static void mn10300_cpupic_mask_ack(unsigned int irq)
+static void mn10300_cpupic_mask_ack(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 #ifdef CONFIG_SMP
 	unsigned long flags;
[...]
 #endif /* CONFIG_SMP */
 }
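The conversion in this hunk is mechanical: each chip callback trades its bare `unsigned int irq` argument for a `struct irq_data *d` and recovers the number as d->irq, which lets the genirq core hand chips richer per-interrupt state later without another signature change. The shape of the refactoring in a standalone sketch (all fake_* names are invented for illustration; only the idea of irq_data and d->irq comes from the patch):

    #include <stdio.h>

    /* Invented stand-in for the kernel's irq_data; not real kernel API. */
    struct fake_irq_data {
        unsigned int irq;   /* the interrupt number */
        void *chip_data;    /* room for per-chip private state */
    };

    /* Old style: the callback receives only the interrupt number. */
    static void old_style_mask(unsigned int irq)
    {
        printf("mask irq %u\n", irq);
    }

    /* New style: the callback receives a descriptor and unpacks what it
     * needs, so fields can be added without touching every signature. */
    static void new_style_mask(struct fake_irq_data *d)
    {
        printf("mask irq %u\n", d->irq);
    }

    int main(void)
    {
        struct fake_irq_data d = { .irq = 42, .chip_data = NULL };
        old_style_mask(42);
        new_style_mask(&d);
        return 0;
    }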
-static void mn10300_cpupic_unmask(unsigned int irq)
+static void mn10300_cpupic_unmask(struct irq_data *d)
 {
-	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
+	__mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
 }

-static void mn10300_cpupic_unmask_clear(unsigned int irq)
+static void mn10300_cpupic_unmask_clear(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	/* the MN10300 PIC latches its interrupt request bit, even after the
 	 * device has ceased to assert its interrupt line and the interrupt
 	 * channel has been disabled in the PIC, so for level-triggered
 	 * interrupts we need to clear the latch when we unmask */
[...]
[...]
  * mask_ack() is provided), and mask_ack() just masks.
  */
 static struct irq_chip mn10300_cpu_pic_level = {
-	.disable	= mn10300_cpupic_mask,
-	.enable		= mn10300_cpupic_unmask_clear,
-	.mask		= mn10300_cpupic_mask,
-	.mask_ack	= mn10300_cpupic_mask,
-	.unmask		= mn10300_cpupic_unmask_clear,
+	.irq_disable	= mn10300_cpupic_mask,
+	.irq_enable	= mn10300_cpupic_unmask_clear,
+	.irq_mask	= mn10300_cpupic_mask,
+	.irq_mask_ack	= mn10300_cpupic_mask,
+	.irq_unmask	= mn10300_cpupic_unmask_clear,
 #ifdef CONFIG_SMP
-	.set_affinity	= mn10300_cpupic_setaffinity,
+	.irq_set_affinity = mn10300_cpupic_setaffinity,
 #endif
 };
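For a level-triggered interrupt, the genirq flow handler calls the chip's mask_ack callback before running the action and unmask afterwards. Because this PIC relatches a request even while the channel is masked, the level chip points both .irq_mask and .irq_mask_ack at the plain mask function and defers latch-clearing to .irq_unmask (mn10300_cpupic_unmask_clear), exactly as the comment above describes. A toy, userspace-only model of that sequence (the latch here is a simulated bool, not hardware):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of one PIC channel: the request bit latches and stays set
     * until explicitly cleared, even while the channel is masked. */
    static bool latched, masked;

    static void mask_ack(void)     { masked = true; }  /* mask only, no real ack */
    static void unmask_clear(void) { latched = false; masked = false; }

    static void handler(void)      { printf("handling level IRQ\n"); }

    int main(void)
    {
        latched = true;     /* device asserts its interrupt line */
        mask_ack();         /* flow handler: mask before the action runs */
        handler();
        unmask_clear();     /* unmask AND drop the stale latched request */
        printf("latched=%d masked=%d\n", latched, masked);
        return 0;
    }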
[...]
  * We use the latch clearing function of the PIC as the 'ACK' function.
  */
 static struct irq_chip mn10300_cpu_pic_edge = {
-	.disable	= mn10300_cpupic_mask,
-	.enable		= mn10300_cpupic_unmask,
-	.ack		= mn10300_cpupic_ack,
-	.mask		= mn10300_cpupic_mask,
-	.mask_ack	= mn10300_cpupic_mask_ack,
-	.unmask		= mn10300_cpupic_unmask,
+	.irq_disable	= mn10300_cpupic_mask,
+	.irq_enable	= mn10300_cpupic_unmask,
+	.irq_ack	= mn10300_cpupic_ack,
+	.irq_mask	= mn10300_cpupic_mask,
+	.irq_mask_ack	= mn10300_cpupic_mask_ack,
+	.irq_unmask	= mn10300_cpupic_unmask,
 #ifdef CONFIG_SMP
-	.set_affinity	= mn10300_cpupic_setaffinity,
+	.irq_set_affinity = mn10300_cpupic_setaffinity,
 #endif
 };
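For edge-triggered channels the ordering reverses: the flow handler acks (clears the latch) before running the handler, so an edge that arrives mid-handler relatches and is delivered on the next pass instead of being lost. The same idea as a toy model (purely illustrative, no kernel API involved):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of the edge-triggered flow: ack clears the latch *before*
     * the handler runs, leaving room for a new edge to be recorded. */
    static bool latched;

    static void ack(void)      { latched = false; }  /* clear the latch early */
    static void new_edge(void) { latched = true; }   /* device pulses again */

    int main(void)
    {
        latched = true;   /* first edge arrives */
        ack();            /* acked at the start of the flow handler */
        new_edge();       /* second edge during the handler... */
        printf("pending after handler: %d\n", latched);  /* ...still pending */
        return 0;
    }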
[...]
 	__mask_and_set_icr(irq, GxICR_ENABLE, level);
 }

 void mn10300_intc_set_level(unsigned int irq, unsigned int level)
 {
 	set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
 }

 void mn10300_intc_clear(unsigned int irq)
 {
 	__mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
 }

 void mn10300_intc_set(unsigned int irq)
 {
 	__mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
 }

 void mn10300_intc_enable(unsigned int irq)
 {
-	mn10300_cpupic_unmask(irq);
+	mn10300_cpupic_unmask(irq_get_irq_data(irq));
 }

 void mn10300_intc_disable(unsigned int irq)
 {
-	mn10300_cpupic_mask(irq);
+	mn10300_cpupic_mask(irq_get_irq_data(irq));
 }
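These mn10300_intc_*() helpers preserve a plain interrupt-number interface for the rest of the arch code; only the enable/disable pair has to change with the conversion, since their callees now take irq_data. The converted bodies shown above follow the upstream pattern of looking the descriptor up with irq_get_irq_data(), the standard genirq accessor. The wrapper idiom in a self-contained sketch (the fake_* table and names are invented for illustration):

    #include <stdio.h>

    #define NR_FAKE_IRQS 8

    struct fake_irq_data {
        unsigned int irq;
    };

    /* Invented stand-in for the kernel's irq -> irq_data lookup. */
    static struct fake_irq_data fake_irq_table[NR_FAKE_IRQS];

    static struct fake_irq_data *fake_get_irq_data(unsigned int irq)
    {
        return irq < NR_FAKE_IRQS ? &fake_irq_table[irq] : NULL;
    }

    /* Chip-internal function: new-style signature. */
    static void chip_unmask(struct fake_irq_data *d)
    {
        printf("unmask irq %u\n", d->irq);
    }

    /* Driver-facing wrapper: old-style signature preserved. */
    static void intc_enable(unsigned int irq)
    {
        chip_unmask(fake_get_irq_data(irq));  /* irq 0..7 only in this demo */
    }

    int main(void)
    {
        for (unsigned int i = 0; i < NR_FAKE_IRQS; i++)
            fake_irq_table[i].irq = i;
        intc_enable(3);
        return 0;
    }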
[...]
 /*
  * mark an interrupt to be ACK'd after interrupt handlers have been run rather
  * than before
  */
[...]
 	for (irq = 0; irq < NR_IRQS; irq++)
-		if (irq_desc[irq].chip == &no_irq_chip)
+		if (irq_get_chip(irq) == &no_irq_chip)
 			/* due to the PIC latching interrupt requests, even
 			 * when the IRQ is disabled, IRQ_PENDING is superfluous
 			 * and we can use handle_level_irq() for edge-triggered
 			 * interrupts */
-			set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
+			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
 						 handle_level_irq);
[...]
 /*
  * Display interrupt management information through /proc/interrupts
  */
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, j, cpu;
-	struct irqaction *action;
[...]
-		/* display column title bar naming CPUs */
-		for (j = 0; j < NR_CPUS; j++)
[...]
-				seq_printf(p, "CPU%d ", j);
[...]
-		/* display information rows, one per active CPU */
-	case 1 ... NR_IRQS - 1:
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
[...]
-		action = irq_desc[i].action;
[...]
-			seq_printf(p, "%3d: ", i);
-			for_each_present_cpu(cpu)
-				seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
[...]
-				seq_printf(p, " %14s.%u",
-					   irq_desc[i].chip->name,
-					   (GxICR(i) & GxICR_LEVEL) >>
-					   GxICR_LEVEL_SHIFT);
[...]
-				seq_printf(p, " %14s",
-					   irq_desc[i].chip->name);
[...]
-			seq_printf(p, " %s", action->name);
[...]
-			for (action = action->next;
-			     action;
-			     action = action->next)
-				seq_printf(p, ", %s", action->name);
[...]
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
[...]
-		/* polish off with NMI and error counters */
[...]
 #ifdef CONFIG_MN10300_WD_TIMER
-	seq_printf(p, "NMI: ");
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", nmi_count(j));
+	seq_printf(p, "%*s: ", prec, "NMI");
+	for (j = 0; j < NR_CPUS; j++)
+		if (cpu_online(j))
+			seq_printf(p, "%10u ", nmi_count(j));
[...]
-	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+	seq_printf(p, "%*s: ", prec, "ERR");
+	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
[...]
 #ifdef CONFIG_HOTPLUG_CPU
 void migrate_irqs(void)
 {
[...]
 	unsigned int self, new;
 	unsigned long flags;

 	self = smp_processor_id();
 	for (irq = 0; irq < NR_IRQS; irq++) {
-		desc = irq_desc + irq;
+		struct irq_data *data = irq_get_irq_data(irq);

-		if (desc->status == IRQ_PER_CPU)
+		if (irqd_is_per_cpu(data))
 			continue;

-		if (cpu_isset(self, irq_desc[irq].affinity) &&
+		if (cpu_isset(self, data->affinity) &&
 		    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
 			cpu_id = first_cpu(cpu_online_map);
-			cpu_set(cpu_id, irq_desc[irq].affinity);
+			cpu_set(cpu_id, data->affinity);
 		}
 		/* We need to operate irq_affinity_online atomically. */
 		arch_local_cli_save(flags);
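migrate_irqs() runs on a CPU that is going offline: it skips per-CPU interrupts, and for every interrupt whose affinity mask names the departing CPU but no CPU that will remain online, it re-homes the interrupt to the first CPU still in the online map. The selection logic reduced to plain bitmasks (a standalone sketch; the kernel uses cpumask_t and the cpu_* helpers instead):

    #include <stdio.h>

    /* Model CPU masks as plain bits: bit n == CPU n. Illustrative only. */
    static unsigned int first_cpu(unsigned int mask)
    {
        for (unsigned int cpu = 0; cpu < 32; cpu++)
            if (mask & (1u << cpu))
                return cpu;
        return 0;  /* callers must not pass an empty mask */
    }

    int main(void)
    {
        unsigned int self = 1;               /* CPU going offline */
        unsigned int online = 0x5;           /* CPUs 0 and 2 remain online */
        unsigned int affinity = 1u << self;  /* IRQ bound to CPU 1 only */

        /* Re-home the IRQ if its mask names self but no online CPU. */
        if ((affinity & (1u << self)) && !(affinity & online))
            affinity = 1u << first_cpu(online);

        printf("new affinity mask: 0x%x\n", affinity);  /* 0x1 -> CPU 0 */
        return 0;
    }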