~ubuntu-branches/debian/wheezy/linux-2.6/wheezy

« back to all changes in this revision

Viewing changes to arch/mn10300/kernel/irq.c

  • Committer: Bazaar Package Importer
  • Author(s): Ben Hutchings, Ben Hutchings, Aurelien Jarno
  • Date: 2011-06-07 12:14:05 UTC
  • mfrom: (43.1.9 sid)
  • Revision ID: james.westby@ubuntu.com-20110607121405-i3h1rd7nrnd2b73h
Tags: 2.6.39-2
[ Ben Hutchings ]
* [x86] Enable BACKLIGHT_APPLE, replacing BACKLIGHT_MBP_NVIDIA
  (Closes: #627492)
* cgroups: Disable memory resource controller by default. Allow it
  to be enabled using kernel parameter 'cgroup_enable=memory'.
* rt2800usb: Enable support for more USB devices including
  Linksys WUSB600N (Closes: #596626) (this change was accidentally
  omitted from 2.6.39-1)
* [x86] Remove Celeron from list of processors supporting PAE. Most
  'Celeron M' models do not.
* Update debconf template translations:
  - Swedish (Martin Bagge) (Closes: #628932)
  - French (David Prévot) (Closes: #628191)
* aufs: Update for 2.6.39 (Closes: #627837)
* Add stable 2.6.39.1, including:
  - ext4: don't set PageUptodate in ext4_end_bio()
  - pata_cmd64x: fix boot crash on parisc (Closes: #622997, #622745)
  - ext3: Fix fs corruption when make_indexed_dir() fails
  - netfilter: nf_ct_sip: validate Content-Length in TCP SIP messages
  - sctp: fix race between sctp_bind_addr_free() and
    sctp_bind_addr_conflict()
  - sctp: fix memory leak of the ASCONF queue when free asoc
  - md/bitmap: fix saving of events_cleared and other state
  - cdc_acm: Fix oops when Droids MuIn LCD is connected
  - cx88: Fix conversion from BKL to fine-grained locks (Closes: #619827)
  - keys: Set cred->user_ns in key_replace_session_keyring (CVE-2011-2184)
  - tmpfs: fix race between truncate and writepage
  - nfs41: Correct offset for LAYOUTCOMMIT
  - xen/mmu: fix a race window causing leave_mm BUG()
  - ext4: fix possible use-after-free in ext4_remove_li_request()
  For the complete list of changes, see:
   http://www.kernel.org/pub/linux/kernel/v2.6/ChangeLog-2.6.39.1
* Bump ABI to 2
* netfilter: Enable IP_SET, IP_SET_BITMAP_IP, IP_SET_BITMAP_IPMAC,
  IP_SET_BITMAP_PORT, IP_SET_HASH_IP, IP_SET_HASH_IPPORT,
  IP_SET_HASH_IPPORTIP, IP_SET_HASH_IPPORTNET, IP_SET_HASH_NET,
  IP_SET_HASH_NETPORT, IP_SET_LIST_SET, NETFILTER_XT_SET as modules
  (Closes: #629401)

[ Aurelien Jarno ]
* [mipsel/loongson-2f] Disable SCSI_LPFC to work around GCC ICE.

Show diffs side-by-side

added added

removed removed

Lines of Context:
37
37
/*
38
38
 * MN10300 interrupt controller operations
39
39
 */
40
 
static void mn10300_cpupic_ack(unsigned int irq)
 
40
static void mn10300_cpupic_ack(struct irq_data *d)
41
41
{
 
42
        unsigned int irq = d->irq;
42
43
        unsigned long flags;
43
44
        u16 tmp;
44
45
 
61
62
        arch_local_irq_restore(flags);
62
63
}
63
64
 
64
 
static void mn10300_cpupic_mask(unsigned int irq)
 
65
static void mn10300_cpupic_mask(struct irq_data *d)
65
66
{
66
 
        __mask_and_set_icr(irq, GxICR_LEVEL, 0);
 
67
        __mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
67
68
}
68
69
 
69
 
static void mn10300_cpupic_mask_ack(unsigned int irq)
 
70
static void mn10300_cpupic_mask_ack(struct irq_data *d)
70
71
{
 
72
        unsigned int irq = d->irq;
71
73
#ifdef CONFIG_SMP
72
74
        unsigned long flags;
73
75
        u16 tmp;
85
87
                tmp2 = GxICR(irq);
86
88
 
87
89
                irq_affinity_online[irq] =
88
 
                        any_online_cpu(*irq_desc[irq].affinity);
 
90
                        any_online_cpu(*d->affinity);
89
91
                CROSS_GxICR(irq, irq_affinity_online[irq]) =
90
92
                        (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
91
93
                tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
97
99
#endif /* CONFIG_SMP */
98
100
}
99
101
 
100
 
static void mn10300_cpupic_unmask(unsigned int irq)
 
102
static void mn10300_cpupic_unmask(struct irq_data *d)
101
103
{
102
 
        __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
 
104
        __mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
103
105
}
104
106
 
105
 
static void mn10300_cpupic_unmask_clear(unsigned int irq)
 
107
static void mn10300_cpupic_unmask_clear(struct irq_data *d)
106
108
{
 
109
        unsigned int irq = d->irq;
107
110
        /* the MN10300 PIC latches its interrupt request bit, even after the
108
111
         * device has ceased to assert its interrupt line and the interrupt
109
112
         * channel has been disabled in the PIC, so for level-triggered
121
124
        } else {
122
125
                tmp = GxICR(irq);
123
126
 
124
 
                irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
 
127
                irq_affinity_online[irq] = any_online_cpu(*d->affinity);
125
128
                CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
126
129
                tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
127
130
        }
134
137
 
135
138
#ifdef CONFIG_SMP
136
139
static int
137
 
mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
 
140
mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
 
141
                           bool force)
138
142
{
139
143
        unsigned long flags;
140
144
        int err;
142
146
        flags = arch_local_cli_save();
143
147
 
144
148
        /* check irq no */
145
 
        switch (irq) {
 
149
        switch (d->irq) {
146
150
        case TMJCIRQ:
147
151
        case RESCHEDULE_IPI:
148
152
        case CALL_FUNC_SINGLE_IPI:
149
153
        case LOCAL_TIMER_IPI:
150
154
        case FLUSH_CACHE_IPI:
151
155
        case CALL_FUNCTION_NMI_IPI:
152
 
        case GDB_NMI_IPI:
 
156
        case DEBUGGER_NMI_IPI:
153
157
#ifdef CONFIG_MN10300_TTYSM0
154
158
        case SC0RXIRQ:
155
159
        case SC0TXIRQ:
181
185
                break;
182
186
 
183
187
        default:
184
 
                set_bit(irq, irq_affinity_request);
 
188
                set_bit(d->irq, irq_affinity_request);
185
189
                err = 0;
186
190
                break;
187
191
        }
202
206
 * mask_ack() is provided), and mask_ack() just masks.
203
207
 */
204
208
static struct irq_chip mn10300_cpu_pic_level = {
205
 
        .name           = "cpu_l",
206
 
        .disable        = mn10300_cpupic_mask,
207
 
        .enable         = mn10300_cpupic_unmask_clear,
208
 
        .ack            = NULL,
209
 
        .mask           = mn10300_cpupic_mask,
210
 
        .mask_ack       = mn10300_cpupic_mask,
211
 
        .unmask         = mn10300_cpupic_unmask_clear,
 
209
        .name                   = "cpu_l",
 
210
        .irq_disable            = mn10300_cpupic_mask,
 
211
        .irq_enable             = mn10300_cpupic_unmask_clear,
 
212
        .irq_ack                = NULL,
 
213
        .irq_mask               = mn10300_cpupic_mask,
 
214
        .irq_mask_ack           = mn10300_cpupic_mask,
 
215
        .irq_unmask             = mn10300_cpupic_unmask_clear,
212
216
#ifdef CONFIG_SMP
213
 
        .set_affinity   = mn10300_cpupic_setaffinity,
 
217
        .irq_set_affinity       = mn10300_cpupic_setaffinity,
214
218
#endif
215
219
};
216
220
 
220
224
 * We use the latch clearing function of the PIC as the 'ACK' function.
221
225
 */
222
226
static struct irq_chip mn10300_cpu_pic_edge = {
223
 
        .name           = "cpu_e",
224
 
        .disable        = mn10300_cpupic_mask,
225
 
        .enable         = mn10300_cpupic_unmask,
226
 
        .ack            = mn10300_cpupic_ack,
227
 
        .mask           = mn10300_cpupic_mask,
228
 
        .mask_ack       = mn10300_cpupic_mask_ack,
229
 
        .unmask         = mn10300_cpupic_unmask,
 
227
        .name                   = "cpu_e",
 
228
        .irq_disable            = mn10300_cpupic_mask,
 
229
        .irq_enable             = mn10300_cpupic_unmask,
 
230
        .irq_ack                = mn10300_cpupic_ack,
 
231
        .irq_mask               = mn10300_cpupic_mask,
 
232
        .irq_mask_ack           = mn10300_cpupic_mask_ack,
 
233
        .irq_unmask             = mn10300_cpupic_unmask,
230
234
#ifdef CONFIG_SMP
231
 
        .set_affinity   = mn10300_cpupic_setaffinity,
 
235
        .irq_set_affinity       = mn10300_cpupic_setaffinity,
232
236
#endif
233
237
};
234
238
 
252
256
        __mask_and_set_icr(irq, GxICR_ENABLE, level);
253
257
}
254
258
 
255
 
void mn10300_intc_set_level(unsigned int irq, unsigned int level)
256
 
{
257
 
        set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
258
 
}
259
 
 
260
 
void mn10300_intc_clear(unsigned int irq)
261
 
{
262
 
        __mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
263
 
}
264
 
 
265
 
void mn10300_intc_set(unsigned int irq)
266
 
{
267
 
        __mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
268
 
}
269
 
 
270
 
void mn10300_intc_enable(unsigned int irq)
271
 
{
272
 
        mn10300_cpupic_unmask(irq);
273
 
}
274
 
 
275
 
void mn10300_intc_disable(unsigned int irq)
276
 
{
277
 
        mn10300_cpupic_mask(irq);
278
 
}
279
 
 
280
259
/*
281
260
 * mark an interrupt to be ACK'd after interrupt handlers have been run rather
282
261
 * than before
284
263
 */
285
264
void mn10300_set_lateack_irq_type(int irq)
286
265
{
287
 
        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
 
266
        irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
288
267
                                 handle_level_irq);
289
268
}
290
269
 
296
275
        int irq;
297
276
 
298
277
        for (irq = 0; irq < NR_IRQS; irq++)
299
 
                if (irq_desc[irq].chip == &no_irq_chip)
 
278
                if (irq_get_chip(irq) == &no_irq_chip)
300
279
                        /* due to the PIC latching interrupt requests, even
301
280
                         * when the IRQ is disabled, IRQ_PENDING is superfluous
302
281
                         * and we can use handle_level_irq() for edge-triggered
303
282
                         * interrupts */
304
 
                        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
 
283
                        irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
305
284
                                                 handle_level_irq);
306
285
 
307
286
        unit_init_IRQ();
356
335
/*
357
336
 * Display interrupt management information through /proc/interrupts
358
337
 */
359
 
int show_interrupts(struct seq_file *p, void *v)
 
338
int arch_show_interrupts(struct seq_file *p, int prec)
360
339
{
361
 
        int i = *(loff_t *) v, j, cpu;
362
 
        struct irqaction *action;
363
 
        unsigned long flags;
364
 
 
365
 
        switch (i) {
366
 
                /* display column title bar naming CPUs */
367
 
        case 0:
368
 
                seq_printf(p, "           ");
369
 
                for (j = 0; j < NR_CPUS; j++)
370
 
                        if (cpu_online(j))
371
 
                                seq_printf(p, "CPU%d       ", j);
372
 
                seq_putc(p, '\n');
373
 
                break;
374
 
 
375
 
                /* display information rows, one per active CPU */
376
 
        case 1 ... NR_IRQS - 1:
377
 
                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
378
 
 
379
 
                action = irq_desc[i].action;
380
 
                if (action) {
381
 
                        seq_printf(p, "%3d: ", i);
382
 
                        for_each_present_cpu(cpu)
383
 
                                seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
384
 
 
385
 
                        if (i < NR_CPU_IRQS)
386
 
                                seq_printf(p, " %14s.%u",
387
 
                                           irq_desc[i].chip->name,
388
 
                                           (GxICR(i) & GxICR_LEVEL) >>
389
 
                                           GxICR_LEVEL_SHIFT);
390
 
                        else
391
 
                                seq_printf(p, " %14s",
392
 
                                           irq_desc[i].chip->name);
393
 
 
394
 
                        seq_printf(p, "  %s", action->name);
395
 
 
396
 
                        for (action = action->next;
397
 
                             action;
398
 
                             action = action->next)
399
 
                                seq_printf(p, ", %s", action->name);
400
 
 
401
 
                        seq_putc(p, '\n');
402
 
                }
403
 
 
404
 
                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
405
 
                break;
406
 
 
407
 
                /* polish off with NMI and error counters */
408
 
        case NR_IRQS:
409
340
#ifdef CONFIG_MN10300_WD_TIMER
410
 
                seq_printf(p, "NMI: ");
411
 
                for (j = 0; j < NR_CPUS; j++)
412
 
                        if (cpu_online(j))
413
 
                                seq_printf(p, "%10u ", nmi_count(j));
414
 
                seq_putc(p, '\n');
 
341
        int j;
 
342
 
 
343
        seq_printf(p, "%*s: ", prec, "NMI");
 
344
        for (j = 0; j < NR_CPUS; j++)
 
345
                if (cpu_online(j))
 
346
                        seq_printf(p, "%10u ", nmi_count(j));
 
347
        seq_putc(p, '\n');
415
348
#endif
416
349
 
417
 
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
418
 
                break;
419
 
        }
420
 
 
 
350
        seq_printf(p, "%*s: ", prec, "ERR");
 
351
        seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
421
352
        return 0;
422
353
}
423
354
 
424
355
#ifdef CONFIG_HOTPLUG_CPU
425
356
void migrate_irqs(void)
426
357
{
427
 
        irq_desc_t *desc;
428
358
        int irq;
429
359
        unsigned int self, new;
430
360
        unsigned long flags;
431
361
 
432
362
        self = smp_processor_id();
433
363
        for (irq = 0; irq < NR_IRQS; irq++) {
434
 
                desc = irq_desc + irq;
 
364
                struct irq_data *data = irq_get_irq_data(irq);
435
365
 
436
 
                if (desc->status == IRQ_PER_CPU)
 
366
                if (irqd_is_per_cpu(data))
437
367
                        continue;
438
368
 
439
 
                if (cpu_isset(self, irq_desc[irq].affinity) &&
 
369
                if (cpu_isset(self, data->affinity) &&
440
370
                    !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
441
371
                        int cpu_id;
442
372
                        cpu_id = first_cpu(cpu_online_map);
443
 
                        cpu_set(cpu_id, irq_desc[irq].affinity);
 
373
                        cpu_set(cpu_id, data->affinity);
444
374
                }
445
375
                /* We need to operate irq_affinity_online atomically. */
446
376
                arch_local_cli_save(flags);
451
381
                        GxICR(irq) = x & GxICR_LEVEL;
452
382
                        tmp = GxICR(irq);
453
383
 
454
 
                        new = any_online_cpu(irq_desc[irq].affinity);
 
384
                        new = any_online_cpu(data->affinity);
455
385
                        irq_affinity_online[irq] = new;
456
386
 
457
387
                        CROSS_GxICR(irq, new) =