~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

Viewing changes to kernel/irq/manage.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

@@ -17 +17 @@
 
 #include "internals.h"
 
+#ifdef CONFIG_IRQ_FORCED_THREADING
+__read_mostly bool force_irqthreads;
+
+static int __init setup_forced_irqthreads(char *arg)
+{
+        force_irqthreads = true;
+        return 0;
+}
+early_param("threadirqs", setup_forced_irqthreads);
+#endif
+
 /**
  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *      @irq: interrupt number to wait for
@@ -30 +41 @@
 void synchronize_irq(unsigned int irq)
 {
         struct irq_desc *desc = irq_to_desc(irq);
-        unsigned int status;
+        bool inprogress;
 
         if (!desc)
                 return;
@@ -42 +53 @@
                  * Wait until we're out of the critical section.  This might
                  * give the wrong answer due to the lack of memory barriers.
                  */
-                while (desc->status & IRQ_INPROGRESS)
+                while (irqd_irq_inprogress(&desc->irq_data))
                         cpu_relax();
 
                 /* Ok, that indicated we're done: double-check carefully. */
                 raw_spin_lock_irqsave(&desc->lock, flags);
-                status = desc->status;
+                inprogress = irqd_irq_inprogress(&desc->irq_data);
                 raw_spin_unlock_irqrestore(&desc->lock, flags);
 
                 /* Oops, that failed? */
-        } while (status & IRQ_INPROGRESS);
+        } while (inprogress);
 
         /*
          * We made sure that no hardirq handler is running. Now verify
@@ -73 +84 @@
 {
         struct irq_desc *desc = irq_to_desc(irq);
 
-        if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
-            !desc->irq_data.chip->irq_set_affinity)
+        if (!desc || !irqd_can_balance(&desc->irq_data) ||
+            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
                 return 0;
 
         return 1;
@@ -100 +111 @@
         }
 }
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
+{
+        return irqd_can_move_in_process_context(data);
+}
+static inline bool irq_move_pending(struct irq_data *data)
+{
+        return irqd_is_setaffinity_pending(data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+        cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+        cpumask_copy(mask, desc->pending_mask);
+}
+#else
+static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
+static inline bool irq_move_pending(struct irq_data *data) { return false; }
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
+#endif
+
+int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+{
+        struct irq_chip *chip = irq_data_get_irq_chip(data);
+        struct irq_desc *desc = irq_data_to_desc(data);
+        int ret = 0;
+
+        if (!chip || !chip->irq_set_affinity)
+                return -EINVAL;
+
+        if (irq_can_move_pcntxt(data)) {
+                ret = chip->irq_set_affinity(data, mask, false);
+                switch (ret) {
+                case IRQ_SET_MASK_OK:
+                        cpumask_copy(data->affinity, mask);
+                case IRQ_SET_MASK_OK_NOCOPY:
+                        irq_set_thread_affinity(desc);
+                        ret = 0;
+                }
+        } else {
+                irqd_set_move_pending(data);
+                irq_copy_pending(desc, mask);
+        }
+
+        if (desc->affinity_notify) {
+                kref_get(&desc->affinity_notify->kref);
+                schedule_work(&desc->affinity_notify->work);
+        }
+        irqd_set(data, IRQD_AFFINITY_SET);
+
+        return ret;
+}
+
 /**
  *      irq_set_affinity - Set the irq affinity of a given irq
  *      @irq:           Interrupt to set affinity
- *      @cpumask:       cpumask
+ *      @mask:          cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
-        struct irq_chip *chip = desc->irq_data.chip;
         unsigned long flags;
+        int ret;
 
-        if (!chip->irq_set_affinity)
+        if (!desc)
                 return -EINVAL;
 
         raw_spin_lock_irqsave(&desc->lock, flags);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-        if (desc->status & IRQ_MOVE_PCNTXT) {
-                if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-                        cpumask_copy(desc->irq_data.affinity, cpumask);
-                        irq_set_thread_affinity(desc);
-                }
-        }
-        else {
-                desc->status |= IRQ_MOVE_PENDING;
-                cpumask_copy(desc->pending_mask, cpumask);
-        }
-#else
-        if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-                cpumask_copy(desc->irq_data.affinity, cpumask);
-                irq_set_thread_affinity(desc);
-        }
-#endif
-        desc->status |= IRQ_AFFINITY_SET;
+        ret =  __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
         raw_spin_unlock_irqrestore(&desc->lock, flags);
-        return 0;
+        return ret;
 }
 
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
         unsigned long flags;
+        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 
         if (!desc)
                 return -EINVAL;
-
-        raw_spin_lock_irqsave(&desc->lock, flags);
         desc->affinity_hint = m;
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
-
+        irq_put_desc_unlock(desc, flags);
         return 0;
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
+static void irq_affinity_notify(struct work_struct *work)
+{
+        struct irq_affinity_notify *notify =
+                container_of(work, struct irq_affinity_notify, work);
+        struct irq_desc *desc = irq_to_desc(notify->irq);
+        cpumask_var_t cpumask;
+        unsigned long flags;
+
+        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
+                goto out;
+
+        raw_spin_lock_irqsave(&desc->lock, flags);
+        if (irq_move_pending(&desc->irq_data))
+                irq_get_pending(cpumask, desc);
+        else
+                cpumask_copy(cpumask, desc->irq_data.affinity);
+        raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+        notify->notify(notify, cpumask);
+
+        free_cpumask_var(cpumask);
+out:
+        kref_put(&notify->kref, notify->release);
+}
+
+/**
+ *      irq_set_affinity_notifier - control notification of IRQ affinity changes
+ *      @irq:           Interrupt for which to enable/disable notification
+ *      @notify:        Context for notification, or %NULL to disable
+ *                      notification.  Function pointers must be initialised;
+ *                      the other fields will be initialised by this function.
+ *
+ *      Must be called in process context.  Notification may only be enabled
+ *      after the IRQ is allocated and must be disabled before the IRQ is
+ *      freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+        struct irq_desc *desc = irq_to_desc(irq);
+        struct irq_affinity_notify *old_notify;
+        unsigned long flags;
+
+        /* The release function is promised process context */
+        might_sleep();
+
+        if (!desc)
+                return -EINVAL;
+
+        /* Complete initialisation of *notify */
+        if (notify) {
+                notify->irq = irq;
+                kref_init(&notify->kref);
+                INIT_WORK(&notify->work, irq_affinity_notify);
+        }
+
+        raw_spin_lock_irqsave(&desc->lock, flags);
+        old_notify = desc->affinity_notify;
+        desc->affinity_notify = notify;
+        raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+        if (old_notify)
+                kref_put(&old_notify->kref, old_notify->release);
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
+
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
+        struct irq_chip *chip = irq_desc_get_chip(desc);
+        struct cpumask *set = irq_default_affinity;
+        int ret;
+
+        /* Excludes PER_CPU and NO_BALANCE interrupts */
         if (!irq_can_set_affinity(irq))
                 return 0;
 
@@ -168 +292 @@
          * Preserve an userspace affinity setup, but make sure that
          * one of the targets is online.
          */
-        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-                if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
-                    < nr_cpu_ids)
-                        goto set_affinity;
+        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+                if (cpumask_intersects(desc->irq_data.affinity,
+                                       cpu_online_mask))
+                        set = desc->irq_data.affinity;
                 else
-                        desc->status &= ~IRQ_AFFINITY_SET;
-        }
-
-        cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
-set_affinity:
-        desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
-
+                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
+        }
+
+        cpumask_and(mask, cpu_online_mask, set);
+        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+        switch (ret) {
+        case IRQ_SET_MASK_OK:
+                cpumask_copy(desc->irq_data.affinity, mask);
+        case IRQ_SET_MASK_OK_NOCOPY:
+                irq_set_thread_affinity(desc);
+        }
         return 0;
 }
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 {
         return irq_select_affinity(irq);
 }
@@ -192 +321 @@
 /*
  * Called when affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq)
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
         unsigned long flags;
         int ret;
 
         raw_spin_lock_irqsave(&desc->lock, flags);
-        ret = setup_affinity(irq, desc);
-        if (!ret)
-                irq_set_thread_affinity(desc);
+        ret = setup_affinity(irq, desc, mask);
         raw_spin_unlock_irqrestore(&desc->lock, flags);
-
         return ret;
 }
 
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
         return 0;
 }
@@ -219 +346 @@
         if (suspend) {
                 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                         return;
-                desc->status |= IRQ_SUSPENDED;
-        }
-
-        if (!desc->depth++) {
-                desc->status |= IRQ_DISABLED;
-                desc->irq_data.chip->irq_disable(&desc->irq_data);
-        }
+                desc->istate |= IRQS_SUSPENDED;
+        }
+
+        if (!desc->depth++)
+                irq_disable(desc);
+}
+
+static int __disable_irq_nosync(unsigned int irq)
+{
+        unsigned long flags;
+        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+
+        if (!desc)
+                return -EINVAL;
+        __disable_irq(desc, irq, false);
+        irq_put_desc_busunlock(desc, flags);
+        return 0;
 }
 
 /**
@@ -241 +378 @@
  */
 void disable_irq_nosync(unsigned int irq)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
-        unsigned long flags;
-
-        if (!desc)
-                return;
-
-        chip_bus_lock(desc);
-        raw_spin_lock_irqsave(&desc->lock, flags);
-        __disable_irq(desc, irq, false);
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
-        chip_bus_sync_unlock(desc);
+        __disable_irq_nosync(irq);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
 
@@ -269 +396 @@
  */
 void disable_irq(unsigned int irq)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
-
-        if (!desc)
-                return;
-
-        disable_irq_nosync(irq);
-        if (desc->action)
+        if (!__disable_irq_nosync(irq))
                 synchronize_irq(irq);
 }
 EXPORT_SYMBOL(disable_irq);
 
 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
-        if (resume)
-                desc->status &= ~IRQ_SUSPENDED;
+        if (resume) {
+                if (!(desc->istate & IRQS_SUSPENDED)) {
+                        if (!desc->action)
+                                return;
+                        if (!(desc->action->flags & IRQF_FORCE_RESUME))
+                                return;
+                        /* Pretend that it got disabled ! */
+                        desc->depth++;
+                }
+                desc->istate &= ~IRQS_SUSPENDED;
+        }
 
         switch (desc->depth) {
         case 0:
@@ -291 +421 @@
                 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                 break;
         case 1: {
-                unsigned int status = desc->status & ~IRQ_DISABLED;
-
-                if (desc->status & IRQ_SUSPENDED)
+                if (desc->istate & IRQS_SUSPENDED)
                         goto err_out;
                 /* Prevent probing on this irq: */
-                desc->status = status | IRQ_NOPROBE;
+                irq_settings_set_noprobe(desc);
+                irq_enable(desc);
                 check_irq_resend(desc, irq);
                 /* fall-through */
         }
@@ -318 +447 @@
  */
 void enable_irq(unsigned int irq)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
         unsigned long flags;
+        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 
         if (!desc)
                 return;
-
-        if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
-            KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
-                return;
-
-        chip_bus_lock(desc);
-        raw_spin_lock_irqsave(&desc->lock, flags);
+        if (WARN(!desc->irq_data.chip,
+                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+                goto out;
+
         __enable_irq(desc, irq, false);
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
-        chip_bus_sync_unlock(desc);
+out:
+        irq_put_desc_busunlock(desc, flags);
 }
 EXPORT_SYMBOL(enable_irq);
 
@@ -348 +474 @@
 }
 
 /**
- *      set_irq_wake - control irq power management wakeup
+ *      irq_set_irq_wake - control irq power management wakeup
  *      @irq:   interrupt to control
  *      @on:    enable/disable power management wakeup
  *
@@ -359 +485 @@
  *      Wakeup mode lets this IRQ wake the system from sleep
  *      states like "suspend to RAM".
  */
-int set_irq_wake(unsigned int irq, unsigned int on)
+int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
         unsigned long flags;
+        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
         int ret = 0;
 
+        if (!desc)
+                return -EINVAL;
+
         /* wakeup-capable irqs can be shared between drivers that
          * don't need to have the same sleep mode behaviors.
          */
-        raw_spin_lock_irqsave(&desc->lock, flags);
         if (on) {
                 if (desc->wake_depth++ == 0) {
                         ret = set_irq_wake_real(irq, on);
                         if (ret)
                                 desc->wake_depth = 0;
                         else
-                                desc->status |= IRQ_WAKEUP;
+                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
                 }
         } else {
                 if (desc->wake_depth == 0) {
@@ -385 +513 @@
                         if (ret)
                                 desc->wake_depth = 1;
                         else
-                                desc->status &= ~IRQ_WAKEUP;
+                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
                 }
         }
-
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
+        irq_put_desc_busunlock(desc, flags);
         return ret;
 }
-EXPORT_SYMBOL(set_irq_wake);
+EXPORT_SYMBOL(irq_set_irq_wake);
 
 /*
  * Internal function that tells the architecture code whether a
@@ -401 +528 @@
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
-        struct irqaction *action;
         unsigned long flags;
+        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+        int canrequest = 0;
 
         if (!desc)
                 return 0;
 
-        if (desc->status & IRQ_NOREQUEST)
-                return 0;
-
-        raw_spin_lock_irqsave(&desc->lock, flags);
-        action = desc->action;
-        if (action)
-                if (irqflags & action->flags & IRQF_SHARED)
-                        action = NULL;
-
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
-
-        return !action;
-}
-
-void compat_irq_chip_set_default_handler(struct irq_desc *desc)
-{
-        /*
-         * If the architecture still has not overriden
-         * the flow handler then zap the default. This
-         * should catch incorrect flow-type setting.
-         */
-        if (desc->handle_irq == &handle_bad_irq)
-                desc->handle_irq = NULL;
+        if (irq_settings_can_request(desc)) {
+                if (desc->action)
+                        if (irqflags & desc->action->flags & IRQF_SHARED)
+                                canrequest =1;
+        }
+        irq_put_desc_unlock(desc, flags);
+        return canrequest;
 }
 
 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                       unsigned long flags)
 {
-        int ret;
         struct irq_chip *chip = desc->irq_data.chip;
+        int ret, unmask = 0;
 
         if (!chip || !chip->irq_set_type) {
                 /*
@@ -449 +560 @@
                 return 0;
         }
 
+        flags &= IRQ_TYPE_SENSE_MASK;
+
+        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
+                if (!irqd_irq_masked(&desc->irq_data))
+                        mask_irq(desc);
+                if (!irqd_irq_disabled(&desc->irq_data))
+                        unmask = 1;
+        }
+
         /* caller masked out all except trigger mode flags */
         ret = chip->irq_set_type(&desc->irq_data, flags);
 
-        if (ret)
+        switch (ret) {
+        case IRQ_SET_MASK_OK:
+                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
+                irqd_set(&desc->irq_data, flags);
+
+        case IRQ_SET_MASK_OK_NOCOPY:
+                flags = irqd_get_trigger_type(&desc->irq_data);
+                irq_settings_set_trigger_mask(desc, flags);
+                irqd_clear(&desc->irq_data, IRQD_LEVEL);
+                irq_settings_clr_level(desc);
+                if (flags & IRQ_TYPE_LEVEL_MASK) {
+                        irq_settings_set_level(desc);
+                        irqd_set(&desc->irq_data, IRQD_LEVEL);
+                }
+
+                ret = 0;
+                break;
+        default:
                 pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
                        flags, irq, chip->irq_set_type);
-        else {
-                if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-                        flags |= IRQ_LEVEL;
-                /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-                desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
-                desc->status |= flags;
-
-                if (chip != desc->irq_data.chip)
-                        irq_chip_set_defaults(desc->irq_data.chip);
         }
-
+        if (unmask)
+                unmask_irq(desc);
         return ret;
 }
 
@@ -509 +638 @@
  * handler finished. unmask if the interrupt has not been disabled and
  * is marked MASKED.
  */
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(struct irq_desc *desc,
+                                 struct irqaction *action, bool force)
 {
+        if (!(desc->istate & IRQS_ONESHOT))
+                return;
 again:
         chip_bus_lock(desc);
         raw_spin_lock_irq(&desc->lock);
@@ -522 +654 @@
          * The thread is faster done than the hard interrupt handler
          * on the other CPU. If we unmask the irq line then the
          * interrupt can come in again and masks the line, leaves due
-         * to IRQ_INPROGRESS and the irq line is masked forever.
+         * to IRQS_INPROGRESS and the irq line is masked forever.
+         *
+         * This also serializes the state of shared oneshot handlers
+         * versus "desc->threads_onehsot |= action->thread_mask;" in
+         * irq_wake_thread(). See the comment there which explains the
+         * serialization.
          */
-        if (unlikely(desc->status & IRQ_INPROGRESS)) {
+        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
                 raw_spin_unlock_irq(&desc->lock);
                 chip_bus_sync_unlock(desc);
                 cpu_relax();
                 goto again;
         }
 
-        if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
-                desc->status &= ~IRQ_MASKED;
-                desc->irq_data.chip->irq_unmask(&desc->irq_data);
-        }
+        /*
+         * Now check again, whether the thread should run. Otherwise
+         * we would clear the threads_oneshot bit of this thread which
+         * was just set.
+         */
+        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+                goto out_unlock;
+
+        desc->threads_oneshot &= ~action->thread_mask;
+
+        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
+            irqd_irq_masked(&desc->irq_data))
+                unmask_irq(desc);
+
+out_unlock:
         raw_spin_unlock_irq(&desc->lock);
         chip_bus_sync_unlock(desc);
 }
 
 #ifdef CONFIG_SMP
 /*
- * Check whether we need to change the affinity of the interrupt thread.
+ * Check whether we need to chasnge the affinity of the interrupt thread.
  */
 static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
@@ -573 +721 @@
 #endif
 
 /*
+ * Interrupts which are not explicitely requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
+ */
+static irqreturn_t
+irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+        irqreturn_t ret;
+
+        local_bh_disable();
+        ret = action->thread_fn(action->irq, action->dev_id);
+        irq_finalize_oneshot(desc, action, false);
+        local_bh_enable();
+        return ret;
+}
+
+/*
+ * Interrupts explicitely requested as threaded interupts want to be
+ * preemtible - many of them need to sleep and wait for slow busses to
+ * complete.
+ */
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+                struct irqaction *action)
+{
+        irqreturn_t ret;
+
+        ret = action->thread_fn(action->irq, action->dev_id);
+        irq_finalize_oneshot(desc, action, false);
+        return ret;
+}
+
+/*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
@@ -582 +763 @@
         };
         struct irqaction *action = data;
         struct irq_desc *desc = irq_to_desc(action->irq);
-        int wake, oneshot = desc->status & IRQ_ONESHOT;
+        irqreturn_t (*handler_fn)(struct irq_desc *desc,
+                        struct irqaction *action);
+        int wake;
+
+        if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
+                                        &action->thread_flags))
+                handler_fn = irq_forced_thread_fn;
+        else
+                handler_fn = irq_thread_fn;
 
         sched_setscheduler(current, SCHED_FIFO, &param);
         current->irqaction = action;
@@ -594 +783 @@
                 atomic_inc(&desc->threads_active);
 
                 raw_spin_lock_irq(&desc->lock);
-                if (unlikely(desc->status & IRQ_DISABLED)) {
+                if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
                         /*
                          * CHECKME: We might need a dedicated
                          * IRQ_THREAD_PENDING flag here, which
                          * retriggers the thread in check_irq_resend()
-                         * but AFAICT IRQ_PENDING should be fine as it
+                         * but AFAICT IRQS_PENDING should be fine as it
                          * retriggers the interrupt itself --- tglx
                          */
-                        desc->status |= IRQ_PENDING;
+                        desc->istate |= IRQS_PENDING;
                         raw_spin_unlock_irq(&desc->lock);
                 } else {
+                        irqreturn_t action_ret;
+
                         raw_spin_unlock_irq(&desc->lock);
-
-                        action->thread_fn(action->irq, action->dev_id);
-
-                        if (oneshot)
-                                irq_finalize_oneshot(action->irq, desc);
+                        action_ret = handler_fn(desc, action);
+                        if (!noirqdebug)
+                                note_interrupt(action->irq, desc, action_ret);
                 }
 
                 wake = atomic_dec_and_test(&desc->threads_active);
@@ -619 +808 @@
                         wake_up(&desc->wait_for_threads);
         }
 
+        /* Prevent a stale desc->threads_oneshot */
+        irq_finalize_oneshot(desc, action, true);
+
         /*
          * Clear irqaction. Otherwise exit_irq_thread() would make
          * fuzz about an active irq thread going into nirvana.
@@ -633 +825 @@
 void exit_irq_thread(void)
 {
         struct task_struct *tsk = current;
+        struct irq_desc *desc;
 
         if (!tsk->irqaction)
                 return;
@@ -641 +834 @@
                "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
                tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 
+        desc = irq_to_desc(tsk->irqaction->irq);
+
+        /*
+         * Prevent a stale desc->threads_oneshot. Must be called
+         * before setting the IRQTF_DIED flag.
+         */
+        irq_finalize_oneshot(desc, tsk->irqaction, true);
+
         /*
          * Set the THREAD DIED flag to prevent further wakeups of the
          * soon to be gone threaded handler.
@@ -648 +849 @@
         set_bit(IRQTF_DIED, &tsk->irqaction->flags);
 }
 
+static void irq_setup_forced_threading(struct irqaction *new)
+{
+        if (!force_irqthreads)
+                return;
+        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+                return;
+
+        new->flags |= IRQF_ONESHOT;
+
+        if (!new->thread_fn) {
+                set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+                new->thread_fn = new->handler;
+                new->handler = irq_default_primary_handler;
+        }
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -657 +874 @@
 {
         struct irqaction *old, **old_ptr;
         const char *old_name = NULL;
-        unsigned long flags;
-        int nested, shared = 0;
-        int ret;
+        unsigned long flags, thread_mask = 0;
+        int ret, nested, shared = 0;
+        cpumask_var_t mask;
 
         if (!desc)
                 return -EINVAL;
@@ -683 +900 @@
                 rand_initialize_irq(irq);
         }
 
-        /* Oneshot interrupts are not allowed with shared */
-        if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
-                return -EINVAL;
-
         /*
          * Check whether the interrupt nests into another interrupt
          * thread.
          */
-        nested = desc->status & IRQ_NESTED_THREAD;
+        nested = irq_settings_is_nested_thread(desc);
         if (nested) {
                 if (!new->thread_fn)
                         return -EINVAL;
@@ -701 +914 @@
                  * dummy function which warns when called.
                  */
                 new->handler = irq_nested_primary_handler;
+        } else {
+                if (irq_settings_can_thread(desc))
+                        irq_setup_forced_threading(new);
         }
 
         /*
@@ -724 +940 @@
                 new->thread = t;
         }
 
+        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+                ret = -ENOMEM;
+                goto out_thread;
+        }
+
         /*
          * The following block of code has to be executed atomically
          */
@@ -735 +956 @@
                  * Can't share interrupts unless both agree to and are
                  * the same type (level, edge, polarity). So both flag
                  * fields must have IRQF_SHARED set and the bits which
-                 * set the trigger type must match.
+                 * set the trigger type must match. Also all must
+                 * agree on ONESHOT.
                  */
                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
-                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
+                    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
                         old_name = old->name;
                         goto mismatch;
                 }
 
-#if defined(CONFIG_IRQ_PER_CPU)
                 /* All handlers must agree on per-cpuness */
                 if ((old->flags & IRQF_PERCPU) !=
                     (new->flags & IRQF_PERCPU))
                         goto mismatch;
-#endif
 
                 /* add new interrupt at end of irq queue */
                 do {
+                        thread_mask |= old->thread_mask;
                         old_ptr = &old->next;
                         old = *old_ptr;
                 } while (old);
                 shared = 1;
         }
 
+        /*
+         * Setup the thread mask for this irqaction. Unlikely to have
+         * 32 resp 64 irqs sharing one line, but who knows.
+         */
+        if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+                ret = -EBUSY;
+                goto out_mask;
+        }
+        new->thread_mask = 1 << ffz(thread_mask);
+
         if (!shared) {
-                irq_chip_set_defaults(desc->irq_data.chip);
-
                 init_waitqueue_head(&desc->wait_for_threads);
 
                 /* Setup the type (level, edge polarity) if configured: */
@@ -769 +999 @@
                                         new->flags & IRQF_TRIGGER_MASK);
 
                         if (ret)
-                                goto out_thread;
-                } else
-                        compat_irq_chip_set_default_handler(desc);
-#if defined(CONFIG_IRQ_PER_CPU)
-                if (new->flags & IRQF_PERCPU)
-                        desc->status |= IRQ_PER_CPU;
-#endif
-
-                desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
-                                  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+                                goto out_mask;
+                }
+
+                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
+                                  IRQS_ONESHOT | IRQS_WAITING);
+                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+
+                if (new->flags & IRQF_PERCPU) {
+                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
+                        irq_settings_set_per_cpu(desc);
+                }
 
                 if (new->flags & IRQF_ONESHOT)
-                        desc->status |= IRQ_ONESHOT;
+                        desc->istate |= IRQS_ONESHOT;
 
-                if (!(desc->status & IRQ_NOAUTOEN)) {
-                        desc->depth = 0;
-                        desc->status &= ~IRQ_DISABLED;
-                        desc->irq_data.chip->irq_startup(&desc->irq_data);
-                } else
+                if (irq_settings_can_autoenable(desc))
+                        irq_startup(desc);
+                else
                         /* Undo nested disables: */
                         desc->depth = 1;
 
                 /* Exclude IRQ from balancing if requested */
-                if (new->flags & IRQF_NOBALANCING)
-                        desc->status |= IRQ_NO_BALANCING;
+                if (new->flags & IRQF_NOBALANCING) {
+                        irq_settings_set_no_balancing(desc);
+                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+                }
 
                 /* Set default affinity mask once everything is setup */
-                setup_affinity(irq, desc);
-
-        } else if ((new->flags & IRQF_TRIGGER_MASK)
-                        && (new->flags & IRQF_TRIGGER_MASK)
-                                != (desc->status & IRQ_TYPE_SENSE_MASK)) {
-                /* hope the handler works with the actual trigger mode... */
-                pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
-                                irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
-                                (int)(new->flags & IRQF_TRIGGER_MASK));
+                setup_affinity(irq, desc, mask);
+
+        } else if (new->flags & IRQF_TRIGGER_MASK) {
+                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+                unsigned int omsk = irq_settings_get_trigger_mask(desc);
+
+                if (nmsk != omsk)
+                        /* hope the handler works with current  trigger mode */
+                        pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
+                                   irq, nmsk, omsk);
         }
 
         new->irq = irq;
@@ -818 +1050 @@
          * Check whether we disabled the irq via the spurious handler
          * before. Reenable it and give it another chance.
          */
-        if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
-                desc->status &= ~IRQ_SPURIOUS_DISABLED;
+        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
+                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                 __enable_irq(desc, irq, false);
         }
 
@@ -835 +1067 @@
         register_irq_proc(irq, desc);
         new->dir = NULL;
         register_handler_proc(irq, new);
+        free_cpumask_var(mask);
 
         return 0;
 
@@ -849 +1082 @@
 #endif
         ret = -EBUSY;
 
+out_mask:
+        raw_spin_unlock_irqrestore(&desc->lock, flags);
+        free_cpumask_var(mask);
+
 out_thread:
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
         if (new->thread) {
                 struct task_struct *t = new->thread;
 
@@ -871 +1107 @@
  */
 int setup_irq(unsigned int irq, struct irqaction *act)
 {
+        int retval;
         struct irq_desc *desc = irq_to_desc(irq);
 
-        return __setup_irq(irq, desc, act);
+        chip_bus_lock(desc);
+        retval = __setup_irq(irq, desc, act);
+        chip_bus_sync_unlock(desc);
+
+        return retval;
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
@@ -924 +1165 @@
 #endif
 
         /* If this was the last handler, shut down the IRQ line: */
-        if (!desc->action) {
-                desc->status |= IRQ_DISABLED;
-                if (desc->irq_data.chip->irq_shutdown)
-                        desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-                else
-                        desc->irq_data.chip->irq_disable(&desc->irq_data);
-        }
+        if (!desc->action)
+                irq_shutdown(desc);
 
 #ifdef CONFIG_SMP
         /* make sure affinity_hint is cleaned up */
@@ -1004 +1240 @@
         if (!desc)
                 return;
 
+#ifdef CONFIG_SMP
+        if (WARN_ON(desc->affinity_notify))
+                desc->affinity_notify = NULL;
+#endif
+
         chip_bus_lock(desc);
         kfree(__free_irq(irq, dev_id));
         chip_bus_sync_unlock(desc);
@@ -1074 +1315 @@
         if (!desc)
                 return -EINVAL;
 
-        if (desc->status & IRQ_NOREQUEST)
+        if (!irq_settings_can_request(desc))
                 return -EINVAL;
 
         if (!handler) {
@@ -1149 +1390 @@
         if (!desc)
                 return -EINVAL;
 
-        if (desc->status & IRQ_NESTED_THREAD) {
+        if (irq_settings_is_nested_thread(desc)) {
                 ret = request_threaded_irq(irq, NULL, handler,
                                            flags, name, dev_id);
                 return !ret ? IRQC_IS_NESTED : ret;