+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
+{
+        return irqd_can_move_in_process_context(data);
+}
+static inline bool irq_move_pending(struct irq_data *data)
+{
+        return irqd_is_setaffinity_pending(data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+        cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+        cpumask_copy(mask, desc->pending_mask);
+}
+#else
+static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
+static inline bool irq_move_pending(struct irq_data *data) { return false; }
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
+#endif
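When CONFIG_GENERIC_PENDING_IRQ is off, the stub variants above constant-fold, so the pending-move branches in the callers below compile away entirely; only architectures that must defer affinity changes to hard-interrupt context pay for the pending_mask handling.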
+int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+{
+        struct irq_chip *chip = irq_data_get_irq_chip(data);
+        struct irq_desc *desc = irq_data_to_desc(data);
+        int ret = 0;
+
+        if (!chip || !chip->irq_set_affinity)
+                return -EINVAL;
+
+        if (irq_can_move_pcntxt(data)) {
+                ret = chip->irq_set_affinity(data, mask, false);
+                switch (ret) {
+                case IRQ_SET_MASK_OK:
+                        cpumask_copy(data->affinity, mask);
+                case IRQ_SET_MASK_OK_NOCOPY:
+                        irq_set_thread_affinity(desc);
+                        ret = 0;
+                }
+        } else {
+                irqd_set_move_pending(data);
+                irq_copy_pending(desc, mask);
+        }
+
+        if (desc->affinity_notify) {
+                kref_get(&desc->affinity_notify->kref);
+                schedule_work(&desc->affinity_notify->work);
+        }
+        irqd_set(data, IRQD_AFFINITY_SET);
+
+        return ret;
+}
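The switch above encodes the contract for chip ->irq_set_affinity() callbacks: IRQ_SET_MASK_OK asks the core to copy the new mask into data->affinity (note the deliberate fall-through into the NOCOPY case, so irq_set_thread_affinity() runs either way), while IRQ_SET_MASK_OK_NOCOPY means the callback already updated the mask itself. A minimal sketch of a conforming callback, with my_chip_route_irq() as a hypothetical hardware poke, not anything from this patch:

static int my_chip_set_affinity(struct irq_data *data,
                                const struct cpumask *mask, bool force)
{
        unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        my_chip_route_irq(data->irq, cpu);              /* hypothetical hw poke */
        cpumask_copy(data->affinity, cpumask_of(cpu));  /* we copied ourselves */
        return IRQ_SET_MASK_OK_NOCOPY;
}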
 /**
  *      irq_set_affinity - Set the irq affinity of a given irq
  *      @irq:           Interrupt to set affinity
- *      @cpumask:       cpumask
+ *      @mask:          cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
         struct irq_desc *desc = irq_to_desc(irq);
-        struct irq_chip *chip = desc->irq_data.chip;
         unsigned long flags;
+        int ret;

-        if (!chip->irq_set_affinity)
+        if (!desc)
                 return -EINVAL;

         raw_spin_lock_irqsave(&desc->lock, flags);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-        if (desc->status & IRQ_MOVE_PCNTXT) {
-                if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-                        cpumask_copy(desc->irq_data.affinity, cpumask);
-                        irq_set_thread_affinity(desc);
-                }
-        }
-        else {
-                desc->status |= IRQ_MOVE_PENDING;
-                cpumask_copy(desc->pending_mask, cpumask);
-        }
-#else
-        if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-                cpumask_copy(desc->irq_data.affinity, cpumask);
-                irq_set_thread_affinity(desc);
-        }
-#endif
-        desc->status |= IRQ_AFFINITY_SET;
+        ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
         raw_spin_unlock_irqrestore(&desc->lock, flags);
-        return 0;
+        return ret;
 }
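Callers are unaffected by the refactoring; a minimal usage sketch (driver-side, not part of this patch) that pins an already-requested interrupt to one CPU:

static int pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
{
        if (!cpu_online(cpu))
                return -EINVAL;
        return irq_set_affinity(irq, cpumask_of(cpu));
}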
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
         unsigned long flags;
+        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

         if (!desc)
                 return -EINVAL;
-        raw_spin_lock_irqsave(&desc->lock, flags);
         desc->affinity_hint = m;
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
+        irq_put_desc_unlock(desc, flags);
         return 0;
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
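The hint is purely advisory: it is exported via /proc/irq/<N>/affinity_hint for tools like irqbalance. A typical driver pattern (a sketch, queue_cpu is a placeholder) is to publish a per-queue hint before request_irq() and clear it with NULL on teardown:

        irq_set_affinity_hint(irq, cpumask_of(queue_cpu));     /* setup */
        /* ... */
        irq_set_affinity_hint(irq, NULL);                      /* teardown */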
+static void irq_affinity_notify(struct work_struct *work)
+{
+        struct irq_affinity_notify *notify =
+                container_of(work, struct irq_affinity_notify, work);
+        struct irq_desc *desc = irq_to_desc(notify->irq);
+        cpumask_var_t cpumask;
+        unsigned long flags;
+
+        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
+                goto out;
+
+        raw_spin_lock_irqsave(&desc->lock, flags);
+        if (irq_move_pending(&desc->irq_data))
+                irq_get_pending(cpumask, desc);
+        else
+                cpumask_copy(cpumask, desc->irq_data.affinity);
+        raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+        notify->notify(notify, cpumask);
+
+        free_cpumask_var(cpumask);
+out:
+        kref_put(&notify->kref, notify->release);
+}
+
+/**
+ *      irq_set_affinity_notifier - control notification of IRQ affinity changes
+ *      @irq:           Interrupt for which to enable/disable notification
+ *      @notify:        Context for notification, or %NULL to disable
+ *                      notification.  Function pointers must be initialised;
+ *                      the other fields will be initialised by this function.
+ *
+ *      Must be called in process context.  Notification may only be enabled
+ *      after the IRQ is allocated and must be disabled before the IRQ is
+ *      freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+        struct irq_desc *desc = irq_to_desc(irq);
+        struct irq_affinity_notify *old_notify;
+        unsigned long flags;
+
+        /* The release function is promised process context */
+        might_sleep();
+
+        if (!desc)
+                return -EINVAL;
+
+        /* Complete initialisation of *notify */
+        if (notify) {
+                notify->irq = irq;
+                kref_init(&notify->kref);
+                INIT_WORK(&notify->work, irq_affinity_notify);
+        }
+
+        raw_spin_lock_irqsave(&desc->lock, flags);
+        old_notify = desc->affinity_notify;
+        desc->affinity_notify = notify;
+        raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+        if (old_notify)
+                kref_put(&old_notify->kref, old_notify->release);
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
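A sketch of the intended use (hypothetical my_* names): a driver re-steering per-queue resources when the line's affinity changes. notify() is invoked from the workqueue with the (possibly still pending) target mask, and release() runs once the last kref is dropped:

struct my_queue {
        struct irq_affinity_notify affinity_notify;
        /* ... queue state ... */
};

static void my_irq_notify(struct irq_affinity_notify *notify,
                          const cpumask_t *mask)
{
        struct my_queue *q = container_of(notify, struct my_queue,
                                          affinity_notify);
        /* re-point q's resources at the new CPU set */
}

static void my_irq_release(struct kref *ref)
{
        /* nothing dynamic to free in this sketch */
}

/* at init time: */
q->affinity_notify.notify = my_irq_notify;
q->affinity_notify.release = my_irq_release;
irq_set_affinity_notifier(irq, &q->affinity_notify);

/* before free_irq(): */
irq_set_affinity_notifier(irq, NULL);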
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
+        struct irq_chip *chip = irq_desc_get_chip(desc);
+        struct cpumask *set = irq_default_affinity;
+        int ret;
+
+        /* Excludes PER_CPU and NO_BALANCE interrupts */
         if (!irq_can_set_affinity(irq))
                 return 0;

         /*
          * Preserve a userspace affinity setup, but make sure that
          * one of the targets is online.
          */
-        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-                if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
-                    < nr_cpu_ids)
-                        goto set_affinity;
+        if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+                if (cpumask_intersects(desc->irq_data.affinity,
+                                       cpu_online_mask))
+                        set = desc->irq_data.affinity;
                 else
-                        desc->status &= ~IRQ_AFFINITY_SET;
+                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
         }

-        cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
-set_affinity:
-        desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
-
+        cpumask_and(mask, cpu_online_mask, set);
+        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+        switch (ret) {
+        case IRQ_SET_MASK_OK:
+                cpumask_copy(desc->irq_data.affinity, mask);
+        case IRQ_SET_MASK_OK_NOCOPY:
+                irq_set_thread_affinity(desc);
+        }
         return 0;
 }
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 {
         return irq_select_affinity(irq);
 }
 #endif
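Two things are worth noting in the new autoselector: the caller now supplies a scratch cpumask, so setup_affinity() no longer clobbers desc->irq_data.affinity before the chip callback has actually accepted the mask (and, presumably, no on-stack cpumask is needed with CPUMASK_OFFSTACK=y); and the switch intentionally falls through from IRQ_SET_MASK_OK to IRQ_SET_MASK_OK_NOCOPY so that irq_set_thread_affinity() runs for both success cases.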
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
-        struct irq_desc *desc = irq_to_desc(irq);
-        struct irqaction *action;
         unsigned long flags;
+        struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+        int canrequest = 0;

         if (!desc)
                 return 0;

-        if (desc->status & IRQ_NOREQUEST)
-                return 0;
-
-        raw_spin_lock_irqsave(&desc->lock, flags);
-        action = desc->action;
-        if (action)
-                if (irqflags & action->flags & IRQF_SHARED)
-                        action = NULL;
-        raw_spin_unlock_irqrestore(&desc->lock, flags);
-
-        return !action;
+        if (irq_settings_can_request(desc)) {
+                if (desc->action)
+                        if (irqflags & desc->action->flags & IRQF_SHARED)
+                                canrequest = 1;
+        }
+        irq_put_desc_unlock(desc, flags);
+        return canrequest;
 }

-static void compat_irq_chip_set_default_handler(struct irq_desc *desc)
-{
-        /*
-         * If the architecture still has not overridden
-         * the flow handler then zap the default. This
-         * should catch incorrect flow-type setting.
-         */
-        if (desc->handle_irq == &handle_bad_irq)
-                desc->handle_irq = NULL;
-}
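can_request_irq() remains a best-effort probe; a sketch of the classic caller pattern (my_handler and my_dev are placeholders):

        if (!can_request_irq(irq, IRQF_SHARED))
                return -EBUSY;
        ret = request_irq(irq, my_handler, IRQF_SHARED, "mydev", my_dev);

The check is inherently racy against a concurrent request_irq(), so callers must still handle request_irq() failure.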
 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                       unsigned long flags)
 {
-        int ret;
         struct irq_chip *chip = desc->irq_data.chip;
+        int ret, unmask = 0;

         if (!chip || !chip->irq_set_type) {
[...]
+        flags &= IRQ_TYPE_SENSE_MASK;
+
+        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
+                if (!irqd_irq_masked(&desc->irq_data))
+                        mask_irq(desc);
+                if (!irqd_irq_disabled(&desc->irq_data))
+                        unmask = 1;
+        }

         /* caller masked out all except trigger mode flags */
         ret = chip->irq_set_type(&desc->irq_data, flags);

-        if (ret)
+        switch (ret) {
+        case IRQ_SET_MASK_OK:
+                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
+                irqd_set(&desc->irq_data, flags);
+
+        case IRQ_SET_MASK_OK_NOCOPY:
+                flags = irqd_get_trigger_type(&desc->irq_data);
+                irq_settings_set_trigger_mask(desc, flags);
+                irqd_clear(&desc->irq_data, IRQD_LEVEL);
+                irq_settings_clr_level(desc);
+
+                if (flags & IRQ_TYPE_LEVEL_MASK) {
+                        irq_settings_set_level(desc);
+                        irqd_set(&desc->irq_data, IRQD_LEVEL);
+                }
+
+                ret = 0;
+                break;
+        default:
                 pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
                        flags, irq, chip->irq_set_type);
-        else {
-                if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-                        flags |= IRQ_LEVEL;
-                /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-                desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
-                desc->status |= flags;
-
-                if (chip != desc->irq_data.chip)
-                        irq_chip_set_defaults(desc->irq_data.chip);
-        }
+        }
+        if (unmask)
+                unmask_irq(desc);
         return ret;
 }
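Drivers rarely call this path directly; the trigger normally arrives via request_irq() flags (or irq_set_irq_type()) and the core funnels it through __irq_set_trigger(). A sketch, with placeholder names:

        ret = request_irq(irq, my_handler, IRQF_TRIGGER_FALLING, "mydev", my_dev);

The IRQCHIP_SET_TYPE_MASKED handling above caters to chips that can only change the trigger mode safely while the line is masked: the core masks around the ->irq_set_type() call and unmasks afterwards if the line was not disabled.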
          * The thread is faster done than the hard interrupt handler
          * on the other CPU. If we unmask the irq line then the
          * interrupt can come in again and masks the line, leaves due
-         * to IRQ_INPROGRESS and the irq line is masked forever.
+         * to IRQS_INPROGRESS and the irq line is masked forever.
+         *
+         * This also serializes the state of shared oneshot handlers
+         * versus "desc->threads_oneshot |= action->thread_mask;" in
+         * irq_wake_thread(). See the comment there which explains the
+         * serialization.
          */
-        if (unlikely(desc->status & IRQ_INPROGRESS)) {
+        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
                 raw_spin_unlock_irq(&desc->lock);
                 chip_bus_sync_unlock(desc);
                 cpu_relax();
                 goto again;
         }

-        if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
-                desc->status &= ~IRQ_MASKED;
-                desc->irq_data.chip->irq_unmask(&desc->irq_data);
-        }
+        /*
+         * Now check again whether the thread should run. Otherwise
+         * we would clear the threads_oneshot bit of this thread which
+         * was just set.
+         */
+        if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+                goto out_unlock;
+
+        desc->threads_oneshot &= ~action->thread_mask;
+
+        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
+            irqd_irq_masked(&desc->irq_data))
+                unmask_irq(desc);
+
+out_unlock:
         raw_spin_unlock_irq(&desc->lock);
         chip_bus_sync_unlock(desc);
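This bookkeeping is the heart of the shared-oneshot handling: each threaded action owns one bit in desc->threads_oneshot (assigned via ffz() in __setup_irq(), shown further down), the bit is set when the thread is woken, cleared here when it finishes, and the line is only unmasked once the word reaches zero, i.e. once every sharing thread has completed.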
 #ifdef CONFIG_SMP
 /*
  * Check whether we need to change the affinity of the interrupt thread.
  */
 static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
                 atomic_inc(&desc->threads_active);

                 raw_spin_lock_irq(&desc->lock);
-                if (unlikely(desc->status & IRQ_DISABLED)) {
+                if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
                         /*
                          * CHECKME: We might need a dedicated
                          * IRQ_THREAD_PENDING flag here, which
                          * retriggers the thread in check_irq_resend()
-                         * but AFAICT IRQ_PENDING should be fine as it
+                         * but AFAICT IRQS_PENDING should be fine as it
                          * retriggers the interrupt itself --- tglx
                          */
-                        desc->status |= IRQ_PENDING;
+                        desc->istate |= IRQS_PENDING;
                         raw_spin_unlock_irq(&desc->lock);
                 } else {
+                        irqreturn_t action_ret;
+
                         raw_spin_unlock_irq(&desc->lock);
-
-                        action->thread_fn(action->irq, action->dev_id);
-
-                        if (oneshot)
-                                irq_finalize_oneshot(action->irq, desc);
+                        action_ret = handler_fn(desc, action);
+                        if (!noirqdebug)
+                                note_interrupt(action->irq, desc, action_ret);
                 }

                 wake = atomic_dec_and_test(&desc->threads_active);
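The handler_fn indirection appears to come from the forced-threading rework in the same series: depending on IRQTF_FORCED_THREAD it points either at the regular threaded-handler wrapper or at the variant that runs the primary handler from the thread (irq_thread_fn() and irq_forced_thread_fn() in the committed code), so the loop stays identical for both cases and the result feeds note_interrupt() for spurious-interrupt accounting either way.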
                  * Can't share interrupts unless both agree to and are
                  * the same type (level, edge, polarity). So both flag
                  * fields must have IRQF_SHARED set and the bits which
-                 * set the trigger type must match.
+                 * set the trigger type must match. Also all must
+                 * agree on ONESHOT.
                  */
                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
-                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
+                    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
                         old_name = old->name;
                         goto mismatch;
                 }

 #if defined(CONFIG_IRQ_PER_CPU)
                 /* All handlers must agree on per-cpuness */
                 if ((old->flags & IRQF_PERCPU) !=
                     (new->flags & IRQF_PERCPU))
                         goto mismatch;
 #endif

                 /* add new interrupt at end of irq queue */
                 do {
+                        thread_mask |= old->thread_mask;
                         old_ptr = &old->next;
                         old = *old_ptr;
                 } while (old);
                 shared = 1;
         }

+        /*
+         * Setup the thread mask for this irqaction. Unlikely to have
+         * 32 or 64 irqs sharing one line, but who knows.
+         */
+        if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+                ret = -EBUSY;
+                goto out_mask;
+        }
+        new->thread_mask = 1 << ffz(thread_mask);
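A worked example of the new mask assignment: with two oneshot actions already on the line, thread_mask is 0x1 | 0x2 = 0x3, ffz(0x3) is 2, so the newcomer gets thread_mask 0x4; a full word (~0UL) means 32 (or 64) sharers and the request is refused with -EBUSY. And per the tightened mismatch check above, sharers must now also agree on IRQF_ONESHOT; e.g. (a sketch with placeholder names):

        ret = request_irq(irq, a_handler, IRQF_SHARED | IRQF_TRIGGER_HIGH, "a", a_dev);
        ret = request_irq(irq, b_handler, IRQF_SHARED | IRQF_TRIGGER_HIGH, "b", b_dev);

Both succeed; adding IRQF_ONESHOT to only one of them would now fail the second request.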
-        irq_chip_set_defaults(desc->irq_data.chip);
-
         init_waitqueue_head(&desc->wait_for_threads);

         /* Setup the type (level, edge, polarity) if configured: */
         if (new->flags & IRQF_TRIGGER_MASK) {
                 ret = __irq_set_trigger(desc, irq,
                                         new->flags & IRQF_TRIGGER_MASK);

                 if (ret)
-                        goto out_thread;
-        } else
-                compat_irq_chip_set_default_handler(desc);
-#if defined(CONFIG_IRQ_PER_CPU)
-        if (new->flags & IRQF_PERCPU)
-                desc->status |= IRQ_PER_CPU;
-#endif
+                        goto out_mask;
+        }

-        desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
-                          IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+        desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
+                          IRQS_ONESHOT | IRQS_WAITING);
+        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+
+        if (new->flags & IRQF_PERCPU) {
+                irqd_set(&desc->irq_data, IRQD_PER_CPU);
+                irq_settings_set_per_cpu(desc);
+        }

         if (new->flags & IRQF_ONESHOT)
-                desc->status |= IRQ_ONESHOT;
+                desc->istate |= IRQS_ONESHOT;

-        if (!(desc->status & IRQ_NOAUTOEN)) {
-                desc->depth = 0;
-                desc->status &= ~IRQ_DISABLED;
-                desc->irq_data.chip->irq_startup(&desc->irq_data);
-        } else
+        if (irq_settings_can_autoenable(desc))
+                irq_startup(desc);
+        else
                 /* Undo nested disables: */
                 desc->depth = 1;

         /* Exclude IRQ from balancing if requested */
-        if (new->flags & IRQF_NOBALANCING)
-                desc->status |= IRQ_NO_BALANCING;
+        if (new->flags & IRQF_NOBALANCING) {
+                irq_settings_set_no_balancing(desc);
+                irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+        }

         /* Set default affinity mask once everything is setup */
-        setup_affinity(irq, desc);
-
-        } else if ((new->flags & IRQF_TRIGGER_MASK)
-                        && (new->flags & IRQF_TRIGGER_MASK)
-                                != (desc->status & IRQ_TYPE_SENSE_MASK)) {
-                /* hope the handler works with the actual trigger mode... */
-                pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
-                                irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
-                                (int)(new->flags & IRQF_TRIGGER_MASK));
+        setup_affinity(irq, desc, mask);
+
+        } else if (new->flags & IRQF_TRIGGER_MASK) {
+                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+                unsigned int omsk = irq_settings_get_trigger_mask(desc);
+
+                if (nmsk != omsk)
+                        /* hope the handler works with current trigger mode */
+                        pr_warning("IRQ %d uses trigger mode %u; requested %u\n",