/*
 * Support for virtual IRQ subgroups.
 *
 * Copyright (C) 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "intc: " fmt

#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include "internals.h"
/* Global table mapping Linux IRQ numbers to their intc enum ID + desc. */
static struct intc_map_entry intc_irq_xlate[NR_IRQS];

/*
 * Singly-linked list of virtual IRQs chained off a single parent IRQ;
 * the list head lives in the parent's irq_data handler_data.
 */
struct intc_virq_list {
	unsigned int irq;		/* allocated virtual IRQ number */
	struct intc_virq_list *next;
};

#define for_each_virq(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Tags for the radix tree
 */
#define INTC_TAG_VIRQ_NEEDS_ALLOC	0
/*
 * Record the enum ID and owning controller descriptor for @irq in the
 * global translation table, serialized by intc_big_lock.
 */
void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	intc_irq_xlate[irq].enum_id = id;
	intc_irq_xlate[irq].desc = d;
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
struct intc_map_entry *intc_irq_xlate_get(unsigned int irq)
47
return intc_irq_xlate + irq;
50
/*
 * Look up the Linux IRQ number for @enum_id on the controller named
 * @chipname. Returns the IRQ number, or -1 if no mapping exists (or the
 * enum ID is a subgroup VIRQ that has not been allocated an IRQ yet).
 */
int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
	struct intc_map_entry *ptr;
	struct intc_desc_int *d;
	int irq = -1;

	list_for_each_entry(d, &intc_list, list) {
		int tagged;

		if (strcmp(d->chip.name, chipname) != 0)
			continue;

		/*
		 * Catch early lookups for subgroup VIRQs that have not
		 * yet been allocated an IRQ. This already includes a
		 * fast-path out if the tree is untagged, so there is no
		 * need to explicitly test the root tree.
		 */
		tagged = radix_tree_tag_get(&d->tree, enum_id,
					    INTC_TAG_VIRQ_NEEDS_ALLOC);
		if (unlikely(tagged))
			break;

		ptr = radix_tree_lookup(&d->tree, enum_id);
		if (ptr) {
			irq = ptr - intc_irq_xlate;
			break;
		}
	}

	return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
86
struct intc_virq_list **last, *entry;
87
struct irq_data *data = irq_get_irq_data(irq);
89
/* scan for duplicates */
90
last = (struct intc_virq_list **)&data->handler_data;
91
for_each_virq(entry, data->handler_data) {
92
if (entry->irq == virq)
97
entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
99
pr_err("can't allocate VIRQ mapping for %d\n", virq);
110
static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
112
struct irq_data *data = irq_get_irq_data(irq);
113
struct irq_chip *chip = irq_data_get_irq_chip(data);
114
struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
115
struct intc_desc_int *d = get_intc_desc(irq);
117
chip->irq_mask_ack(data);
119
for_each_virq(entry, vlist) {
120
unsigned long addr, handle;
122
handle = (unsigned long)irq_get_handler_data(entry->irq);
123
addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
125
if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
126
generic_handle_irq(entry->irq);
129
chip->irq_unmask(data);
132
/*
 * Build the packed intc handle for bit @index of @subgroup's register:
 * register-width-appropriate test function, enable-reg mode, and the bit
 * position counted down from the MSB.
 */
static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
					       struct intc_desc_int *d,
					       unsigned int index)
{
	unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;

	return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
			0, 1, (subgroup->reg_width - 1) - index);
}
/*
 * Register one subgroup: resolve its parent IRQ from the xlate table,
 * then insert an entry per valid enum ID into the controller's radix
 * tree, tagged VIRQ_NEEDS_ALLOC so intc_subgroup_map() can allocate the
 * actual virtual IRQs later.
 */
static void __init intc_subgroup_init_one(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  struct intc_subgroup *subgroup)
{
	struct intc_map_entry *mapped;
	unsigned int pirq;
	unsigned long flags;
	int i;

	mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
	if (!mapped) {
		WARN_ON(1);
		return;
	}

	pirq = mapped - intc_irq_xlate;

	raw_spin_lock_irqsave(&d->lock, flags);

	for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
		struct intc_subgroup_entry *entry;
		int err;

		if (!subgroup->enum_ids[i])
			continue;

		entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
		if (!entry)
			break;

		entry->pirq = pirq;
		entry->enum_id = subgroup->enum_ids[i];
		entry->handle = intc_subgroup_data(subgroup, d, i);

		err = radix_tree_insert(&d->tree, entry->enum_id, entry);
		if (unlikely(err < 0))
			break;

		radix_tree_tag_set(&d->tree, entry->enum_id,
				   INTC_TAG_VIRQ_NEEDS_ALLOC);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}
/* Register every subgroup described by @desc, if any. */
void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d)
{
	int i;

	if (!desc->hw.subgroups)
		return;

	for (i = 0; i < desc->hw.nr_subgroups; i++)
		intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
}
/*
 * Allocate and wire up a virtual IRQ for every radix-tree entry tagged
 * VIRQ_NEEDS_ALLOC on @d: chain it off its parent IRQ via
 * intc_virq_handler(), then replace the placeholder tree slot with the
 * final xlate-table entry and clear the tag.
 */
static void __init intc_subgroup_map(struct intc_desc_int *d)
{
	struct intc_subgroup_entry *entries[32];
	unsigned long flags;
	unsigned int nr_found;
	int i;

	raw_spin_lock_irqsave(&d->lock, flags);

restart:
	nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
			(void ***)entries, 0, ARRAY_SIZE(entries),
			INTC_TAG_VIRQ_NEEDS_ALLOC);

	for (i = 0; i < nr_found; i++) {
		struct intc_subgroup_entry *entry;
		int irq;

		entry = radix_tree_deref_slot((void **)entries[i]);
		if (unlikely(!entry))
			continue;
		if (radix_tree_deref_retry(entry))
			goto restart;

		irq = create_irq();
		if (unlikely(irq < 0)) {
			pr_err("no more free IRQs, bailing..\n");
			break;
		}

		activate_irq(irq);

		pr_info("Setting up a chained VIRQ from %d -> %d\n",
			irq, entry->pirq);

		intc_irq_xlate_set(irq, entry->enum_id, d);

		irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq),
					      handle_simple_irq, "virq");
		irq_set_chip_data(irq, irq_get_chip_data(entry->pirq));

		irq_set_handler_data(irq, (void *)entry->handle);

		/*
		 * Set the virtual IRQ as non-threadable.
		 */
		irq_set_nothread(irq);

		irq_set_chained_handler(entry->pirq, intc_virq_handler);
		add_virq_to_pirq(entry->pirq, irq);

		radix_tree_tag_clear(&d->tree, entry->enum_id,
				     INTC_TAG_VIRQ_NEEDS_ALLOC);
		radix_tree_replace_slot((void **)entries[i],
					&intc_irq_xlate[irq]);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}
/* Late-init pass: map pending subgroup VIRQs on every registered controller. */
void __init intc_finalize(void)
{
	struct intc_desc_int *d;

	list_for_each_entry(d, &intc_list, list)
		if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
			intc_subgroup_map(d);
}