~ubuntu-branches/ubuntu/lucid/linux-rt/lucid

Viewing changes to arch/sparc/kernel/irq_64.c

  • Committer: Bazaar Package Importer
  • Author(s): Luke Yelavich
  • Date: 2009-08-05 23:00:52 UTC
  • Revision ID: james.westby@ubuntu.com-20090805230052-7xedvqcyk9dnnxb2
  • Tags: 2.6.31-1.1

New upstream release


--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -20,7 +20,6 @@
 #include <linux/delay.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/bootmem.h>
 #include <linux/irq.h>
 
 #include <asm/ptrace.h>
@@ -45,6 +44,7 @@
 #include <asm/cacheflush.h>
 
 #include "entry.h"
+#include "cpumap.h"
 
 #define NUM_IVECS       (IMAP_INR + 1)
 
@@ -185,7 +185,7 @@
                 seq_printf(p, "%10u ", kstat_irqs(i));
 #else
                 for_each_online_cpu(j)
-                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
                 seq_printf(p, " %9s", irq_desc[i].chip->typename);
                 seq_printf(p, "  %s", action->name);
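This hunk tracks the genirq statistics rework: per-IRQ counters are no longer read straight out of kstat_cpu(j).irqs[] but through the kstat_irqs_cpu() accessor, which hides how sparse-IRQ kernels actually store the counters. A minimal sketch of the accessor in use; total_irqs() is a hypothetical helper, not part of this file:

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

/* Hypothetical helper: sum one IRQ's count across the online CPUs
 * using the 2.6.31-era accessor instead of poking kstat_cpu(). */
static unsigned int total_irqs(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);
	return sum;
}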
@@ -252,38 +252,17 @@
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-        cpumask_t mask = irq_desc[virt_irq].affinity;
+        cpumask_t mask;
         int cpuid;
 
-        if (cpus_equal(mask, CPU_MASK_ALL)) {
-                static int irq_rover;
-                static DEFINE_SPINLOCK(irq_rover_lock);
-                unsigned long flags;
-
-                /* Round-robin distribution... */
-        do_round_robin:
-                spin_lock_irqsave(&irq_rover_lock, flags);
-
-                while (!cpu_online(irq_rover)) {
-                        if (++irq_rover >= NR_CPUS)
-                                irq_rover = 0;
-                }
-                cpuid = irq_rover;
-                do {
-                        if (++irq_rover >= NR_CPUS)
-                                irq_rover = 0;
-                } while (!cpu_online(irq_rover));
-
-                spin_unlock_irqrestore(&irq_rover_lock, flags);
+        cpumask_copy(&mask, irq_desc[virt_irq].affinity);
+        if (cpus_equal(mask, cpu_online_map)) {
+                cpuid = map_to_cpu(virt_irq);
         } else {
                 cpumask_t tmp;
 
                 cpus_and(tmp, cpu_online_map, mask);
-
-                if (cpus_empty(tmp))
-                        goto do_round_robin;
-
-                cpuid = first_cpu(tmp);
+                cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp);
         }
 
         return cpuid;
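The old code hand-rolled a global round-robin rover for IRQs whose affinity was wide open; 2.6.31 delegates placement to the new sparc64 cpumap layer (hence the cpumap.h include above), whose map_to_cpu() spreads virtual IRQs across the machine with knowledge of the topology. As rough intuition only, a flat stand-in might look like the sketch below; map_to_cpu_flat() is hypothetical, and the real arch/sparc/kernel/cpumap.c distributes over nodes, cores, and hardware threads rather than a plain modulo:

#include <linux/cpumask.h>

/* Illustrative stand-in for map_to_cpu(): walk index % N steps
 * through the online map.  Not the kernel's actual algorithm. */
static unsigned int map_to_cpu_flat(unsigned int index)
{
	unsigned int steps = index % num_online_cpus();
	unsigned int cpu = first_cpu(cpu_online_map);

	while (steps--)
		cpu = next_cpu(cpu, cpu_online_map);
	return cpu;
}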
@@ -317,10 +296,12 @@
         }
 }
 
-static void sun4u_set_affinity(unsigned int virt_irq,
+static int sun4u_set_affinity(unsigned int virt_irq,
                                const struct cpumask *mask)
 {
         sun4u_irq_enable(virt_irq);
+
+        return 0;
 }
 
 /* Don't do anything.  The desc->status check for IRQ_DISABLED in
@@ -376,7 +357,7 @@
                        ino, err);
 }
 
-static void sun4v_set_affinity(unsigned int virt_irq,
+static int sun4v_set_affinity(unsigned int virt_irq,
                                const struct cpumask *mask)
 {
         unsigned int ino = virt_irq_table[virt_irq].dev_ino;
@@ -387,6 +368,8 @@
         if (err != HV_EOK)
                 printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
                        "err(%d)\n", ino, cpuid, err);
+
+        return 0;
 }
 
 static void sun4v_irq_disable(unsigned int virt_irq)
@@ -444,7 +427,7 @@
                        dev_handle, dev_ino, err);
 }
 
-static void sun4v_virt_set_affinity(unsigned int virt_irq,
+static int sun4v_virt_set_affinity(unsigned int virt_irq,
                                     const struct cpumask *mask)
 {
         unsigned long cpuid, dev_handle, dev_ino;
@@ -460,6 +443,8 @@
                 printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
                        "err(%d)\n",
                        dev_handle, dev_ino, cpuid, err);
+
+        return 0;
 }
 
 static void sun4v_virq_disable(unsigned int virt_irq)
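All three sun4u/sun4v .set_affinity implementations change from void to int in these hunks, following the genirq interface change in this release: the callback now reports success (0) or a negative errno back to the core instead of failing silently. A minimal sketch of the resulting callback shape; example_chip and its body are illustrative, not from this driver:

#include <linux/irq.h>

/* Illustrative 2.6.31-era irq_chip: .set_affinity returns 0 on
 * success or a negative errno, so the core can tell whether the
 * requested mask was actually applied. */
static int example_set_affinity(unsigned int virt_irq,
				const struct cpumask *mask)
{
	/* ... retarget the interrupt at some CPU in *mask ... */
	return 0;
}

static struct irq_chip example_chip = {
	.typename	= "example",
	.set_affinity	= example_set_affinity,
};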
@@ -805,7 +790,7 @@
                     !(irq_desc[irq].status & IRQ_PER_CPU)) {
                         if (irq_desc[irq].chip->set_affinity)
                                 irq_desc[irq].chip->set_affinity(irq,
-                                        &irq_desc[irq].affinity);
+                                        irq_desc[irq].affinity);
                 }
                 spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
         }
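The dropped '&' is not cosmetic: irq_desc.affinity became cpumask_var_t in the cpumask rework (a real pointer when CONFIG_CPUMASK_OFFSTACK=y), so the field is now handed to set_affinity directly. A minimal sketch of the same call pattern; reapply_affinity() is a hypothetical helper, not kernel code:

#include <linux/irq.h>

/* Hypothetical helper: re-program an IRQ with its stored mask.
 * With affinity held as cpumask_var_t, desc->affinity already
 * behaves as a struct cpumask pointer, so no '&' is taken. */
static void reapply_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc->chip->set_affinity)
		desc->chip->set_affinity(irq, desc->affinity);
}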
@@ -928,25 +913,19 @@
                        tb->nonresum_qmask);
 }
 
-static void __init alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask)
-{
-        unsigned long size = PAGE_ALIGN(qmask + 1);
-        void *p = __alloc_bootmem(size, size, 0);
-        if (!p) {
-                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
-                prom_halt();
-        }
-
-        *pa_ptr = __pa(p);
-}
-
-static void __init alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask)
-{
-        unsigned long size = PAGE_ALIGN(qmask + 1);
-        void *p = __alloc_bootmem(size, size, 0);
-
-        if (!p) {
-                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
+/* Each queue region must be a power of 2 multiple of 64 bytes in
+ * size.  The base real address must be aligned to the size of the
+ * region.  Thus, an 8KB queue must be 8KB aligned, for example.
+ */
+static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
+{
+        unsigned long size = PAGE_ALIGN(qmask + 1);
+        unsigned long order = get_order(size);
+        unsigned long p;
+
+        p = __get_free_pages(GFP_KERNEL, order);
+        if (!p) {
+                prom_printf("SUN4V: Error, cannot allocate queue.\n");
                 prom_halt();
         }
 
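The new alloc_one_queue() leans on a property of the buddy allocator: __get_free_pages(gfp, order) returns a block of 2^order pages that is naturally aligned to its own size, which is exactly the "base real address aligned to the region size" rule the new comment states. A worked example of the sizing math, with an illustrative qmask and a hypothetical wrapper:

#include <linux/init.h>
#include <linux/gfp.h>
#include <asm/page.h>

/* Worked example: an 8KB queue has qmask == 0x1fff, so
 * size = PAGE_ALIGN(0x2000) = 8192.  On sparc64 (8KB pages)
 * get_order(8192) == 0, and __get_free_pages() returns one
 * naturally 8KB-aligned page, as the alignment rule requires. */
static unsigned long __init queue_alloc_example(void)
{
	unsigned long qmask = 0x1fff;	/* illustrative value */
	unsigned long size = PAGE_ALIGN(qmask + 1);

	return __get_free_pages(GFP_KERNEL, get_order(size));
}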
@@ -956,11 +935,11 @@
 static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
-        void *page;
+        unsigned long page;
 
         BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
 
-        page = alloc_bootmem_pages(PAGE_SIZE);
+        page = get_zeroed_page(GFP_KERNEL);
         if (!page) {
                 prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                 prom_halt();
@@ -979,13 +958,13 @@
         for_each_possible_cpu(cpu) {
                 struct trap_per_cpu *tb = &trap_block[cpu];
 
-                alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
-                alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
-                alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask);
-                alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask);
-                alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
-                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa,
-                               tb->nonresum_qmask);
+                alloc_one_queue(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask);
+                alloc_one_queue(&tb->dev_mondo_pa, tb->dev_mondo_qmask);
+                alloc_one_queue(&tb->resum_mondo_pa, tb->resum_qmask);
+                alloc_one_queue(&tb->resum_kernel_buf_pa, tb->resum_qmask);
+                alloc_one_queue(&tb->nonresum_mondo_pa, tb->nonresum_qmask);
+                alloc_one_queue(&tb->nonresum_kernel_buf_pa,
+                                tb->nonresum_qmask);
         }
 }
 
@@ -1013,7 +992,7 @@
         kill_prom_timer();
 
         size = sizeof(struct ino_bucket) * NUM_IVECS;
-        ivector_table = alloc_bootmem(size);
+        ivector_table = kzalloc(size, GFP_KERNEL);
         if (!ivector_table) {
                 prom_printf("Fatal error, cannot allocate ivector_table\n");
                 prom_halt();
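Together with the <linux/bootmem.h> include removed at the top, these last hunks finish the file's migration off the boot-time allocator: alloc_bootmem_pages()/alloc_bootmem() become get_zeroed_page()/kzalloc(), which appears safe here because by 2.6.31 init_IRQ() runs after the core page and slab allocators are up. A sketch contrasting the two idioms; alloc_table() and its slab_ready flag are hypothetical:

#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/types.h>

/* Hypothetical contrast of the two allocation idioms.  Both return
 * zeroed memory; bootmem is only usable early in boot, while
 * kzalloc() requires the slab allocator to be running. */
static void *alloc_table(size_t size, bool slab_ready)
{
	if (slab_ready)
		return kzalloc(size, GFP_KERNEL);	/* may return NULL */
	return alloc_bootmem(size);			/* panics on failure */
}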