~ubuntu-branches/ubuntu/quantal/linux-linaro-mx51/quantal

Viewing changes to drivers/xen/events.c

  • Committer: Package Import Robot
  • Author(s): John Rigby
  • Date: 2011-09-26 10:44:23 UTC
  • Revision ID: package-import@ubuntu.com-20110926104423-3o58a3c1bj7x00rs
Tags: 3.0.0-1007.9

[ John Rigby ]
  * Enable crypto modules and remove crypto-modules from
    exclude-module files
    LP: #826021

@@ -101,6 +101,7 @@
                         unsigned short gsi;
                         unsigned char vector;
                         unsigned char flags;
+                        uint16_t domid;
                 } pirq;
         } u;
 };
@@ -118,6 +119,8 @@
 static struct irq_chip xen_dynamic_chip;
 static struct irq_chip xen_percpu_chip;
 static struct irq_chip xen_pirq_chip;
+static void enable_dynirq(struct irq_data *data);
+static void disable_dynirq(struct irq_data *data);
 
 /* Get info for IRQ */
 static struct irq_info *info_for_irq(unsigned irq)
@@ -184,6 +187,7 @@
                                    unsigned short pirq,
                                    unsigned short gsi,
                                    unsigned short vector,
+                                   uint16_t domid,
                                    unsigned char flags)
 {
         struct irq_info *info = info_for_irq(irq);
@@ -193,6 +197,7 @@
         info->u.pirq.pirq = pirq;
         info->u.pirq.gsi = gsi;
         info->u.pirq.vector = vector;
+        info->u.pirq.domid = domid;
         info->u.pirq.flags = flags;
 }
 
@@ -390,9 +395,9 @@
 static void xen_irq_init(unsigned irq)
 {
         struct irq_info *info;
+#ifdef CONFIG_SMP
         struct irq_desc *desc = irq_to_desc(irq);
 
-#ifdef CONFIG_SMP
         /* By default all event channels notify CPU#0. */
         cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
 #endif
@@ -473,16 +478,6 @@
         irq_free_desc(irq);
 }
 
-static void pirq_unmask_notify(int irq)
-{
-        struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };
-
-        if (unlikely(pirq_needs_eoi(irq))) {
-                int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
-                WARN_ON(rc);
-        }
-}
-
 static void pirq_query_unmask(int irq)
 {
         struct physdev_irq_status_query irq_status;
@@ -506,6 +501,29 @@
         return desc && desc->action == NULL;
 }
 
+static void eoi_pirq(struct irq_data *data)
+{
+        int evtchn = evtchn_from_irq(data->irq);
+        struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+        int rc = 0;
+
+        irq_move_irq(data);
+
+        if (VALID_EVTCHN(evtchn))
+                clear_evtchn(evtchn);
+
+        if (pirq_needs_eoi(data->irq)) {
+                rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+                WARN_ON(rc);
+        }
+}
+
+static void mask_ack_pirq(struct irq_data *data)
+{
+        disable_dynirq(data);
+        eoi_pirq(data);
+}
+
 static unsigned int __startup_pirq(unsigned int irq)
 {
         struct evtchn_bind_pirq bind_pirq;
@@ -539,7 +557,7 @@
 
 out:
         unmask_evtchn(evtchn);
-        pirq_unmask_notify(irq);
+        eoi_pirq(irq_get_irq_data(irq));
 
         return 0;
 }
@@ -579,18 +597,7 @@
 
 static void disable_pirq(struct irq_data *data)
 {
-}
-
-static void ack_pirq(struct irq_data *data)
-{
-        int evtchn = evtchn_from_irq(data->irq);
-
-        irq_move_irq(data);
-
-        if (VALID_EVTCHN(evtchn)) {
-                mask_evtchn(evtchn);
-                clear_evtchn(evtchn);
-        }
+        disable_dynirq(data);
 }
 
 static int find_irq_by_gsi(unsigned gsi)
@@ -619,6 +626,9 @@
  *
  * Note: We don't assign an event channel until the irq actually started
  * up.  Return an existing irq if we've already got one for the gsi.
+ *
+ * Shareable implies level triggered, not shareable implies edge
+ * triggered here.
  */
 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                              unsigned pirq, int shareable, char *name)
@@ -639,9 +649,6 @@
         if (irq < 0)
                 goto out;
 
-        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
-                                      name);
-
         irq_op.irq = irq;
         irq_op.vector = 0;
 
@@ -655,9 +662,32 @@
                 goto out;
         }
 
-        xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
+        xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
                                shareable ? PIRQ_SHAREABLE : 0);
 
+        pirq_query_unmask(irq);
+        /* We try to use the handler with the appropriate semantic for the
+         * type of interrupt: if the interrupt is an edge triggered
+         * interrupt we use handle_edge_irq.
+         *
+         * On the other hand if the interrupt is level triggered we use
+         * handle_fasteoi_irq like the native code does for this kind of
+         * interrupts.
+         *
+         * Depending on the Xen version, pirq_needs_eoi might return true
+         * not only for level triggered interrupts but for edge triggered
+         * interrupts too. In any case Xen always honors the eoi mechanism,
+         * not injecting any more pirqs of the same kind if the first one
+         * hasn't received an eoi yet. Therefore using the fasteoi handler
+         * is the right choice either way.
+         */
+        if (shareable)
+                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+                                handle_fasteoi_irq, name);
+        else
+                irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+                                handle_edge_irq, name);
+
 out:
         spin_unlock(&irq_mapping_update_lock);
 
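For orientation, a minimal caller sketch follows, assuming only the xen_bind_pirq_gsi_to_irq() signature and the shareable semantics shown in the hunks above; the wrapper function, the chosen values and the header location are illustrative assumptions, not code from this revision.

/* Illustrative sketch only (not from this diff): bind a hypothetical
 * level-triggered, shareable GSI.  Because shareable is non-zero, the
 * hunk above installs handle_fasteoi_irq for the resulting IRQ.
 */
#include <linux/kernel.h>
#include <xen/events.h>         /* assumed location of the prototype */

static int example_bind_gsi(unsigned gsi, unsigned pirq)
{
        int irq;

        irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, 1 /* shareable */, "example");
        if (irq < 0)
                return irq;

        pr_info("GSI %u bound to IRQ %d\n", gsi, irq);
        return irq;
}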
 
@@ -680,7 +710,8 @@
 }
 
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-                             int pirq, int vector, const char *name)
+                             int pirq, int vector, const char *name,
+                             domid_t domid)
 {
         int irq, ret;
 
@@ -690,10 +721,10 @@
         if (irq == -1)
                 goto out;
 
-        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
-                                      name);
+        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
+                        name);
 
-        xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
+        xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
         ret = irq_set_msi_desc(irq, msidesc);
         if (ret < 0)
                 goto error_irq;
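The extra domid parameter above lets dom0 create MSI pirq bindings on behalf of another domain. Below is a minimal caller sketch, assuming the new xen_bind_pirq_msi_to_irq() signature and DOMID_SELF from this diff; the wrapper, the parameter plumbing and the header locations are illustrative assumptions.

/* Illustrative sketch only (not from this diff).  Passing DOMID_SELF
 * preserves the old behaviour; passing another domain's id records the
 * owner so that the later PHYSDEVOP_unmap_pirq targets that domain.
 */
#include <linux/pci.h>
#include <linux/msi.h>                  /* assumed: struct msi_desc */
#include <xen/events.h>                 /* assumed location of the prototype */
#include <xen/interface/xen.h>          /* assumed: domid_t, DOMID_SELF */

static int example_bind_msi(struct pci_dev *dev, struct msi_desc *msidesc,
                            int pirq, int vector, domid_t owner)
{
        /* owner == DOMID_SELF for a local device, or the owning guest's
         * domid for a device assigned to that guest.
         */
        return xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, vector,
                                        "example-msi", owner);
}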
@@ -722,9 +753,16 @@
 
         if (xen_initial_domain()) {
                 unmap_irq.pirq = info->u.pirq.pirq;
-                unmap_irq.domid = DOMID_SELF;
+                unmap_irq.domid = info->u.pirq.domid;
                 rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
-                if (rc) {
+                /* If another domain quits without making the pci_disable_msix
+                 * call, the Xen hypervisor takes care of freeing the PIRQs
+                 * (free_domain_pirqs).
+                 */
+                if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
+                        printk(KERN_INFO "domain %d does not have %d anymore\n",
+                                info->u.pirq.domid, info->u.pirq.pirq);
+                else if (rc) {
                         printk(KERN_WARNING "unmap irq failed %d\n", rc);
                         goto out;
                 }
@@ -759,6 +797,12 @@
         return irq;
 }
 
+
+int xen_pirq_from_irq(unsigned irq)
+{
+        return pirq_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
 int bind_evtchn_to_irq(unsigned int evtchn)
 {
         int irq;
@@ -773,7 +817,7 @@
                         goto out;
 
                 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
-                                              handle_fasteoi_irq, "event");
+                                              handle_edge_irq, "event");
 
                 xen_irq_info_evtchn_init(irq, evtchn);
         }
@@ -1179,9 +1223,6 @@
                                 port = (word_idx * BITS_PER_LONG) + bit_idx;
                                 irq = evtchn_to_irq[port];
 
-                                mask_evtchn(port);
-                                clear_evtchn(port);
-
                                 if (irq != -1) {
                                         desc = irq_to_desc(irq);
                                         if (desc)
@@ -1337,10 +1378,16 @@
 {
         int evtchn = evtchn_from_irq(data->irq);
 
-        irq_move_masked_irq(data);
+        irq_move_irq(data);
 
         if (VALID_EVTCHN(evtchn))
-                unmask_evtchn(evtchn);
+                clear_evtchn(evtchn);
+}
+
+static void mask_ack_dynirq(struct irq_data *data)
+{
+        disable_dynirq(data);
+        ack_dynirq(data);
 }
 
 static int retrigger_dynirq(struct irq_data *data)
@@ -1502,6 +1549,18 @@
         xen_poll_irq_timeout(irq, 0 /* no timeout */);
 }
 
+/* Check whether the IRQ line is shared with other guests. */
+int xen_test_irq_shared(int irq)
+{
+        struct irq_info *info = info_for_irq(irq);
+        struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
+
+        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
+                return 0;
+        return !(irq_status.flags & XENIRQSTAT_shared);
+}
+EXPORT_SYMBOL_GPL(xen_test_irq_shared);
+
 void xen_irq_resume(void)
 {
         unsigned int cpu, evtchn;
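The newly exported xen_test_irq_shared() gives other kernel code a way to query the hypervisor's view of a pirq. A minimal usage sketch follows, assuming only the definition shown above; the wrapper function and the policy around it are illustrative assumptions.

/* Illustrative sketch only (not from this diff).  Per the definition
 * above, xen_test_irq_shared() returns non-zero when XENIRQSTAT_shared
 * is NOT set, and 0 when the line is shared or the status query fails.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <xen/events.h>         /* assumed location of the prototype */

static bool example_line_is_exclusive(int irq)
{
        if (!xen_test_irq_shared(irq)) {
                pr_info("IRQ %d is shared with another guest (or the query failed)\n",
                        irq);
                return false;
        }
        return true;
}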
@@ -1535,7 +1594,9 @@
         .irq_mask               = disable_dynirq,
         .irq_unmask             = enable_dynirq,
 
-        .irq_eoi                = ack_dynirq,
+        .irq_ack                = ack_dynirq,
+        .irq_mask_ack           = mask_ack_dynirq,
+
         .irq_set_affinity       = set_affinity_irq,
         .irq_retrigger          = retrigger_dynirq,
 };
@@ -1545,13 +1606,14 @@
 
         .irq_startup            = startup_pirq,
         .irq_shutdown           = shutdown_pirq,
-
         .irq_enable             = enable_pirq,
-        .irq_unmask             = enable_pirq,
-
         .irq_disable            = disable_pirq,
-        .irq_mask               = disable_pirq,
-
-        .irq_ack                = ack_pirq,
+
+        .irq_mask               = disable_dynirq,
+        .irq_unmask             = enable_dynirq,
+
+        .irq_ack                = eoi_pirq,
+        .irq_eoi                = eoi_pirq,
+        .irq_mask_ack           = mask_ack_pirq,
 
         .irq_set_affinity       = set_affinity_irq,
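Finally, to motivate why the chips above now provide .irq_ack, .irq_eoi and .irq_mask_ack, here is a heavily simplified sketch of how the generic flow handlers chosen earlier (handle_edge_irq for edge, handle_fasteoi_irq for level) drive those callbacks. This is an approximation for illustration only, not the real kernel/irq/chip.c code, which additionally handles disabled and in-progress interrupts (where .irq_mask_ack comes into play), pending replay and locking.

/* Rough approximation for illustration only; the real flow handlers
 * live in kernel/irq/chip.c and do considerably more.
 */
#include <linux/irq.h>
#include <linux/irqdesc.h>

/* Stand-in for invoking the device's registered handlers; the real
 * flow handlers call handle_irq_event() here.
 */
static void run_device_handlers(struct irq_desc *desc)
{
}

static void simplified_handle_edge_irq(struct irq_desc *desc)
{
        /* Edge flow: acknowledge first (eoi_pirq or ack_dynirq above) so
         * a new edge can be latched while the handlers run.
         */
        desc->irq_data.chip->irq_ack(&desc->irq_data);
        run_device_handlers(desc);
}

static void simplified_handle_fasteoi_irq(struct irq_desc *desc)
{
        /* Level/fasteoi flow: run the handlers first, then send the EOI
         * (eoi_pirq above) so Xen will deliver further pirqs of this kind.
         */
        run_device_handlers(desc);
        desc->irq_data.chip->irq_eoi(&desc->irq_data);
}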