@@ ... @@
 static struct irq_chip xen_dynamic_chip;
 static struct irq_chip xen_percpu_chip;
 static struct irq_chip xen_pirq_chip;
+static void enable_dynirq(struct irq_data *data);
+static void disable_dynirq(struct irq_data *data);
 
 /* Get info for IRQ */
 static struct irq_info *info_for_irq(unsigned irq)
@@ ... @@
 static void xen_irq_init(unsigned irq)
 {
 	struct irq_info *info;
+#ifdef CONFIG_SMP
 	struct irq_desc *desc = irq_to_desc(irq);
 
 	/* By default all event channels notify CPU#0. */
 	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
+#endif
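Event channels start out notifying CPU#0, as the comment says. A driver can later spread its interrupts with the generic affinity API, which reaches set_affinity_irq (installed in the chip tables at the end of this patch) and rebinds the event channel to the chosen vcpu. A minimal sketch, assuming an already-bound event channel number in evtchn:

	/* Hypothetical example: move an event-channel irq off CPU 0.
	 * irq_set_affinity() ends up in set_affinity_irq(), which rebinds
	 * the underlying event channel to the target vcpu. */
	int irq = bind_evtchn_to_irq(evtchn);
	if (irq >= 0)
		irq_set_affinity(irq, cpumask_of(1));	/* target CPU 1 */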
@@ ... @@
 	irq_free_desc(irq);
 }
 
-static void pirq_unmask_notify(int irq)
-{
-	struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };
-
-	if (unlikely(pirq_needs_eoi(irq))) {
-		int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
-		WARN_ON(rc);
-	}
-}
-
 static void pirq_query_unmask(int irq)
 {
 	struct physdev_irq_status_query irq_status;
@@ ... @@
 	return desc && desc->action == NULL;
 }
 
+static void eoi_pirq(struct irq_data *data)
+{
+	int evtchn = evtchn_from_irq(data->irq);
+	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+	int rc = 0;
+
+	irq_move_irq(data);
+
+	if (VALID_EVTCHN(evtchn))
+		clear_evtchn(evtchn);
+
+	if (pirq_needs_eoi(data->irq)) {
+		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
+		WARN_ON(rc);
+	}
+}
+
+static void mask_ack_pirq(struct irq_data *data)
+{
+	disable_dynirq(data);
+	eoi_pirq(data);
+}
+
 static unsigned int __startup_pirq(unsigned int irq)
 {
 	struct evtchn_bind_pirq bind_pirq;
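The new eoi_pirq replaces pirq_unmask_notify as the single ack/eoi primitive for pirqs: it clears the backing event channel and only issues the PHYSDEVOP_eoi hypercall when pirq_needs_eoi() says Xen expects one. mask_ack_pirq simply composes masking with that ack. A simplified view of servicing one level-triggered pirq, as a sketch rather than the kernel source:

	/* Sketch: one level-triggered pirq under the fasteoi flow.
	 *
	 *   event channel fires
	 *     -> handle_fasteoi_irq()
	 *          -> driver's handler runs
	 *          -> chip->irq_eoi == eoi_pirq()
	 *               -> clear_evtchn()              (ack the evtchn)
	 *               -> PHYSDEVOP_eoi, if Xen asked for one
	 *
	 * Xen holds back further pirqs of the same kind until the eoi,
	 * which is what makes this flow safe for these interrupts.
	 */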
@@ ... @@
 static void disable_pirq(struct irq_data *data)
 {
+	disable_dynirq(data);
 }
 
-static void ack_pirq(struct irq_data *data)
-{
-	int evtchn = evtchn_from_irq(data->irq);
-
-	irq_move_irq(data);
-
-	if (VALID_EVTCHN(evtchn)) {
-		mask_evtchn(evtchn);
-		clear_evtchn(evtchn);
-	}
-}
-
 static int find_irq_by_gsi(unsigned gsi)
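With ack_pirq gone, disable_pirq picks up a body that delegates to disable_dynirq, i.e. it masks the backing event channel. Assuming the enable side keeps its existing delegation to startup_pirq, the resulting pair looks like this (a sketch of the post-patch state, not quoted from the diff):

	static void enable_pirq(struct irq_data *data)
	{
		startup_pirq(data);	/* (re)bind the pirq and unmask */
	}

	static void disable_pirq(struct irq_data *data)
	{
		disable_dynirq(data);	/* mask the backing event channel */
	}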
@@ ... @@
  * Note: We don't assign an event channel until the irq actually started
  * up.  Return an existing irq if we've already got one for the gsi.
+ *
+ * Shareable implies level triggered, not shareable implies edge
+ * triggered here.
  */
 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
 			     unsigned pirq, int shareable, char *name)
@@ ... @@
-	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector,
+	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
 			       shareable ? PIRQ_SHAREABLE : 0);
+	pirq_query_unmask(irq);
+	/* We try to use the handler with the appropriate semantic for the
+	 * type of interrupt: if the interrupt is an edge triggered
+	 * interrupt we use handle_edge_irq.
+	 *
+	 * On the other hand if the interrupt is level triggered we use
+	 * handle_fasteoi_irq like the native code does for this kind of
+	 * interrupts.
+	 *
+	 * Depending on the Xen version, pirq_needs_eoi might return true
+	 * not only for level triggered interrupts but for edge triggered
+	 * interrupts too. In any case Xen always honors the eoi mechanism,
+	 * not injecting any more pirqs of the same kind if the first one
+	 * hasn't received an eoi yet. Therefore using the fasteoi handler
+	 * is the right choice either way.
+	 */
+	if (shareable)
+		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+				handle_fasteoi_irq, name);
+	else
+		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+				handle_edge_irq, name);
 
 out:
 	spin_unlock(&irq_mapping_update_lock);
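To make the comment above concrete, this is the order in which the two genirq flow handlers invoke the chip callbacks (a simplified sketch, not the kernel source). The chip table at the end of the patch sets both .irq_ack and .irq_eoi to eoi_pirq, so the event channel is cleared and Xen eoi'd at the right point in either flow:

	/* Simplified contrast of the two flows chosen above:
	 *
	 *   handle_edge_irq:           handle_fasteoi_irq:
	 *     chip->irq_ack              driver handler
	 *     driver handler             chip->irq_eoi
	 */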
@@ ... @@
 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-			     int pirq, int vector, const char *name)
+			     int pirq, int vector, const char *name,
+			     domid_t domid)
@@ ... @@
-	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
+	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
 			name);
 
-	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0);
+	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
 	ret = irq_set_msi_desc(irq, msidesc);
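xen_bind_pirq_msi_to_irq now takes the owning domain and records it through xen_irq_info_pirq_init, so the unmap path (next hunk) can name the right domain. A hypothetical call sketch; dev, msidesc, pirq and vector are assumed to come from the usual MSI setup path:

	/* Binding an MSI for the local domain keeps the old behaviour by
	 * passing DOMID_SELF; a dom0 path mapping on behalf of a guest
	 * would pass that guest's domid instead. */
	irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, vector,
				       "msi-x", DOMID_SELF);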
@@ ... @@
 	if (xen_initial_domain()) {
 		unmap_irq.pirq = info->u.pirq.pirq;
-		unmap_irq.domid = DOMID_SELF;
+		unmap_irq.domid = info->u.pirq.domid;
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
-		if (rc) {
+		/* If another domain quits without making the pci_disable_msix
+		 * call, the Xen hypervisor takes care of freeing the PIRQs
+		 * (free_domain_pirqs).
+		 */
+		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
+			printk(KERN_INFO "domain %d does not have %d anymore\n",
+				info->u.pirq.domid, info->u.pirq.pirq);
+		else if (rc) {
 			printk(KERN_WARNING "unmap irq failed %d\n", rc);
@@ ... @@
 		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
-					      handle_fasteoi_irq, "event");
+					      handle_edge_irq, "event");
 
 		xen_irq_info_evtchn_init(irq, evtchn);
@@ ... @@
 static void ack_dynirq(struct irq_data *data)
 {
 	int evtchn = evtchn_from_irq(data->irq);
 
-	irq_move_masked_irq(data);
+	irq_move_irq(data);
 
 	if (VALID_EVTCHN(evtchn))
-		unmask_evtchn(evtchn);
+		clear_evtchn(evtchn);
+}
+
+static void mask_ack_dynirq(struct irq_data *data)
+{
+	disable_dynirq(data);
+	ack_dynirq(data);
 }
 
 static int retrigger_dynirq(struct irq_data *data)
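ack_dynirq now clears the event channel instead of unmasking it, and irq_move_masked_irq becomes irq_move_irq because the irq is no longer masked when the ack runs; this pairs with the switch of event channels to handle_edge_irq above. Roughly, the resulting service sequence (a sketch, not the kernel source):

	/* Sketch: a dynamic (event channel) irq after this change.
	 *
	 *   evtchn pending bit set by Xen
	 *     -> handle_edge_irq()
	 *          -> chip->irq_ack == ack_dynirq()   (clear_evtchn)
	 *          -> driver handler runs
	 *
	 * An event arriving while the handler runs sets the pending bit
	 * again and is delivered as a fresh edge, so nothing is lost.
	 */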
@@ ... @@
 	xen_poll_irq_timeout(irq, 0 /* no timeout */);
 }
 
+/* Check whether the IRQ line is shared with other guests. */
+int xen_test_irq_shared(int irq)
+{
+	struct irq_info *info = info_for_irq(irq);
+	struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };
+
+	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
+		return 0;
+	return !(irq_status.flags & XENIRQSTAT_shared);
+}
+EXPORT_SYMBOL_GPL(xen_test_irq_shared);
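xen_test_irq_shared lets a caller ask Xen whether the physical line behind a pirq is shared with another guest, via PHYSDEVOP_irq_status_query. Note the convention visible in the code: it returns nonzero when the XENIRQSTAT_shared flag is clear, and 0 both on error and for a shared line. A hypothetical caller sketch:

	/* Hypothetical use: warn before handing a pirq-backed irq over
	 * to a guest. */
	if (!xen_test_irq_shared(irq))
		pr_info("irq %d: line is shared with another guest\n", irq);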
@@ ... @@
 void xen_irq_resume(void)
 {
 	unsigned int cpu, evtchn;
@@ ... @@
 	.irq_mask		= disable_dynirq,
 	.irq_unmask		= enable_dynirq,
 
-	.irq_eoi		= ack_dynirq,
+	.irq_ack		= ack_dynirq,
+	.irq_mask_ack		= mask_ack_dynirq,
+
 	.irq_set_affinity	= set_affinity_irq,
 	.irq_retrigger		= retrigger_dynirq,
 };
@@ ... @@
 	.irq_startup		= startup_pirq,
 	.irq_shutdown		= shutdown_pirq,
-
 	.irq_enable		= enable_pirq,
-	.irq_unmask		= enable_pirq,
-
 	.irq_disable		= disable_pirq,
-	.irq_mask		= disable_pirq,
 
-	.irq_ack		= ack_pirq,
+	.irq_mask		= disable_dynirq,
+	.irq_unmask		= enable_dynirq,
+
+	.irq_ack		= eoi_pirq,
+	.irq_eoi		= eoi_pirq,
+	.irq_mask_ack		= mask_ack_pirq,
 
 	.irq_set_affinity	= set_affinity_irq,
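The net effect of the chip-table changes: both chips route mask/unmask through the event-channel mask bits (disable_dynirq/enable_dynirq, hence the forward declarations at the top of the patch), and xen_pirq_chip gains .irq_ack, .irq_eoi and .irq_mask_ack so the same chip works under either flow handler. A stand-alone model of that dispatch, hypothetical names throughout:

	/* Stand-alone model (hypothetical, not kernel code) of a flow
	 * handler driving an irq_chip-style callback table.
	 * Build with: cc -o chipdemo chipdemo.c && ./chipdemo */
	#include <stdio.h>

	struct chip {
		void (*irq_ack)(void);
		void (*irq_eoi)(void);
	};

	static void eoi_pirq(void) { puts("  clear evtchn + PHYSDEVOP_eoi"); }
	static void handler(void)  { puts("  driver handler"); }

	/* Mirrors the table above: ack and eoi share one implementation. */
	static struct chip pirq_chip = {
		.irq_ack = eoi_pirq,
		.irq_eoi = eoi_pirq,
	};

	/* Edge flow: ack first, then run the handler. */
	static void handle_edge(struct chip *c)    { c->irq_ack(); handler(); }
	/* Fasteoi flow: run the handler, then signal eoi. */
	static void handle_fasteoi(struct chip *c) { handler(); c->irq_eoi(); }

	int main(void)
	{
		puts("edge flow:");    handle_edge(&pirq_chip);
		puts("fasteoi flow:"); handle_fasteoi(&pirq_chip);
		return 0;
	}

Either way the evtchn-clearing ack/eoi runs exactly once per interrupt, which is why one chip can back both the edge and the fasteoi flow.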