/*
 * Copyright (C) 2001  MandrakeSoft S.A.
 *
 *   http://www.linux-mandrake.com/
 *   http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Support for virtual MSI logic
 * Will be merged with virtual IOAPIC logic, since most of it is the same.
 */
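/*
 * This file implements two related pieces of HVM interrupt support:
 * delivery of guest MSIs through the virtual LAPIC (vmsi_deliver() and
 * its helpers), and interception of guest accesses to the MSI-X tables
 * of passed-through devices (the msixtbl_* routines below).
 */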
#include <xen/config.h>
#include <xen/types.h>
#include <xen/xmalloc.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/io.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/support.h>
#include <asm/current.h>
#include <asm/event.h>
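/*
 * Inject a single MSI vector into the given vLAPIC.  Unsupported delivery
 * modes are logged and dropped.
 */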
static void vmsi_inj_irq(
    struct domain *d,
    struct vlapic *target,
    uint8_t vector,
    uint8_t trig_mode,
    uint8_t delivery_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "vmsi_inj_irq "
                "irq %d trig %d delivery mode %d\n",
                vector, trig_mode, delivery_mode);

    switch ( delivery_mode )
    {
    case dest_Fixed:
    case dest_LowestPrio:
        if ( vlapic_set_irq(target, vector, trig_mode) )
            vcpu_kick(vlapic_vcpu(target));
        break;
    default:
        gdprintk(XENLOG_WARNING, "unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }
}
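/*
 * Deliver the guest MSI bound to a passed-through pirq.  The routing
 * information (destination ID, destination mode, delivery mode, trigger
 * mode) is unpacked from gmsi.gflags via the VMSI_*_MASK and
 * GLFAGS_SHIFT_* constants; the exact bit layout lives with those macros.
 */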
int vmsi_deliver(struct domain *d, int pirq)
{
    struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
    uint32_t flags = hvm_irq_dpci->mirq[pirq].gmsi.gflags;
    int vector = hvm_irq_dpci->mirq[pirq].gmsi.gvec;
    uint8_t dest = (uint8_t)flags;
    uint8_t dest_mode = !!(flags & VMSI_DM_MASK);
    uint8_t delivery_mode = (flags & VMSI_DELIV_MASK) >> GLFAGS_SHIFT_DELIV_MODE;
    uint8_t trig_mode = (flags & VMSI_TRIG_MODE) >> GLFAGS_SHIFT_TRG_MODE;
    struct vlapic *target;
    struct vcpu *v;

    HVM_DBG_LOG(DBG_LEVEL_IOAPIC,
                "msi: dest=%x dest_mode=%x delivery_mode=%x "
                "vector=%x trig_mode=%x\n",
                dest, dest_mode, delivery_mode, vector, trig_mode);

    if ( !(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_GUEST_MSI) )
    {
        gdprintk(XENLOG_WARNING, "pirq %x not msi\n", pirq);
        return 0;
    }

    switch ( delivery_mode )
    {
    case dest_LowestPrio:
        target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
        if ( target != NULL )
            vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode);
        else
            HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
                        "vector=%x delivery_mode=%x\n",
                        vector, dest_LowestPrio);
        break;

    case dest_Fixed:
        for_each_vcpu ( d, v )
            if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
                                   0, dest, dest_mode) )
                vmsi_inj_irq(d, vcpu_vlapic(v),
                             vector, trig_mode, delivery_mode);
        break;

    case dest__reserved_2:
    default:
        gdprintk(XENLOG_WARNING, "Unsupported delivery mode %d\n",
                 delivery_mode);
        break;
    }

    return 1;
}
/* Return value, -1 : multi-dests, non-negative value: dest_vcpu_id */
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode)
{
    int dest_vcpu_id = -1, w = 0;
    struct vcpu *v;

    if ( d->max_vcpus == 1 )
        return 0;

    for_each_vcpu ( d, v )
        if ( vlapic_match_dest(vcpu_vlapic(v), NULL, 0, dest, dest_mode) )
        {
            w++;
            dest_vcpu_id = v->vcpu_id;
        }

    return (w > 1) ? -1 : dest_vcpu_id;
}
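/*
 * What follows intercepts guest MMIO accesses to the MSI-X table of a
 * passed-through device.  Only the per-entry vector control word (the
 * mask bit) is ever forwarded to the real table; accesses touching the
 * address/data fields are left to the device model, and such writes are
 * remembered so that the next mask-bit write is handed to the device
 * model as well (see the comment in msixtbl_write()).
 */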
/* MSI-X mask bit hypervisor interception */
struct msixtbl_entry
{
    struct list_head list;
    atomic_t refcnt;    /* how many bind_pt_irq called for the device */

    /* TODO: resolve the potential race by destruction of pdev */
    struct pci_dev *pdev;
    unsigned long gtable;       /* gpa of msix table */
    unsigned long table_len;
    unsigned long table_flags[MAX_MSIX_TABLE_ENTRIES / BITS_PER_LONG + 1];

    struct rcu_head rcu;
};
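/* Find the msixtbl_entry whose guest MSI-X table range covers @addr. */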
static struct msixtbl_entry *msixtbl_find_entry(
    struct vcpu *v, unsigned long addr)
{
    struct msixtbl_entry *entry;
    struct domain *d = v->domain;

    list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
        if ( addr >= entry->gtable &&
             addr < entry->gtable + entry->table_len )
            return entry;

    return NULL;
}
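/*
 * Translate a guest-physical MSI-X table address into Xen's fixmap
 * mapping of the real table page, via the device's msix_table_idx[]
 * fixmap slots.
 */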
static void __iomem *msixtbl_addr_to_virt(
    struct msixtbl_entry *entry, unsigned long addr)
{
    int idx, nr_page;

    if ( !entry || !entry->pdev )
        return NULL;

    nr_page = (addr >> PAGE_SHIFT) -
              (entry->gtable >> PAGE_SHIFT);
    idx = entry->pdev->msix_table_idx[nr_page];
    if ( !idx )
        return NULL;

    return (void *)(fix_to_virt(idx) +
                    (addr & ((1UL << PAGE_SHIFT) - 1)));
}
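/*
 * MMIO read handler: only reads of a vector control word are satisfied
 * from the real table; anything else is left to the device model by
 * returning X86EMUL_UNHANDLEABLE.
 */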
static int msixtbl_read(
    struct vcpu *v, unsigned long address,
    unsigned long len, unsigned long *pval)
{
    unsigned long offset;
    struct msixtbl_entry *entry;
    void *virt;
    int r = X86EMUL_UNHANDLEABLE;

    if ( len != 4 )
        return r;

    offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
    if ( offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
        return r;

    entry = msixtbl_find_entry(v, address);
    virt = msixtbl_addr_to_virt(entry, address);
    if ( !virt )
        return r;

    *pval = readl(virt);
    return X86EMUL_OKAY;
}
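/*
 * MMIO write handler: a mask-bit (vector control) write goes straight to
 * the real table unless the guest has also modified the entry's
 * address/data fields, in which case the access is bounced to the device
 * model.  Writes to other offsets are only recorded, never applied here.
 */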
static int msixtbl_write(struct vcpu *v, unsigned long address,
                         unsigned long len, unsigned long val)
{
    unsigned long offset;
    struct msixtbl_entry *entry;
    void *virt;
    int nr_entry;
    int r = X86EMUL_UNHANDLEABLE;

    if ( len != 4 )
        return r;

    entry = msixtbl_find_entry(v, address);
    nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;

    offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
    if ( offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
    {
        set_bit(nr_entry, &entry->table_flags);
        return r;
    }

    /* exit to device model if address/data has been modified */
    if ( test_and_clear_bit(nr_entry, &entry->table_flags) )
        return r;

    virt = msixtbl_addr_to_virt(entry, address);
    if ( !virt )
        return r;

    writel(val, virt);
    return X86EMUL_OKAY;
}
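/*
 * Range check used by the MMIO dispatcher: an address is ours if it lies
 * within a registered MSI-X table and can be mapped to the real table.
 */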
static int msixtbl_range(struct vcpu *v, unsigned long addr)
{
    struct msixtbl_entry *entry;
    void *virt;

    entry = msixtbl_find_entry(v, addr);
    virt = msixtbl_addr_to_virt(entry, addr);

    return !!virt;
}
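/*
 * Handler table consumed by the HVM MMIO emulation code: check_handler
 * decides whether an access is routed to the read/write handlers above.
 */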
const struct hvm_mmio_handler msixtbl_mmio_handler = {
    .check_handler = msixtbl_range,
    .read_handler = msixtbl_read,
    .write_handler = msixtbl_write
};
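/*
 * Initialise a pre-allocated entry for @pdev's MSI-X table at guest
 * physical address @gtable and publish it on the per-domain RCU list.
 */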
static void add_msixtbl_entry(struct domain *d,
                              struct pci_dev *pdev,
                              uint64_t gtable,
                              struct msixtbl_entry *entry)
{
    u32 len;

    memset(entry, 0, sizeof(struct msixtbl_entry));

    INIT_LIST_HEAD(&entry->list);
    INIT_RCU_HEAD(&entry->rcu);
    atomic_set(&entry->refcnt, 0);

    len = pci_msix_get_table_len(pdev);
    entry->table_len = len;
    entry->pdev = pdev;
    entry->gtable = (unsigned long) gtable;

    list_add_rcu(&entry->list, &d->arch.hvm_domain.msixtbl_list);
}
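/*
 * Entries are unlinked from the list immediately but freed only after an
 * RCU grace period, so concurrent readers of the list never see a freed
 * entry.
 */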
static void free_msixtbl_entry(struct rcu_head *rcu)
{
    struct msixtbl_entry *entry;

    entry = container_of(rcu, struct msixtbl_entry, rcu);

    xfree(entry);
}

static void del_msixtbl_entry(struct msixtbl_entry *entry)
{
    list_del_rcu(&entry->list);
    call_rcu(&entry->rcu, free_msixtbl_entry);
}
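/*
 * Called (with pcidevs_lock held) when a passed-through MSI-X interrupt
 * is bound for the domain: look up the device behind @pirq, make sure an
 * msixtbl_entry for its table at @gtable exists, and take a reference.
 */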
int msixtbl_pt_register(struct domain *d, int pirq, uint64_t gtable)
{
    struct irq_desc *irq_desc;
    struct msi_desc *msi_desc;
    struct pci_dev *pdev;
    struct msixtbl_entry *entry, *new_entry;
    int r = -EINVAL;

    ASSERT(spin_is_locked(&pcidevs_lock));

    /*
     * xmalloc() with irq_disabled causes the failure of check_lock()
     * for xenpool->lock. So we allocate an entry beforehand.
     */
    new_entry = xmalloc(struct msixtbl_entry);
    if ( !new_entry )
        return -ENOMEM;

    irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
    if ( !irq_desc )
    {
        xfree(new_entry);
        return r;
    }

    if ( irq_desc->handler != &pci_msi_type )
        goto out;

    msi_desc = irq_desc->msi_desc;
    if ( !msi_desc )
        goto out;

    pdev = msi_desc->dev;

    spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);

    list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
        if ( pdev == entry->pdev )
            goto found;

    entry = new_entry;
    new_entry = NULL;
    add_msixtbl_entry(d, pdev, gtable, entry);

found:
    atomic_inc(&entry->refcnt);
    spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
    r = 0;

out:
    spin_unlock_irq(&irq_desc->lock);
    xfree(new_entry);
    return r;
}
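/*
 * Counterpart of msixtbl_pt_register(): drop the reference taken for
 * @pirq's binding and delete the entry once no bindings remain.
 */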
void msixtbl_pt_unregister(struct domain *d, int pirq)
{
    struct irq_desc *irq_desc;
    struct msi_desc *msi_desc;
    struct pci_dev *pdev;
    struct msixtbl_entry *entry;

    ASSERT(spin_is_locked(&pcidevs_lock));

    irq_desc = domain_spin_lock_irq_desc(d, pirq, NULL);
    if ( !irq_desc )
        return;

    if ( irq_desc->handler != &pci_msi_type )
        goto out;

    msi_desc = irq_desc->msi_desc;
    if ( !msi_desc )
        goto out;

    pdev = msi_desc->dev;

    spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);

    list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
        if ( pdev == entry->pdev )
            goto found;

    spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);

out:
    spin_unlock_irq(&irq_desc->lock);
    return;

found:
    /* Delete the entry only once its last binding reference is dropped. */
    if ( atomic_dec_and_test(&entry->refcnt) )
        del_msixtbl_entry(entry);

    spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
    spin_unlock_irq(&irq_desc->lock);
}
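/*
 * Drop every remaining entry for the domain.  The list lock is taken
 * with interrupts disabled to keep check_lock() happy (see below).
 */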
void msixtbl_pt_cleanup(struct domain *d, int pirq)
{
    struct msixtbl_entry *entry, *temp;
    unsigned long flags;

    /* msixtbl_list_lock must be acquired with irq_disabled for check_lock() */
    local_irq_save(flags);
    spin_lock(&d->arch.hvm_domain.msixtbl_list_lock);

    list_for_each_entry_safe( entry, temp,
                              &d->arch.hvm_domain.msixtbl_list, list )
        del_msixtbl_entry(entry);

    spin_unlock(&d->arch.hvm_domain.msixtbl_list_lock);
    local_irq_restore(flags);
}