/*
 * Copyright (C) 2007 Advanced Micro Devices, Inc.
 * Author: Leo Duran <leo.duran@amd.com>
 * Author: Wei Wang <wei.wang2@amd.com> - adapted to xen
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <asm/amd-iommu.h>
#include <asm/msi.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm-x86/fixmap.h>
#include <mach_apic.h>
static struct amd_iommu **irq_to_iommu;
static int nr_amd_iommus;
static long amd_iommu_cmd_buffer_entries = IOMMU_CMD_BUFFER_DEFAULT_ENTRIES;
static long amd_iommu_event_log_entries = IOMMU_EVENT_LOG_DEFAULT_ENTRIES;

unsigned short ivrs_bdf_entries;
struct ivrs_mappings *ivrs_mappings;
struct list_head amd_iommu_head;
struct table_struct device_table;
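
/*
 * Map the IOMMU's MMIO register window into Xen's virtual address space.
 * Each IOMMU gets MMIO_PAGES_PER_IOMMU fixmap slots starting at
 * FIX_IOMMU_MMIO_BASE_0, indexed by nr_amd_iommus, so the nth unit
 * discovered is mapped at a fixed, non-overlapping virtual range.
 */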
static int __init map_iommu_mmio_region(struct amd_iommu *iommu)
{
    unsigned long mfn;

    if ( nr_amd_iommus > MAX_AMD_IOMMUS )
    {
        AMD_IOMMU_DEBUG("nr_amd_iommus %d > MAX_IOMMUS\n", nr_amd_iommus);
        return -ENOMEM;
    }

    iommu->mmio_base = (void *)fix_to_virt(
        FIX_IOMMU_MMIO_BASE_0 + nr_amd_iommus * MMIO_PAGES_PER_IOMMU);
    mfn = (unsigned long)(iommu->mmio_base_phys >> PAGE_SHIFT);
    map_pages_to_xen((unsigned long)iommu->mmio_base, mfn,
                     MMIO_PAGES_PER_IOMMU, PAGE_HYPERVISOR_NOCACHE);

    memset(iommu->mmio_base, 0, IOMMU_MMIO_REGION_LENGTH);

    return 0;
}
static void __init unmap_iommu_mmio_region(struct amd_iommu *iommu)
{
    if ( iommu->mmio_base )
    {
        iounmap(iommu->mmio_base);
        iommu->mmio_base = NULL;
    }
}
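
/*
 * The register programming below relies on the set_field_in_reg_u32()
 * helper from the shared AMD IOMMU support code.  Roughly, and for
 * illustration only (the real definition lives elsewhere, this sketch is
 * not part of this file):
 *
 *     u32 set_field_in_reg_u32(u32 field, u32 reg_value,
 *                              u32 mask, u32 shift, u32 *reg)
 *     {
 *         reg_value &= ~mask;                    -- clear the field
 *         reg_value |= (field << shift) & mask;  -- insert the new value
 *         *reg = reg_value;
 *         return reg_value;
 *     }
 *
 * register_iommu_dev_table_in_mmio_space() uses it to program the device
 * table base address (split into low/high halves) and its size, in 4K
 * pages minus one, into the IOMMU's Device Table Base Address register.
 */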
static void register_iommu_dev_table_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->dev_table.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_DEV_TABLE_BASE_LOW_MASK,
                         IOMMU_DEV_TABLE_BASE_LOW_SHIFT, &entry);
    set_field_in_reg_u32((iommu->dev_table.alloc_size / PAGE_SIZE) - 1,
                         entry, IOMMU_DEV_TABLE_SIZE_MASK,
                         IOMMU_DEV_TABLE_SIZE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_LOW_OFFSET);

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_DEV_TABLE_BASE_HIGH_MASK,
                         IOMMU_DEV_TABLE_BASE_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_DEV_TABLE_BASE_HIGH_OFFSET);
}
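
/*
 * The hardware wants the command buffer length encoded as a power of two:
 * the Length field of the Command Buffer Base Address register holds
 * log2(number of entries).  The code below derives it as
 * log2(pages) + log2(entries per page), i.e.
 * get_order_from_bytes(alloc_size) + IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE.
 * For example, assuming the architected 16-byte command entries (256 per
 * 4K page), a 32K buffer is order 3, so the encoded length is 3 + 8 = 11,
 * i.e. 2048 entries.  The event log below is programmed the same way.
 */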
static void register_iommu_cmd_buffer_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 power_of2_entries;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->cmd_buffer.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_CMD_BUFFER_BASE_LOW_MASK,
                         IOMMU_CMD_BUFFER_BASE_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_LOW_OFFSET);

    power_of2_entries = get_order_from_bytes(iommu->cmd_buffer.alloc_size) +
                        IOMMU_CMD_BUFFER_POWER_OF2_ENTRIES_PER_PAGE;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_CMD_BUFFER_BASE_HIGH_MASK,
                         IOMMU_CMD_BUFFER_BASE_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(power_of2_entries, entry,
                         IOMMU_CMD_BUFFER_LENGTH_MASK,
                         IOMMU_CMD_BUFFER_LENGTH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CMD_BUFFER_BASE_HIGH_OFFSET);
}
static void register_iommu_event_log_in_mmio_space(struct amd_iommu *iommu)
{
    u64 addr_64, addr_lo, addr_hi;
    u32 power_of2_entries;
    u32 entry;

    addr_64 = (u64)virt_to_maddr(iommu->event_log.buffer);
    addr_lo = addr_64 & DMA_32BIT_MASK;
    addr_hi = addr_64 >> 32;

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EVENT_LOG_BASE_LOW_MASK,
                         IOMMU_EVENT_LOG_BASE_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_LOW_OFFSET);

    power_of2_entries = get_order_from_bytes(iommu->event_log.alloc_size) +
                        IOMMU_EVENT_LOG_POWER_OF2_ENTRIES_PER_PAGE;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EVENT_LOG_BASE_HIGH_MASK,
                         IOMMU_EVENT_LOG_BASE_HIGH_SHIFT, &entry);
    set_field_in_reg_u32(power_of2_entries, entry,
                         IOMMU_EVENT_LOG_LENGTH_MASK,
                         IOMMU_EVENT_LOG_LENGTH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EVENT_LOG_BASE_HIGH_OFFSET);
}
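
/*
 * Program the control register that switches DMA translation on or off.
 * When enabling, the per-IOMMU capability bits discovered from the IVRS
 * table (HT tunnel translation, isochronous mode, coherence, response
 * pass posted write) are applied first; PassPW is deliberately left
 * disabled.  The translation-enable bit itself is written last.
 */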
static void set_iommu_translation_control(struct amd_iommu *iommu,
                                          int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);

    if ( enable )
    {
        set_field_in_reg_u32(iommu->ht_tunnel_support ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_MASK,
                             IOMMU_CONTROL_HT_TUNNEL_TRANSLATION_SHIFT, &entry);
        set_field_in_reg_u32(iommu->isochronous ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_ISOCHRONOUS_MASK,
                             IOMMU_CONTROL_ISOCHRONOUS_SHIFT, &entry);
        set_field_in_reg_u32(iommu->coherent ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_COHERENT_MASK,
                             IOMMU_CONTROL_COHERENT_SHIFT, &entry);
        set_field_in_reg_u32(iommu->res_pass_pw ? IOMMU_CONTROL_ENABLED :
                             IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_MASK,
                             IOMMU_CONTROL_RESP_PASS_POSTED_WRITE_SHIFT, &entry);
        /* do not set PassPW bit */
        set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                             IOMMU_CONTROL_PASS_POSTED_WRITE_MASK,
                             IOMMU_CONTROL_PASS_POSTED_WRITE_SHIFT, &entry);
    }
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_TRANSLATION_ENABLE_MASK,
                         IOMMU_CONTROL_TRANSLATION_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
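
/*
 * Enable or disable command processing.  Before setting the enable bit
 * the head and tail pointers are reset to zero so that the hardware
 * consumer and the software producer start from a consistent, empty ring.
 */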
static void set_iommu_command_buffer_control(struct amd_iommu *iommu,
                                             int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_MASK,
                         IOMMU_CONTROL_COMMAND_BUFFER_ENABLE_SHIFT, &entry);

    /* reset head and tail pointer manually before enablement */
    if ( enable == IOMMU_CONTROL_ENABLED )
    {
        writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_HEAD_OFFSET);
        writel(0x0, iommu->mmio_base + IOMMU_CMD_BUFFER_TAIL_OFFSET);
    }

    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
static void register_iommu_exclusion_range(struct amd_iommu *iommu)
{
    u64 addr_lo, addr_hi;
    u32 entry;

    addr_lo = iommu->exclusion_limit & DMA_32BIT_MASK;
    addr_hi = iommu->exclusion_limit >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EXCLUSION_LIMIT_HIGH_MASK,
                         IOMMU_EXCLUSION_LIMIT_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_HIGH_OFFSET);

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EXCLUSION_LIMIT_LOW_MASK,
                         IOMMU_EXCLUSION_LIMIT_LOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_LIMIT_LOW_OFFSET);

    addr_lo = iommu->exclusion_base & DMA_32BIT_MASK;
    addr_hi = iommu->exclusion_base >> 32;

    set_field_in_reg_u32((u32)addr_hi, 0,
                         IOMMU_EXCLUSION_BASE_HIGH_MASK,
                         IOMMU_EXCLUSION_BASE_HIGH_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_HIGH_OFFSET);

    set_field_in_reg_u32((u32)addr_lo >> PAGE_SHIFT, 0,
                         IOMMU_EXCLUSION_BASE_LOW_MASK,
                         IOMMU_EXCLUSION_BASE_LOW_SHIFT, &entry);
    set_field_in_reg_u32(iommu->exclusion_allow_all, entry,
                         IOMMU_EXCLUSION_ALLOW_ALL_MASK,
                         IOMMU_EXCLUSION_ALLOW_ALL_SHIFT, &entry);
    set_field_in_reg_u32(iommu->exclusion_enable, entry,
                         IOMMU_EXCLUSION_RANGE_ENABLE_MASK,
                         IOMMU_EXCLUSION_RANGE_ENABLE_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_EXCLUSION_BASE_LOW_OFFSET);
}
static void set_iommu_event_log_control(struct amd_iommu *iommu,
                                        int enable)
{
    u32 entry;

    entry = readl(iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_EVENT_LOG_ENABLE_MASK,
                         IOMMU_CONTROL_EVENT_LOG_ENABLE_SHIFT, &entry);
    set_field_in_reg_u32(enable ? IOMMU_CONTROL_ENABLED :
                         IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_EVENT_LOG_INT_MASK,
                         IOMMU_CONTROL_EVENT_LOG_INT_SHIFT, &entry);
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_CONTROL_COMP_WAIT_INT_MASK,
                         IOMMU_CONTROL_COMP_WAIT_INT_SHIFT, &entry);

    /* reset head and tail pointer manually before enablement */
    if ( enable == IOMMU_CONTROL_ENABLED )
    {
        writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
        writel(0x0, iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
    }

    writel(entry, iommu->mmio_base + IOMMU_CONTROL_MMIO_OFFSET);
}
static void parse_event_log_entry(u32 entry[]);
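
/*
 * Drain the event log ring buffer.  The hardware advances the tail
 * pointer as it writes entries; software chases it with its cached head
 * (iommu->event_log_head), parsing each entry and then publishing the new
 * head back to the IOMMU so the slot can be reused.
 */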
static int amd_iommu_read_event_log(struct amd_iommu *iommu)
{
    u32 tail, head, *event_log;

    /* make sure there's an entry in the log */
    tail = readl(iommu->mmio_base + IOMMU_EVENT_LOG_TAIL_OFFSET);
    tail = get_field_from_reg_u32(tail,
                                  IOMMU_EVENT_LOG_TAIL_MASK,
                                  IOMMU_EVENT_LOG_TAIL_SHIFT);

    while ( tail != iommu->event_log_head )
    {
        /* read event log entry */
        event_log = (u32 *)(iommu->event_log.buffer +
                            (iommu->event_log_head *
                             IOMMU_EVENT_LOG_ENTRY_SIZE));

        parse_event_log_entry(event_log);

        if ( ++iommu->event_log_head == iommu->event_log.entries )
            iommu->event_log_head = 0;

        /* update head pointer */
        set_field_in_reg_u32(iommu->event_log_head, 0,
                             IOMMU_EVENT_LOG_HEAD_MASK,
                             IOMMU_EVENT_LOG_HEAD_SHIFT, &head);
        writel(head, iommu->mmio_base + IOMMU_EVENT_LOG_HEAD_OFFSET);
    }

    return 0;
}
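
/*
 * Recover from an event log overflow: wait for the hardware to stop
 * writing (EventLogRun clear), disable the log, drain whatever is left
 * for debugging, clear the overflow status bit, rewind the software head
 * and re-enable logging.
 */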
static void amd_iommu_reset_event_log(struct amd_iommu *iommu)
{
    u32 entry;
    u8 log_run;
    int loop_count = 1000;

    /* wait until EventLogRun bit = 0 */
    do {
        entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
        log_run = get_field_from_reg_u32(entry,
                                         IOMMU_STATUS_EVENT_LOG_RUN_MASK,
                                         IOMMU_STATUS_EVENT_LOG_RUN_SHIFT);
        loop_count--;
    } while ( log_run && loop_count );

    if ( log_run )
    {
        AMD_IOMMU_DEBUG("Warning: EventLogRun bit is not cleared\n");
        return;
    }

    set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED);

    /* read event log for debugging */
    amd_iommu_read_event_log(iommu);

    /* clear overflow bit */
    set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
                         IOMMU_STATUS_EVENT_OVERFLOW_MASK,
                         IOMMU_STATUS_EVENT_OVERFLOW_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);

    /* reset event log base address */
    iommu->event_log_head = 0;

    set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
}
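
/*
 * The IOMMU reports faults through a plain PCI MSI.  The routines below
 * build the MSI message by hand (vector, delivery mode, destination) and
 * write it straight into the IOMMU's MSI capability in PCI config space,
 * rather than going through the generic MSI setup path.
 */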
static void iommu_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
    struct msi_msg msg;
    unsigned int dest;
    struct amd_iommu *iommu = irq_to_iommu[irq];
    struct irq_desc *desc = irq_to_desc(irq);
    struct irq_cfg *cfg = desc->chip_data;
    u8 bus = (iommu->bdf >> 8) & 0xff;
    u8 dev = PCI_SLOT(iommu->bdf & 0xff);
    u8 func = PCI_FUNC(iommu->bdf & 0xff);

    dest = set_desc_affinity(desc, &mask);

    if ( dest == BAD_APICID )
    {
        dprintk(XENLOG_ERR, "Set iommu interrupt affinity error!\n");
        return;
    }

    memset(&msg, 0, sizeof(msg));
    msg.data = MSI_DATA_VECTOR(cfg->vector) & 0xff;
    msg.data |= (INT_DELIVERY_MODE != dest_LowestPrio) ?
        MSI_DATA_DELIVERY_FIXED:
        MSI_DATA_DELIVERY_LOWPRI;

    msg.address_lo = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
    msg.address_lo |= INT_DEST_MODE ? MSI_ADDR_DESTMODE_LOGIC:
        MSI_ADDR_DESTMODE_PHYS;
    msg.address_lo |= (INT_DELIVERY_MODE != dest_LowestPrio) ?
        MSI_ADDR_REDIRECTION_CPU:
        MSI_ADDR_REDIRECTION_LOWPRI;
    msg.address_lo |= MSI_ADDR_DEST_ID(dest & 0xff);

    pci_conf_write32(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_DATA_64, msg.data);
    pci_conf_write32(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_ADDRESS_LO, msg.address_lo);
    pci_conf_write32(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_ADDRESS_HI, msg.address_hi);
}
static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag)
{
    u16 control;
    int bus = (iommu->bdf >> 8) & 0xff;
    int dev = PCI_SLOT(iommu->bdf & 0xff);
    int func = PCI_FUNC(iommu->bdf & 0xff);

    control = pci_conf_read16(bus, dev, func,
                              iommu->msi_cap + PCI_MSI_FLAGS);
    control &= ~PCI_MSI_FLAGS_ENABLE;
    if ( flag )
        control |= PCI_MSI_FLAGS_ENABLE;
    pci_conf_write16(bus, dev, func,
                     iommu->msi_cap + PCI_MSI_FLAGS, control);
}
static void iommu_msi_unmask(unsigned int irq)
{
    unsigned long flags;
    struct amd_iommu *iommu = irq_to_iommu[irq];

    /* FIXME: do not support mask bits at the moment */
    if ( iommu->maskbit )
        return;

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
    spin_unlock_irqrestore(&iommu->lock, flags);
}
static void iommu_msi_mask(unsigned int irq)
{
    unsigned long flags;
    struct amd_iommu *iommu = irq_to_iommu[irq];
    struct irq_desc *desc = irq_to_desc(irq);

    irq_complete_move(&desc);

    /* FIXME: do not support mask bits at the moment */
    if ( iommu->maskbit )
        return;

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
    spin_unlock_irqrestore(&iommu->lock, flags);
}
static unsigned int iommu_msi_startup(unsigned int irq)
{
    iommu_msi_unmask(irq);
    return 0;
}
static void iommu_msi_end(unsigned int irq, u8 vector)
{
    iommu_msi_unmask(irq);
    ack_APIC_irq();
}
static hw_irq_controller iommu_msi_type = {
    .typename = "AMD-IOMMU-MSI",
    .startup = iommu_msi_startup,
    .shutdown = iommu_msi_mask,
    .enable = iommu_msi_unmask,
    .disable = iommu_msi_mask,
    .ack = iommu_msi_mask,
    .end = iommu_msi_end,
    .set_affinity = iommu_msi_set_affinity,
};
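
/*
 * Decode one event log entry (four 32-bit words).  IO_PAGE_FAULT events
 * are printed with their domain, requestor ID and faulting address, and
 * bus mastering is turned off for the offending device so it cannot keep
 * flooding the log; every other event code is dumped raw for debugging.
 */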
static void parse_event_log_entry(u32 entry[])
{
    u16 domain_id, device_id, bdf, cword;
    u32 code;
    u64 *addr;
    char * event_str[] = {"ILLEGAL_DEV_TABLE_ENTRY",
                          "IO_PAGE_FAULT",
                          "DEV_TABLE_HW_ERROR",
                          "PAGE_TABLE_HW_ERROR",
                          "ILLEGAL_COMMAND_ERROR",
                          "COMMAND_HW_ERROR",
                          "IOTLB_INV_TIMEOUT",
                          "INVALID_DEV_REQUEST"};

    code = get_field_from_reg_u32(entry[1], IOMMU_EVENT_CODE_MASK,
                                  IOMMU_EVENT_CODE_SHIFT);

    if ( (code > IOMMU_EVENT_INVALID_DEV_REQUEST) ||
         (code < IOMMU_EVENT_ILLEGAL_DEV_TABLE_ENTRY) )
    {
        AMD_IOMMU_DEBUG("Invalid event log entry!\n");
        return;
    }

    if ( code == IOMMU_EVENT_IO_PAGE_FAULT )
    {
        device_id = get_field_from_reg_u32(entry[0],
                                           IOMMU_EVENT_DEVICE_ID_MASK,
                                           IOMMU_EVENT_DEVICE_ID_SHIFT);
        domain_id = get_field_from_reg_u32(entry[1],
                                           IOMMU_EVENT_DOMAIN_ID_MASK,
                                           IOMMU_EVENT_DOMAIN_ID_SHIFT);
        addr = (u64 *)(entry + 2);
        printk(XENLOG_ERR "AMD-Vi: "
               "%s: domain = %d, device id = 0x%04x, "
               "fault address = 0x%"PRIx64"\n",
               event_str[code-1], domain_id, device_id, *addr);

        /* Tell the device to stop DMAing; we can't rely on the guest to
         * control it for us. */
        for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
            if ( get_dma_requestor_id(bdf) == device_id )
            {
                cword = pci_conf_read16(PCI_BUS(bdf), PCI_SLOT(bdf),
                                        PCI_FUNC(bdf), PCI_COMMAND);
                pci_conf_write16(PCI_BUS(bdf), PCI_SLOT(bdf),
                                 PCI_FUNC(bdf), PCI_COMMAND,
                                 cword & ~PCI_COMMAND_MASTER);
            }
    }
    else
    {
        AMD_IOMMU_DEBUG("event 0x%08x 0x%08x 0x%08x 0x%08x\n", entry[0],
                        entry[1], entry[2], entry[3]);
    }
}
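
/*
 * MSI handler for the IOMMU: drain the event log, reset it if the
 * hardware reports an overflow, then acknowledge the interrupt by writing
 * the EventLogInt status bit back.
 */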
static void amd_iommu_page_fault(int irq, void *dev_id,
                                 struct cpu_user_regs *regs)
{
    u32 entry, of;
    unsigned long flags;
    struct amd_iommu *iommu = dev_id;

    spin_lock_irqsave(&iommu->lock, flags);
    amd_iommu_read_event_log(iommu);

    /* check event overflow */
    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    of = get_field_from_reg_u32(entry,
                                IOMMU_STATUS_EVENT_OVERFLOW_MASK,
                                IOMMU_STATUS_EVENT_OVERFLOW_SHIFT);

    /* reset event log if event overflow */
    if ( of )
        amd_iommu_reset_event_log(iommu);

    /* reset interrupt status bit */
    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
                         IOMMU_STATUS_EVENT_LOG_INT_MASK,
                         IOMMU_STATUS_EVENT_LOG_INT_SHIFT, &entry);
    writel(entry, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    spin_unlock_irqrestore(&iommu->lock, flags);
}
static int set_iommu_interrupt_handler(struct amd_iommu *iommu)
{
    int irq, ret;

    irq = create_irq();
    if ( irq <= 0 )
    {
        dprintk(XENLOG_ERR, "IOMMU: no irqs\n");
        return 0;
    }

    irq_desc[irq].handler = &iommu_msi_type;
    irq_to_iommu[irq] = iommu;
    ret = request_irq(irq, amd_iommu_page_fault, 0,
                      "amd_iommu", iommu);
    if ( ret )
    {
        irq_desc[irq].handler = &no_irq_type;
        irq_to_iommu[irq] = NULL;
        destroy_irq(irq);
        AMD_IOMMU_DEBUG("can't request irq\n");
        return 0;
    }

    iommu->irq = irq;
    return irq;
}
static void enable_iommu(struct amd_iommu *iommu)
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    if ( iommu->enabled )
    {
        spin_unlock_irqrestore(&iommu->lock, flags);
        return;
    }

    register_iommu_dev_table_in_mmio_space(iommu);
    register_iommu_cmd_buffer_in_mmio_space(iommu);
    register_iommu_event_log_in_mmio_space(iommu);
    register_iommu_exclusion_range(iommu);

    iommu_msi_set_affinity(iommu->irq, cpu_online_map);
    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);

    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
    set_iommu_event_log_control(iommu, IOMMU_CONTROL_ENABLED);
    set_iommu_translation_control(iommu, IOMMU_CONTROL_ENABLED);

    iommu->enabled = 1;
    spin_unlock_irqrestore(&iommu->lock, flags);
}
static void __init deallocate_iommu_table_struct(
    struct table_struct *table)
{
    int order = 0;

    if ( table->buffer )
    {
        order = get_order_from_bytes(table->alloc_size);
        __free_amd_iommu_tables(table->buffer, order);
        table->buffer = NULL;
    }
}
static int __init allocate_iommu_table_struct(struct table_struct *table,
                                              const char *name)
{
    int order = 0;

    if ( table->buffer == NULL )
    {
        order = get_order_from_bytes(table->alloc_size);
        table->buffer = __alloc_amd_iommu_tables(order);

        if ( table->buffer == NULL )
        {
            AMD_IOMMU_DEBUG("Error allocating %s\n", name);
            return -ENOMEM;
        }
        memset(table->buffer, 0, PAGE_SIZE * (1UL << order));
    }
    return 0;
}
static int __init allocate_cmd_buffer(struct amd_iommu *iommu)
{
    /* allocate 'command buffer' in power of 2 increments of 4K */
    iommu->cmd_buffer_tail = 0;
    iommu->cmd_buffer.alloc_size = PAGE_SIZE <<
                                   get_order_from_bytes(
                                   PAGE_ALIGN(amd_iommu_cmd_buffer_entries *
                                              IOMMU_CMD_BUFFER_ENTRY_SIZE));
    iommu->cmd_buffer.entries = iommu->cmd_buffer.alloc_size /
                                IOMMU_CMD_BUFFER_ENTRY_SIZE;

    return (allocate_iommu_table_struct(&iommu->cmd_buffer, "Command Buffer"));
}
static int __init allocate_event_log(struct amd_iommu *iommu)
{
    /* allocate 'event log' in power of 2 increments of 4K */
    iommu->event_log_head = 0;
    iommu->event_log.alloc_size = PAGE_SIZE <<
                                  get_order_from_bytes(
                                  PAGE_ALIGN(amd_iommu_event_log_entries *
                                             IOMMU_EVENT_LOG_ENTRY_SIZE));
    iommu->event_log.entries = iommu->event_log.alloc_size /
                               IOMMU_EVENT_LOG_ENTRY_SIZE;

    return (allocate_iommu_table_struct(&iommu->event_log, "Event Log"));
}
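
/*
 * Per-IOMMU initialization: allocate the command buffer and event log,
 * map the MMIO registers, hook up the fault interrupt, attach the shared
 * device table and finally enable the unit.
 */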
static int __init amd_iommu_init_one(struct amd_iommu *iommu)
{
    if ( allocate_cmd_buffer(iommu) != 0 )
        goto error_out;

    if ( allocate_event_log(iommu) != 0 )
        goto error_out;

    if ( map_iommu_mmio_region(iommu) != 0 )
        goto error_out;

    if ( set_iommu_interrupt_handler(iommu) == 0 )
        goto error_out;

    /* To make sure that device_table.buffer has been successfully allocated */
    if ( device_table.buffer == NULL )
        goto error_out;

    iommu->dev_table.alloc_size = device_table.alloc_size;
    iommu->dev_table.entries = device_table.entries;
    iommu->dev_table.buffer = device_table.buffer;

    enable_iommu(iommu);
    printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus);
    nr_amd_iommus++;

    return 0;

error_out:
    return -ENODEV;
}
static void __init amd_iommu_init_cleanup(void)
{
    struct amd_iommu *iommu, *next;
    int bdf;

    /* free amd iommu list */
    list_for_each_entry_safe ( iommu, next, &amd_iommu_head, list )
    {
        list_del(&iommu->list);
        if ( iommu->enabled )
        {
            deallocate_iommu_table_struct(&iommu->cmd_buffer);
            deallocate_iommu_table_struct(&iommu->event_log);
            unmap_iommu_mmio_region(iommu);
        }
        xfree(iommu);
    }

    /* free interrupt remapping table */
    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    {
        if ( ivrs_mappings[bdf].intremap_table )
            amd_iommu_free_intremap_table(bdf);
    }

    /* free device table */
    deallocate_iommu_table_struct(&device_table);

    /* free ivrs_mappings[] */
    if ( ivrs_mappings )
    {
        xfree(ivrs_mappings);
        ivrs_mappings = NULL;
    }

    /* free irq_to_iommu[] */
    if ( irq_to_iommu )
    {
        xfree(irq_to_iommu);
        irq_to_iommu = NULL;
    }

    iommu_enabled = 0;
    iommu_passthrough = 0;
    iommu_intremap = 0;
}
static int __init init_ivrs_mapping(void)
{
    int bdf;

    BUG_ON( !ivrs_bdf_entries );

    ivrs_mappings = xmalloc_array( struct ivrs_mappings, ivrs_bdf_entries);
    if ( ivrs_mappings == NULL )
    {
        AMD_IOMMU_DEBUG("Error allocating IVRS Mappings table\n");
        return -ENOMEM;
    }
    memset(ivrs_mappings, 0, ivrs_bdf_entries * sizeof(struct ivrs_mappings));

    /* assign default values for device entries */
    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    {
        ivrs_mappings[bdf].dte_requestor_id = bdf;
        ivrs_mappings[bdf].dte_sys_mgt_enable =
            IOMMU_DEV_TABLE_SYS_MGT_MSG_FORWARDED;
        ivrs_mappings[bdf].dte_allow_exclusion = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].unity_map_enable = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].iommu = NULL;

        ivrs_mappings[bdf].intremap_table = NULL;
        ivrs_mappings[bdf].dte_lint1_pass = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].dte_lint0_pass = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].dte_nmi_pass = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].dte_ext_int_pass = IOMMU_CONTROL_DISABLED;
        ivrs_mappings[bdf].dte_init_pass = IOMMU_CONTROL_DISABLED;

        if ( amd_iommu_perdev_intremap )
            spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
    }
    return 0;
}
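
/*
 * Allocate the global device table shared by all IOMMUs and fill in one
 * entry per requestor ID, carrying over the system management, exclusion
 * and interrupt pass-through settings collected in ivrs_mappings[] and
 * pointing each entry at its interrupt remapping table.
 */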
static int __init amd_iommu_setup_device_table(void)
{
    int bdf;
    void *intr_tb, *dte;
    int sys_mgt, dev_ex, lint1_pass, lint0_pass,
        nmi_pass, ext_int_pass, init_pass;

    BUG_ON( (ivrs_bdf_entries == 0) );

    /* allocate 'device table' on a 4K boundary */
    device_table.alloc_size = PAGE_SIZE <<
                              get_order_from_bytes(
                              PAGE_ALIGN(ivrs_bdf_entries *
                                         IOMMU_DEV_TABLE_ENTRY_SIZE));
    device_table.entries = device_table.alloc_size /
                           IOMMU_DEV_TABLE_ENTRY_SIZE;

    if ( allocate_iommu_table_struct(&device_table, "Device Table") != 0 )
        return -ENOMEM;

    /* Add device table entries */
    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    {
        intr_tb = ivrs_mappings[bdf].intremap_table;

        if ( intr_tb )
        {
            sys_mgt = ivrs_mappings[bdf].dte_sys_mgt_enable;
            dev_ex = ivrs_mappings[bdf].dte_allow_exclusion;

            /* get interrupt remapping settings */
            lint1_pass = ivrs_mappings[bdf].dte_lint1_pass;
            lint0_pass = ivrs_mappings[bdf].dte_lint0_pass;
            nmi_pass = ivrs_mappings[bdf].dte_nmi_pass;
            ext_int_pass = ivrs_mappings[bdf].dte_ext_int_pass;
            init_pass = ivrs_mappings[bdf].dte_init_pass;

            /* add device table entry */
            dte = device_table.buffer + (bdf * IOMMU_DEV_TABLE_ENTRY_SIZE);
            amd_iommu_add_dev_table_entry(
                dte, sys_mgt, dev_ex, lint1_pass, lint0_pass,
                nmi_pass, ext_int_pass, init_pass);

            amd_iommu_set_intremap_table(
                dte, (u64)virt_to_maddr(intr_tb), iommu_intremap);

            AMD_IOMMU_DEBUG("Add device table entry: device id = 0x%04x, "
                            "interrupt table = 0x%"PRIx64"\n", bdf,
                            (u64)virt_to_maddr(intr_tb));
        }
    }

    return 0;
}
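
/*
 * Top-level initialization, called once at boot when at least one IOMMU
 * has been detected: build the IVRS device mappings, the IO-APIC
 * interrupt remapping entries and the shared device table, then
 * initialize and enable every IOMMU.  Any failure unwinds through
 * amd_iommu_init_cleanup().
 */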
int __init amd_iommu_init(void)
{
    struct amd_iommu *iommu;

    BUG_ON( !iommu_found() );

    irq_to_iommu = xmalloc_array(struct amd_iommu *, nr_irqs);
    if ( irq_to_iommu == NULL )
        goto error_out;
    memset(irq_to_iommu, 0, nr_irqs * sizeof(struct amd_iommu *));

    ivrs_bdf_entries = amd_iommu_get_ivrs_dev_entries();

    if ( !ivrs_bdf_entries )
        goto error_out;

    if ( init_ivrs_mapping() != 0 )
        goto error_out;

    if ( amd_iommu_update_ivrs_mapping_acpi() != 0 )
        goto error_out;

    /* initialize io-apic interrupt remapping entries */
    if ( amd_iommu_setup_ioapic_remapping() != 0 )
        goto error_out;

    /* allocate and initialize a global device table shared by all iommus */
    if ( amd_iommu_setup_device_table() != 0 )
        goto error_out;

    /* per iommu initialization */
    for_each_amd_iommu ( iommu )
        if ( amd_iommu_init_one(iommu) != 0 )
            goto error_out;

    return 0;

error_out:
    amd_iommu_init_cleanup();
    return -ENODEV;
}
static void disable_iommu(struct amd_iommu *iommu)
{
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    if ( !iommu->enabled )
    {
        spin_unlock_irqrestore(&iommu->lock, flags);
        return;
    }

    amd_iommu_msi_enable(iommu, IOMMU_CONTROL_DISABLED);
    set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_DISABLED);
    set_iommu_event_log_control(iommu, IOMMU_CONTROL_DISABLED);
    set_iommu_translation_control(iommu, IOMMU_CONTROL_DISABLED);

    iommu->enabled = 0;

    spin_unlock_irqrestore(&iommu->lock, flags);
}
static void invalidate_all_domain_pages(void)
{
    struct domain *d;

    for_each_domain( d )
        invalidate_all_iommu_pages(d);
}
static void invalidate_all_devices(void)
{
    int bdf, req_id;
    unsigned long flags;
    struct amd_iommu *iommu;

    for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
    {
        iommu = find_iommu_for_device(bdf);
        req_id = ivrs_mappings[bdf].dte_requestor_id;
        if ( iommu )
        {
            spin_lock_irqsave(&iommu->lock, flags);
            invalidate_dev_table_entry(iommu, req_id);
            invalidate_interrupt_table(iommu, req_id);
            flush_command_buffer(iommu);
            spin_unlock_irqrestore(&iommu->lock, flags);
        }
    }
}
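
/*
 * Power management hooks: suspend simply turns every IOMMU off; resume
 * disables and re-enables each unit to reprogram it from scratch, then
 * flushes the device table entries, interrupt tables and IO page table
 * caches so no stale translations survive across the power transition.
 */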
void amd_iommu_suspend(void)
{
    struct amd_iommu *iommu;

    for_each_amd_iommu ( iommu )
        disable_iommu(iommu);
}
void amd_iommu_resume(void)
{
    struct amd_iommu *iommu;

    for_each_amd_iommu ( iommu )
    {
        /*
         * To make sure that iommus have not been touched
         * before re-enablement
         */
        disable_iommu(iommu);
        enable_iommu(iommu);
    }

    /* flush all cache entries after iommu re-enabled */
    invalidate_all_devices();
    invalidate_all_domain_pages();
}