/******************************************************************************
 * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Xen domain firmware emulation support
 * Copyright (C) 2004 Hewlett-Packard Co.
 *       Dan Magenheimer (dan.magenheimer@hp.com)
 */
#include <xen/config.h>
#include <xen/errno.h>
#include <xen/sched.h>

#include <acpi/actables.h>

#include <asm/dom_fw.h>
#include <asm/dom_fw_common.h>
#include <asm/dom_fw_dom0.h>
#include <asm/dom_fw_utils.h>

#include <linux/sort.h>
struct acpi_backup_table_entry {
42
struct list_head list;
45
unsigned char data[0];
48
static LIST_HEAD(acpi_backup_table_list);
50
static u32 lsapic_nbr;
52
/* Modify lsapic table. Provides LPs. */
54
acpi_update_lsapic(struct acpi_subtable_header * header, const unsigned long end)
56
struct acpi_table_lsapic *lsapic;
59
lsapic = (struct acpi_table_lsapic *)header;
63
if (lsapic_nbr < dom0->max_vcpus && dom0->vcpu[lsapic_nbr] != NULL)
68
if (lsapic->flags.enabled && enable) {
69
printk("enable lsapic entry: 0x%lx\n", (u64) lsapic);
70
lsapic->id = lsapic_nbr;
73
} else if (lsapic->flags.enabled) {
74
printk("DISABLE lsapic entry: 0x%lx\n", (u64) lsapic);
75
lsapic->flags.enabled = 0;
83
acpi_patch_plat_int_src(struct acpi_subtable_header * header,
84
const unsigned long end)
86
struct acpi_table_plat_int_src *plintsrc;
88
plintsrc = (struct acpi_table_plat_int_src *)header;
92
if (plintsrc->type == ACPI_INTERRUPT_CPEI) {
93
printk("ACPI_INTERRUPT_CPEI disabled for Domain0\n");
100
acpi_update_madt_checksum(struct acpi_table_header *table)
102
struct acpi_table_madt *acpi_madt;
107
acpi_madt = (struct acpi_table_madt *)table;
108
acpi_madt->header.checksum = 0;
109
acpi_madt->header.checksum = -acpi_tb_checksum((u8*)acpi_madt,
116
acpi_backup_table(struct acpi_table_header *table)
118
struct acpi_backup_table_entry *entry;
120
entry = xmalloc_bytes(sizeof(*entry) + table->length);
122
dprintk(XENLOG_WARNING, "Failed to allocate memory for "
123
"%.4s table backup\n", table->signature);
127
entry->pa = __pa(table);
128
entry->size = table->length;
130
memcpy(entry->data, table, table->length);
132
list_add(&entry->list, &acpi_backup_table_list);
134
printk(XENLOG_INFO "Backup %.4s table stored @0x%p\n",
135
table->signature, entry->data);
141
acpi_restore_tables()
143
struct acpi_backup_table_entry *entry;
145
list_for_each_entry(entry, &acpi_backup_table_list, list) {
146
printk(XENLOG_INFO "Restoring backup %.4s table @0x%p\n",
147
((struct acpi_table_header *)entry->data)->signature,
150
memcpy(__va(entry->pa), entry->data, entry->size);
151
/* Only called from kexec path, no need to free entries */
155
/*
 * Neuter an ACPI table in place so dom0's ACPI CA will not match it:
 * the original signature is stashed in oem_id (bytes 1-4, after a
 * leading 'x'), oem_table_id is overwritten, and the signature becomes
 * "OEMx".  The checksum is recomputed so the table still validates.
 */
static int __init __acpi_table_disable(struct acpi_table_header *header)
{
	printk("Disabling ACPI table: %4.4s\n", header->signature);

	memcpy(header->oem_id, "xxxxxx", 6);
	memcpy(header->oem_id+1, header->signature, 4);
	/* oem_table_id is 8 bytes; source literal must be 8 chars too. */
	memcpy(header->oem_table_id, "Xen     ", 8);
	memcpy(header->signature, "OEMx", 4);
	header->checksum = 0;
	header->checksum = -acpi_tb_checksum((u8*)header, header->length);

	return 0;
}
/* Disable (hide from dom0) every instance of the table with signature @id. */
static void __init acpi_table_disable(char *id)
{
	acpi_table_parse(id, __acpi_table_disable);
}
/* base is physical address of acpi table */
175
static void __init touch_acpi_table(void)
177
struct acpi_table_header *madt = NULL;
181
acpi_get_table(ACPI_SIG_MADT, 0, &madt);
185
* - Disable CPUs that would exceed max vCPUs for the domain
186
* - Virtualize id/eid for indexing into domain vCPU array
187
* - Hide CPEI interrupt source
189
* ACPI tables must be backed-up before modification!
191
* We update the checksum each time we modify to keep the
192
* ACPI CA from warning about invalid checksums.
194
acpi_table_parse(ACPI_SIG_MADT, acpi_backup_table);
196
if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_update_lsapic, 0) < 0)
197
printk("Error parsing MADT - no LAPIC entries\n");
199
acpi_update_madt_checksum(madt);
201
if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC,
202
acpi_patch_plat_int_src, 0) < 0)
203
printk("Error parsing MADT - no PLAT_INT_SRC entries\n");
205
acpi_update_madt_checksum(madt);
208
* SRAT & SLIT tables aren't useful for Dom0 until
209
* we support more NUMA configuration information in Xen.
211
* NB - backup ACPI tables first.
213
acpi_table_parse(ACPI_SIG_SRAT, acpi_backup_table);
214
acpi_table_parse(ACPI_SIG_SLIT, acpi_backup_table);
216
acpi_table_disable(ACPI_SIG_SRAT);
217
acpi_table_disable(ACPI_SIG_SLIT);
221
/*
 * Pass the machine's EFI configuration tables (MPS, ACPI, SMBIOS, HCDP)
 * through to dom0's virtual EFI system table.  Also patches the ACPI
 * tables for dom0 via touch_acpi_table().
 * NOTE(review): slot 0 of efi_tables[] is assumed to be filled in by
 * common fw-table setup; passthrough entries start at index 1 — confirm.
 */
void __init efi_systable_init_dom0(struct fw_tables *tables)
{
	int i = 1;

	touch_acpi_table();

	/* Write messages to the console.  */
	printk("Domain0 EFI passthrough:");
	if (efi.mps != EFI_INVALID_TABLE_ADDR) {
		tables->efi_tables[i].guid = MPS_TABLE_GUID;
		tables->efi_tables[i].table = efi.mps;
		printk(" MPS=0x%lx", tables->efi_tables[i].table);
		i++;
	}
	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) {
		tables->efi_tables[i].guid = ACPI_20_TABLE_GUID;
		tables->efi_tables[i].table = efi.acpi20;
		printk(" ACPI 2.0=0x%lx", tables->efi_tables[i].table);
		i++;
	}
	if (efi.acpi != EFI_INVALID_TABLE_ADDR) {
		tables->efi_tables[i].guid = ACPI_TABLE_GUID;
		tables->efi_tables[i].table = efi.acpi;
		printk(" ACPI=0x%lx", tables->efi_tables[i].table);
		i++;
	}
	if (efi.smbios != EFI_INVALID_TABLE_ADDR) {
		tables->efi_tables[i].guid = SMBIOS_TABLE_GUID;
		tables->efi_tables[i].table = efi.smbios;
		printk(" SMBIOS=0x%lx", tables->efi_tables[i].table);
		i++;
	}
	if (efi.hcdp != EFI_INVALID_TABLE_ADDR) {
		tables->efi_tables[i].guid = HCDP_TABLE_GUID;
		tables->efi_tables[i].table = efi.hcdp;
		printk(" HCDP=0x%lx", tables->efi_tables[i].table);
		i++;
	}
	printk("\n");
	BUG_ON(i > NUM_EFI_SYS_TABLES);
}
setup_dom0_memmap_info(struct domain *d, struct fw_tables *tables)
268
unsigned int num_pages;
269
efi_memory_desc_t *md;
270
efi_memory_desc_t *last_mem_md = NULL;
271
xen_ia64_memmap_info_t *memmap_info;
272
unsigned long paddr_start;
273
unsigned long paddr_end;
275
size = sizeof(*memmap_info) +
276
(tables->num_mds + 1) * sizeof(tables->efi_memmap[0]);
277
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
278
for (i = tables->num_mds - 1; i >= 0; i--) {
279
md = &tables->efi_memmap[i];
280
if (md->attribute == EFI_MEMORY_WB &&
281
md->type == EFI_CONVENTIONAL_MEMORY &&
283
((num_pages + 1) << (PAGE_SHIFT - EFI_PAGE_SHIFT))) {
289
if (last_mem_md == NULL) {
290
printk("%s: warning: "
291
"no dom0 contiguous memory to hold memory map\n",
295
paddr_end = last_mem_md->phys_addr +
296
(last_mem_md->num_pages << EFI_PAGE_SHIFT);
297
paddr_start = (paddr_end - (num_pages << PAGE_SHIFT)) & PAGE_MASK;
298
last_mem_md->num_pages -= (paddr_end - paddr_start) >> EFI_PAGE_SHIFT;
300
md = &tables->efi_memmap[tables->num_mds];
302
md->type = EFI_RUNTIME_SERVICES_DATA;
303
md->phys_addr = paddr_start;
305
md->num_pages = num_pages << (PAGE_SHIFT - EFI_PAGE_SHIFT);
306
md->attribute = EFI_MEMORY_WB;
308
BUG_ON(tables->fw_tables_size <
310
sizeof(tables->efi_memmap[0]) * tables->num_mds);
311
/* with this sort, md doesn't point memmap table */
312
sort(tables->efi_memmap, tables->num_mds,
313
sizeof(efi_memory_desc_t), efi_mdt_cmp, NULL);
315
memmap_info = domain_mpa_to_imva(d, paddr_start);
316
memmap_info->efi_memdesc_size = sizeof(md[0]);
317
memmap_info->efi_memdesc_version = EFI_MEMORY_DESCRIPTOR_VERSION;
318
memmap_info->efi_memmap_size = tables->num_mds * sizeof(md[0]);
320
paddr_start + offsetof(xen_ia64_memmap_info_t, memdesc),
321
&tables->efi_memmap[0], memmap_info->efi_memmap_size);
322
d->shared_info->arch.memmap_info_num_pages = num_pages;
323
d->shared_info->arch.memmap_info_pfn = paddr_start >> PAGE_SHIFT;
326
/* setup_guest() @ libxc/xc_linux_build() arranges memory for domU.
327
* however no one arranges memory for dom0,
328
* instead we allocate pages manually.
331
assign_new_domain0_range(struct domain *d, const efi_memory_desc_t * md)
333
if (md->type == EFI_PAL_CODE ||
334
md->type == EFI_RUNTIME_SERVICES_DATA ||
335
md->type == EFI_CONVENTIONAL_MEMORY) {
336
unsigned long start = md->phys_addr & PAGE_MASK;
338
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
342
/* md->num_pages = 0 is allowed. */
346
for (addr = start; addr < end; addr += PAGE_SIZE)
347
assign_new_domain0_page(d, addr);
351
/* Complete the dom0 memmap. */
353
complete_dom0_memmap(struct domain *d, struct fw_tables *tables)
356
void *efi_map_start, *efi_map_end, *p;
360
for (i = 0; i < tables->num_mds; i++)
361
assign_new_domain0_range(d, &tables->efi_memmap[i]);
363
/* Walk through all MDT entries.
364
Copy all interesting entries. */
365
efi_map_start = __va(ia64_boot_param->efi_memmap);
366
efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
367
efi_desc_size = ia64_boot_param->efi_memdesc_size;
370
/* EFI memory descriptor is using 4k page, while xen is using 16k page.
371
* To avoid identity mapping for EFI_ACPI_RECLAIM_MEMORY etc. being
372
* blocked by WB mapping, scan memory descriptor twice.
373
* First: setup identity mapping for EFI_ACPI_RECLAIM_MEMORY etc.
374
* Second: setup mapping for EFI_CONVENTIONAL_MEMORY etc.
377
/* first scan, setup identity mapping for EFI_ACPI_RECLAIM_MEMORY etc. */
378
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
379
const efi_memory_desc_t *md = p;
380
efi_memory_desc_t *dom_md = &tables->efi_memmap[tables->num_mds];
381
u64 start = md->phys_addr;
382
u64 size = md->num_pages << EFI_PAGE_SHIFT;
383
u64 end = start + size;
388
case EFI_RUNTIME_SERVICES_CODE:
389
case EFI_RUNTIME_SERVICES_DATA:
390
case EFI_ACPI_RECLAIM_MEMORY:
391
case EFI_ACPI_MEMORY_NVS:
392
case EFI_RESERVED_TYPE:
394
* Map into dom0 - We must respect protection
395
* and cache attributes. Not all of these pages
398
flags = ASSIGN_writable; /* dummy - zero */
399
if (md->attribute & EFI_MEMORY_WP)
400
flags |= ASSIGN_readonly;
401
if ((md->attribute & EFI_MEMORY_UC) &&
402
!(md->attribute & EFI_MEMORY_WB))
403
flags |= ASSIGN_nocache;
405
assign_domain_mach_page(d, start, size, flags);
408
case EFI_MEMORY_MAPPED_IO:
409
/* Will be mapped with ioremap. */
410
/* Copy descriptor. */
412
dom_md->virt_addr = 0;
416
case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
417
flags = ASSIGN_writable; /* dummy - zero */
418
if (md->attribute & EFI_MEMORY_UC)
419
flags |= ASSIGN_nocache;
421
if (start > 0x1ffffffff0000000UL) {
422
mpaddr = 0x4000000000000UL - size;
423
printk(XENLOG_INFO "Remapping IO ports from "
424
"%lx to %lx\n", start, mpaddr);
429
assign_domain_mmio_page(d, mpaddr, start, size, flags);
430
/* Copy descriptor. */
432
dom_md->phys_addr = mpaddr;
433
dom_md->virt_addr = 0;
437
case EFI_CONVENTIONAL_MEMORY:
438
case EFI_LOADER_CODE:
439
case EFI_LOADER_DATA:
440
case EFI_BOOT_SERVICES_CODE:
441
case EFI_BOOT_SERVICES_DATA:
444
case EFI_UNUSABLE_MEMORY:
447
* We don't really need these, but holes in the
448
* memory map may cause Linux to assume there are
449
* uncacheable ranges within a granule.
451
dom_md->type = EFI_UNUSABLE_MEMORY;
452
dom_md->phys_addr = start;
453
dom_md->virt_addr = 0;
454
dom_md->num_pages = (end - start) >> EFI_PAGE_SHIFT;
455
dom_md->attribute = EFI_MEMORY_WB;
460
/* Print a warning but continue. */
461
printk("complete_dom0_memmap: warning: "
462
"unhandled MDT entry type %u\n", md->type);
467
/* secend scan, setup mapping for EFI_CONVENTIONAL_MEMORY etc. */
468
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
469
const efi_memory_desc_t *md = p;
470
efi_memory_desc_t *dom_md = &tables->efi_memmap[tables->num_mds];
471
u64 start = md->phys_addr;
472
u64 size = md->num_pages << EFI_PAGE_SHIFT;
473
u64 end = start + size;
477
case EFI_CONVENTIONAL_MEMORY:
478
case EFI_LOADER_CODE:
479
case EFI_LOADER_DATA:
480
case EFI_BOOT_SERVICES_CODE:
481
case EFI_BOOT_SERVICES_DATA: {
484
unsigned long left_mem =
485
(unsigned long)(d->max_pages - d->tot_pages) <<
488
if (!(md->attribute & EFI_MEMORY_WB))
491
dom_md_start = max(tables->fw_end_paddr, start);
492
dom_md_end = dom_md_start;
494
dom_md_end = min(dom_md_end + left_mem, end);
495
if (dom_md_end < dom_md_start + PAGE_SIZE)
498
dom_md->type = EFI_CONVENTIONAL_MEMORY;
499
dom_md->phys_addr = dom_md_start;
500
dom_md->virt_addr = 0;
502
(dom_md_end - dom_md_start) >>
504
dom_md->attribute = EFI_MEMORY_WB;
506
assign_new_domain0_range(d, dom_md);
508
* recalculate left_mem.
509
* we might already allocated memory in
510
* this region because of kernel loader.
511
* So we might consumed less than
512
* (dom_md_end - dom_md_start) above.
514
left_mem = (unsigned long)
515
(d->max_pages - d->tot_pages) <<
517
} while (left_mem > 0 && dom_md_end < end);
519
if (!(dom_md_end < dom_md_start + PAGE_SIZE))
530
BUG_ON(tables->fw_tables_size <
532
sizeof(tables->efi_memmap[0]) * tables->num_mds);
534
sort(tables->efi_memmap, tables->num_mds, sizeof(efi_memory_desc_t),
537
// Map low-memory holes & unmapped MMIO for legacy drivers
538
for (addr = 0; addr < ONE_MB; addr += PAGE_SIZE) {
539
if (domain_page_mapped(d, addr))
542
if (efi_mmio(addr, PAGE_SIZE)) {
544
flags = ASSIGN_writable | ASSIGN_nocache;
545
assign_domain_mmio_page(d, addr, addr, PAGE_SIZE,
549
setup_dom0_memmap_info(d, tables);
550
return tables->num_mds;
556
/*
 * Local variables:
 * mode: C
 * c-set-style: "linux"
 * c-basic-offset: 8
 * tab-width: 8
 * indent-tabs-mode: t
 * End:
 */