/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 0.9 April 30, 1999
 *
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999-2003 Hewlett-Packard Co.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * Not all EFI Runtime Services are implemented yet, as EFI only
 * supports physical-mode addressing on SoftSDV.  This is to be fixed
 * in a future version.  --drummond 1999-07-20
 *
 * Implemented EFI runtime services and virtual mode calls.  --davidm
 *
 * Goutham Rao: <goutham.rao@intel.com>
 *	Skip non-WB memory and ignore empty memory ranges.
 */
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/time.h>
#include <linux/efi.h>
#include <linux/kexec.h>

#include <asm/kregs.h>
#include <asm/meminit.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
extern efi_status_t efi_call_phys (void *, ...);

/* this should be defined in linux/kernel.h */
extern unsigned long long memparse (char *ptr, char **retptr);

/* this should be defined in linux/efi.h */
//#define EFI_INVALID_TABLE_ADDR (void *)(~0UL)

static efi_runtime_services_t *runtime;

#if defined(XEN) && !defined(CONFIG_VIRTUAL_FRAME_TABLE)
/* this is a temporary hack to avoid CONFIG_VIRTUAL_MEM_MAP */
static unsigned long mem_limit = ~0UL, max_addr = 0x100000000UL, min_addr = 0UL;
#else
static unsigned long mem_limit = ~0UL, max_addr = ~0UL, min_addr = 0UL;
#endif

#define efi_call_virt(f, args...)	(*(f))(args)
#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_cap_t *atc = NULL; \
	efi_status_t ret; \
	XEN_EFI_RR_DECLARE(rr6, rr7); \
	if (tc) \
		atc = adjust_arg(tc); \
	ia64_save_scratch_fpregs(fr); \
	XEN_EFI_RR_ENTER(rr6, rr7); \
	ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), atc); \
	XEN_EFI_RR_LEAVE(rr6, rr7); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	XEN_EFI_RR_DECLARE(rr6, rr7); \
	ia64_save_scratch_fpregs(fr); \
	XEN_EFI_RR_ENTER(rr6, rr7); \
	ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
	XEN_EFI_RR_LEAVE(rr6, rr7); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	XEN_EFI_RR_DECLARE(rr6, rr7); \
	ia64_save_scratch_fpregs(fr); \
	XEN_EFI_RR_ENTER(rr6, rr7); \
	ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
				adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
	XEN_EFI_RR_LEAVE(rr6, rr7); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_time_t *atm = NULL; \
	efi_status_t ret; \
	XEN_EFI_RR_DECLARE(rr6, rr7); \
	if (tm) \
		atm = adjust_arg(tm); \
	ia64_save_scratch_fpregs(fr); \
	XEN_EFI_RR_ENTER(rr6, rr7); \
	ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
				enabled, atm); \
	XEN_EFI_RR_LEAVE(rr6, rr7); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
		       unsigned long *data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	u32 *aattr = NULL; \
	efi_status_t ret; \
	XEN_EFI_RR_DECLARE(rr6, rr7); \
	if (attr) \
		aattr = adjust_arg(attr); \
	ia64_save_scratch_fpregs(fr); \
	XEN_EFI_RR_ENTER(rr6, rr7); \
	ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
				adjust_arg(name), adjust_arg(vendor), aattr, \
				adjust_arg(data_size), adjust_arg(data)); \
	XEN_EFI_RR_LEAVE(rr6, rr7); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	XEN_EFI_RR_DECLARE(rr6, rr7); \
	ia64_save_scratch_fpregs(fr); \
	XEN_EFI_RR_ENTER(rr6, rr7); \
	ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
				adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
	XEN_EFI_RR_LEAVE(rr6, rr7); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, unsigned long attr, \
		       unsigned long data_size, void *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	XEN_EFI_RR_DECLARE(rr6, rr7); \
	ia64_save_scratch_fpregs(fr); \
	XEN_EFI_RR_ENTER(rr6, rr7); \
	ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
				adjust_arg(name), adjust_arg(vendor), attr, data_size, \
				adjust_arg(data)); \
	XEN_EFI_RR_LEAVE(rr6, rr7); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u32 *count) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_status_t ret; \
	XEN_EFI_RR_DECLARE(rr6, rr7); \
	ia64_save_scratch_fpregs(fr); \
	XEN_EFI_RR_ENTER(rr6, rr7); \
	ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
				__va(runtime->get_next_high_mono_count), adjust_arg(count)); \
	XEN_EFI_RR_LEAVE(rr6, rr7); \
	ia64_load_scratch_fpregs(fr); \
	return ret; \
}
#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
		       unsigned long data_size, efi_char16_t *data) \
{ \
	struct ia64_fpreg fr[6]; \
	efi_char16_t *adata = NULL; \
	XEN_EFI_RR_DECLARE(rr6, rr7); \
	if (data) \
		adata = adjust_arg(data); \
	ia64_save_scratch_fpregs(fr); \
	XEN_EFI_RR_ENTER(rr6, rr7); \
	efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
			  reset_type, status, data_size, adata); \
	/* should not return, but just in case... */ \
	XEN_EFI_RR_LEAVE(rr6, rr7); \
	ia64_load_scratch_fpregs(fr); \
}
#define phys_ptr(arg)	((__typeof__(arg)) ia64_tpa(arg))

STUB_GET_TIME(phys, phys_ptr)
STUB_SET_TIME(phys, phys_ptr)
STUB_GET_WAKEUP_TIME(phys, phys_ptr)
STUB_SET_WAKEUP_TIME(phys, phys_ptr)
STUB_GET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_VARIABLE(phys, phys_ptr)
STUB_SET_VARIABLE(phys, phys_ptr)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, phys_ptr)
STUB_RESET_SYSTEM(phys, phys_ptr)

#define id(arg)	arg

STUB_GET_TIME(virt, id)
STUB_SET_TIME(virt, id)
STUB_GET_WAKEUP_TIME(virt, id)
STUB_SET_WAKEUP_TIME(virt, id)
STUB_GET_VARIABLE(virt, id)
STUB_GET_NEXT_VARIABLE(virt, id)
STUB_SET_VARIABLE(virt, id)
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, id)
STUB_RESET_SYSTEM(virt, id)
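/*
 * Illustration (not part of the original source): with the expansions
 * above, STUB_GET_TIME(phys, phys_ptr) produces a function roughly
 * equivalent to the sketch below (Xen region-register save/restore
 * elided).  phys_ptr() converts each pointer argument with ia64_tpa()
 * so the call works while EFI still runs in physical mode; the virt_*
 * variants use id(), which passes pointers through unchanged.
 *
 *	static efi_status_t
 *	phys_get_time (efi_time_t *tm, efi_time_cap_t *tc)
 *	{
 *		struct ia64_fpreg fr[6];
 *		efi_time_cap_t *atc = NULL;
 *		efi_status_t ret;
 *
 *		if (tc)
 *			atc = (efi_time_cap_t *) ia64_tpa(tc);
 *		ia64_save_scratch_fpregs(fr);
 *		ret = efi_call_phys((efi_get_time_t *) __va(runtime->get_time),
 *				    (efi_time_t *) ia64_tpa(tm), atc);
 *		ia64_load_scratch_fpregs(fr);
 *		return ret;
 *	}
 */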
void
efi_gettimeofday (struct timespec *ts)
{
	efi_time_t tm;

	memset(ts, 0, sizeof(*ts));	/* sizeof(*ts), not sizeof(ts): zero the struct, not the pointer */
	if ((*efi.get_time)(&tm, NULL) != EFI_SUCCESS)
		return;

	ts->tv_sec = mktime(tm.year, tm.month, tm.day, tm.hour, tm.minute, tm.second);
	ts->tv_nsec = tm.nanosecond;
}
static int
is_memory_available (efi_memory_desc_t *md)
{
	if (!(md->attribute & EFI_MEMORY_WB))
		return 0;

	switch (md->type) {
	      case EFI_LOADER_CODE:
	      case EFI_LOADER_DATA:
	      case EFI_BOOT_SERVICES_CODE:
	      case EFI_BOOT_SERVICES_DATA:
	      case EFI_CONVENTIONAL_MEMORY:
		return 1;
	}
	return 0;
}
typedef struct kern_memdesc {
	u64 attribute;
	u64 start;
	u64 num_pages;
} kern_memdesc_t;

static kern_memdesc_t *kern_memmap;

#define efi_md_size(md)	(md->num_pages << EFI_PAGE_SHIFT)
static inline u64
kmd_end(kern_memdesc_t *kmd)
{
	return (kmd->start + (kmd->num_pages << EFI_PAGE_SHIFT));
}

static inline u64
efi_md_end(efi_memory_desc_t *md)
{
	return (md->phys_addr + efi_md_size(md));
}

static inline int
efi_wb(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_WB);
}

static inline int
efi_uc(efi_memory_desc_t *md)
{
	return (md->attribute & EFI_MEMORY_UC);
}
static void
walk (efi_freemem_callback_t callback, void *arg, u64 attr)
{
	kern_memdesc_t *k;
	u64 start, end, voff;

	voff = (attr == EFI_MEMORY_WB) ? PAGE_OFFSET : __IA64_UNCACHED_OFFSET;
	for (k = kern_memmap; k->start != ~0UL; k++) {
		if (k->attribute != attr)
			continue;
		start = PAGE_ALIGN(k->start);
		end = (k->start + (k->num_pages << EFI_PAGE_SHIFT)) & PAGE_MASK;
		if (start < end)
			if ((*callback)(start + voff, end + voff, arg) < 0)
				return;
	}
}
/*
 * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 * has memory that is available for OS use.
 */
void
efi_memmap_walk (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_WB);
}

/*
 * Walks the EFI memory map and calls CALLBACK once for each EFI memory descriptor that
 * has memory that is available for the uncached allocator.
 */
void
efi_memmap_walk_uc (efi_freemem_callback_t callback, void *arg)
{
	walk(callback, arg, EFI_MEMORY_UC);
}
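/*
 * Usage sketch (not in the original file): callers of efi_memmap_walk()
 * pass an efi_freemem_callback_t that receives the virtual start and end
 * of each available range; a negative return stops the walk.  A
 * hypothetical callback that counts available pages:
 *
 *	static int
 *	count_pages (unsigned long start, unsigned long end, void *arg)
 *	{
 *		unsigned long *count = arg;
 *
 *		*count += (end - start) >> PAGE_SHIFT;
 *		return 0;
 *	}
 *
 *	unsigned long n = 0;
 *	efi_memmap_walk(count_pages, &n);
 */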
/*
 * Look for the PAL_CODE region reported by EFI and map it using an
 * ITR to enable safe PAL calls in virtual mode.  See IA-64 Processor
 * Abstraction Layer chapter 11 in ADAG
 */
#ifdef XEN
static void *
__efi_get_pal_addr (void)
#else
void *
efi_get_pal_addr (void)
#endif
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	int pal_code_count = 0;
	u64 vaddr, mask;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type != EFI_PAL_CODE)
			continue;

		if (++pal_code_count > 1) {
			printk(KERN_ERR "Too many EFI Pal Code memory ranges, dropped @ %lx\n",
			       md->phys_addr);
			continue;
		}
		/*
		 * The only ITLB entry in region 7 that is used is the one installed by
		 * __start().  That entry covers a 64MB range.
		 */
		mask  = ~((1 << KERNEL_TR_PAGE_SHIFT) - 1);
		vaddr = PAGE_OFFSET + md->phys_addr;

		/*
		 * We must check that the PAL mapping won't overlap with the kernel
		 * mapping.
		 *
		 * PAL code is guaranteed to be aligned on a power of 2 between 4k and
		 * 256KB, and only one ITR is needed to map it.  This implies that the
		 * PAL code is always aligned on its size, i.e., the closest matching page
		 * size supported by the TLB.  Therefore PAL code is guaranteed never to
		 * cross a 64MB boundary unless it is bigger than 64MB (very unlikely!).
		 * So for now the following test is enough to determine whether or not we
		 * need a dedicated ITR for the PAL code.
		 */
		if ((vaddr & mask) == (KERNEL_START & mask)) {
			printk(KERN_INFO "%s: no need to install ITR for PAL code\n",
			       __FUNCTION__);
			continue;
		}

		if (md->num_pages << EFI_PAGE_SHIFT > IA64_GRANULE_SIZE)
			panic("Woah!  PAL code size bigger than a granule!");

#if EFI_DEBUG
		mask  = ~((1 << IA64_GRANULE_SHIFT) - 1);

		printk(KERN_INFO "CPU %d: mapping PAL code [0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
		       smp_processor_id(), md->phys_addr,
		       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
		       vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
		return __va_efi(md->phys_addr);
	}
	printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n",
	       __FUNCTION__);
	return NULL;
}
#ifdef XEN
static void *pal_vaddr = NULL;

void *
efi_get_pal_addr(void)
{
	if (!pal_vaddr)
		pal_vaddr = __efi_get_pal_addr();
	return pal_vaddr;
}
#endif
#ifdef XEN
static void
__efi_unmap_pal_code (void *pal_vaddr)
{
	ia64_ptr(0x1, GRANULEROUNDDOWN((unsigned long)pal_vaddr),
		 IA64_GRANULE_SHIFT);
}

void
efi_unmap_pal_code (void)
{
	void *pal_vaddr = efi_get_pal_addr ();
	u64 psr;

	if (!pal_vaddr)
		return;

	/*
	 * Cannot write to CRx with PSR.ic=1
	 */
	psr = ia64_clear_ic();
	__efi_unmap_pal_code(pal_vaddr);
	ia64_set_psr(psr);		/* restore psr */
	ia64_srlz_i();
}
#endif

void
efi_map_pal_code (void)
{
	void *pal_vaddr = efi_get_pal_addr ();
	u64 psr;

	if (!pal_vaddr)
		return;

	/*
	 * Cannot write to CRx with PSR.ic=1
	 */
	psr = ia64_clear_ic();
#ifdef XEN
	/* pal_vaddr must be unpinned before pinning.
	 * This is needed in the case of a nested EFI, PAL or SAL call. */
	__efi_unmap_pal_code(pal_vaddr);
#endif
	ia64_itr(0x1, IA64_TR_PALCODE, GRANULEROUNDDOWN((unsigned long) pal_vaddr),
		 pte_val(pfn_pte(__pa(pal_vaddr) >> PAGE_SHIFT, PAGE_KERNEL)),
		 IA64_GRANULE_SHIFT);
	ia64_set_psr(psr);		/* restore psr */
	ia64_srlz_i();
}
void __init
efi_init (void)
{
	void *efi_map_start, *efi_map_end;
	efi_config_table_t *config_tables;
	efi_char16_t *c16;
	u64 efi_desc_size;
	char *cp, vendor[100] = "unknown";
	int i;

	/* it's too early to be able to use the standard kernel command line support... */
#ifdef XEN
	extern char saved_command_line[];
	for (cp = saved_command_line; *cp; ) {
#else
	for (cp = boot_command_line; *cp; ) {
#endif
		if (memcmp(cp, "mem=", 4) == 0) {
			mem_limit = memparse(cp + 4, &cp);
		} else if (memcmp(cp, "max_addr=", 9) == 0) {
			max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else if (memcmp(cp, "min_addr=", 9) == 0) {
			min_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}
	if (min_addr != 0UL)
		printk(KERN_INFO "Ignoring memory below %luMB\n", min_addr >> 20);
	if (max_addr != ~0UL)
		printk(KERN_INFO "Ignoring memory above %luMB\n", max_addr >> 20);

	efi.systab = __va(ia64_boot_param->efi_systab);

	/*
	 * Verify the EFI Table
	 */
	if (efi.systab == NULL)
		panic("Woah! Can't find EFI system table.\n");
	if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
		panic("Woah! EFI system table signature incorrect\n");
	if ((efi.systab->hdr.revision >> 16) == 0)
		printk(KERN_WARNING "Warning: EFI system table version "
		       "%d.%02d, expected 1.00 or greater\n",
		       efi.systab->hdr.revision >> 16,
		       efi.systab->hdr.revision & 0xffff);

	config_tables = __va(efi.systab->tables);

	/* Show what we know for posterity */
	c16 = __va(efi.systab->fw_vendor);
	if (c16) {
		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
			vendor[i] = *c16++;
		vendor[i] = '\0';
	}

	printk(KERN_INFO "EFI v%u.%.02u by %s:",
	       efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor);

	efi.mps        = EFI_INVALID_TABLE_ADDR;
	efi.acpi       = EFI_INVALID_TABLE_ADDR;
	efi.acpi20     = EFI_INVALID_TABLE_ADDR;
	efi.smbios     = EFI_INVALID_TABLE_ADDR;
	efi.sal_systab = EFI_INVALID_TABLE_ADDR;
	efi.boot_info  = EFI_INVALID_TABLE_ADDR;
	efi.hcdp       = EFI_INVALID_TABLE_ADDR;
	efi.uga        = EFI_INVALID_TABLE_ADDR;

	for (i = 0; i < (int) efi.systab->nr_tables; i++) {
		if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) {
			efi.mps = config_tables[i].table;
			printk(" MPS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) {
			efi.acpi20 = config_tables[i].table;
			printk(" ACPI 2.0=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) {
			efi.acpi = config_tables[i].table;
			printk(" ACPI=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) {
			efi.smbios = config_tables[i].table;
			printk(" SMBIOS=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) {
			efi.sal_systab = config_tables[i].table;
			printk(" SALsystab=0x%lx", config_tables[i].table);
		} else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) {
			efi.hcdp = config_tables[i].table;
			printk(" HCDP=0x%lx", config_tables[i].table);
		}
	}
	printk("\n");

	runtime = __va(efi.systab->runtime);
	efi.get_time = phys_get_time;
	efi.set_time = phys_set_time;
	efi.get_wakeup_time = phys_get_wakeup_time;
	efi.set_wakeup_time = phys_set_wakeup_time;
	efi.get_variable = phys_get_variable;
	efi.get_next_variable = phys_get_next_variable;
	efi.set_variable = phys_set_variable;
	efi.get_next_high_mono_count = phys_get_next_high_mono_count;
	efi.reset_system = phys_reset_system;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

#if EFI_DEBUG
	/* print EFI memory map: */
	{
		efi_memory_desc_t *md;
		void *p;

		for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) {
			md = p;
			printk("mem%02u: type=%u, attr=0x%lx, range=[0x%016lx-0x%016lx) (%luMB)\n",
			       i, md->type, md->attribute, md->phys_addr,
			       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
			       md->num_pages >> (20 - EFI_PAGE_SHIFT));
		}
	}
#endif

	efi_enter_virtual_mode();
}
void
efi_enter_virtual_mode (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	efi_status_t status;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
#ifdef XEN
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			if (md->attribute & EFI_MEMORY_WB)
				md->virt_addr = __IA64_EFI_CACHED_OFFSET |
						md->phys_addr;
			else if (md->attribute & (EFI_MEMORY_UC | EFI_MEMORY_WC |
						  EFI_MEMORY_WT))
				md->virt_addr = __IA64_EFI_UNCACHED_OFFSET |
						md->phys_addr;
		}
#else
		if (md->attribute & EFI_MEMORY_RUNTIME) {
			/*
			 * Some descriptors have multiple bits set, so the order of
			 * the tests is relevant.
			 */
			if (md->attribute & EFI_MEMORY_WB) {
				md->virt_addr = (u64) __va(md->phys_addr);
			} else if (md->attribute & EFI_MEMORY_UC) {
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
			} else if (md->attribute & EFI_MEMORY_WC) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D | _PAGE_MA_WC
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WC mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			} else if (md->attribute & EFI_MEMORY_WT) {
#if 0
				md->virt_addr = ia64_remap(md->phys_addr, (_PAGE_A | _PAGE_P
									   | _PAGE_D | _PAGE_MA_WT
									   | _PAGE_PL_0
									   | _PAGE_AR_RW));
#else
				printk(KERN_INFO "EFI_MEMORY_WT mapping\n");
				md->virt_addr = (u64) ioremap(md->phys_addr, 0);
#endif
			}
		}
#endif
	}

	status = efi_call_phys(__va(runtime->set_virtual_address_map),
			       ia64_boot_param->efi_memmap_size,
			       efi_desc_size, ia64_boot_param->efi_memdesc_version,
			       ia64_boot_param->efi_memmap);
	if (status != EFI_SUCCESS) {
		printk(KERN_WARNING "warning: unable to switch EFI into virtual mode "
		       "(status=%lu)\n", status);
		return;
	}

	/*
	 * Now that EFI is in virtual mode, we call the EFI functions more efficiently:
	 */
	efi.get_time = virt_get_time;
	efi.set_time = virt_set_time;
	efi.get_wakeup_time = virt_get_wakeup_time;
	efi.set_wakeup_time = virt_set_wakeup_time;
	efi.get_variable = virt_get_variable;
	efi.get_next_variable = virt_get_next_variable;
	efi.set_variable = virt_set_variable;
	efi.get_next_high_mono_count = virt_get_next_high_mono_count;
	efi.reset_system = virt_reset_system;
}
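/*
 * Note (illustrative, not from the original source): after
 * efi_enter_virtual_mode() succeeds, runtime services go through the
 * virt_* stubs, e.g.:
 *
 *	efi_time_t tm;
 *
 *	if ((*efi.get_time)(&tm, NULL) == EFI_SUCCESS)
 *		printk(KERN_INFO "EFI time: %04d-%02d-%02d\n",
 *		       tm.year, tm.month, tm.day);
 *
 * SetVirtualAddressMap() may be invoked only once; once it has
 * succeeded, physical-mode calls through the phys_* stubs are no
 * longer valid.
 */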
/*
 * Walk the EFI memory map looking for the I/O port range.  There can only be one entry of
 * this type; other I/O port ranges should be described via ACPI.
 */
u64
efi_get_iobase (void)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (md->type == EFI_MEMORY_MAPPED_IO_PORT_SPACE) {
			if (md->attribute & EFI_MEMORY_UC)
				return md->phys_addr;
		}
	}
	return 0;
}
static struct kern_memdesc *
kern_memory_descriptor (unsigned long phys_addr)
{
	struct kern_memdesc *md;

	for (md = kern_memmap; md->start != ~0UL; md++) {
		if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
			return md;
	}
	return NULL;
}

static efi_memory_desc_t *
efi_memory_descriptor (unsigned long phys_addr)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
			return md;
	}
	return NULL;
}
u32
efi_mem_type (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->type;
	return 0;
}

u64
efi_mem_attributes (unsigned long phys_addr)
{
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);

	if (md)
		return md->attribute;
	return 0;
}
EXPORT_SYMBOL(efi_mem_attributes);

u64
efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
	u64 attr;

	if (!md)
		return 0;

	/*
	 * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
	 * the kernel that firmware needs this region mapped.
	 */
	attr = md->attribute & ~EFI_MEMORY_RUNTIME;
	do {
		unsigned long md_end = efi_md_end(md);

		if (end <= md_end)
			return attr;

		md = efi_memory_descriptor(md_end);
		if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
			return 0;
	} while (md);
	return 0;
}
u64
kern_mem_attribute (unsigned long phys_addr, unsigned long size)
{
	unsigned long end = phys_addr + size;
	struct kern_memdesc *md;
	u64 attr;

	/*
	 * This is a hack for ioremap calls before we set up kern_memmap.
	 * Maybe we should do efi_memmap_init() earlier instead.
	 */
	if (!kern_memmap) {
		attr = efi_mem_attribute(phys_addr, size);
		if (attr & EFI_MEMORY_WB)
			return EFI_MEMORY_WB;
		return 0;
	}

	md = kern_memory_descriptor(phys_addr);
	if (!md)
		return 0;

	attr = md->attribute;
	do {
		unsigned long md_end = kmd_end(md);

		if (end <= md_end)
			return attr;

		md = kern_memory_descriptor(md_end);
		if (!md || md->attribute != attr)
			return 0;
	} while (md);
	return 0;
}
EXPORT_SYMBOL(kern_mem_attribute);
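/*
 * Usage sketch (hypothetical, not part of the original source): mapping
 * code can consult these helpers before establishing a new mapping of
 * [phys_addr, phys_addr+size) to avoid creating attribute aliases:
 *
 *	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
 *		// range is covered by the cached kernel identity mapping,
 *		// so only a cacheable (WB) mapping is safe
 *	else if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_UC)
 *		// firmware reports UC support for the entire range
 *
 * See Documentation/ia64/aliasing.txt for the full rules.
 */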
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
	u64 attr;

	/*
	 * /dev/mem reads and writes use copy_to_user(), which implicitly
	 * uses a granule-sized kernel identity mapping.  It's really
	 * only safe to do this for regions in kern_memmap.  For more
	 * details, see Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
		return 1;
	return 0;
}

int
valid_mmap_phys_addr_range (unsigned long pfn, unsigned long size)
{
	/*
	 * MMIO regions are often missing from the EFI memory map.
	 * We must allow mmap of them for programs like X, so we
	 * currently can't do any useful validation.
	 */
	return 1;
}
pgprot_t
phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
		     pgprot_t vma_prot)
{
	unsigned long phys_addr = pfn << PAGE_SHIFT;
	u64 attr;

	/*
	 * For /dev/mem mmap, we use user mappings, but if the region is
	 * in kern_memmap (and hence may be covered by a kernel mapping),
	 * we must use the same attribute as the kernel mapping.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);
	else if (attr & EFI_MEMORY_UC)
		return pgprot_noncached(vma_prot);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported, we prefer that.
	 */
	if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return pgprot_cacheable(vma_prot);

	return pgprot_noncached(vma_prot);
}
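/*
 * Sketch (not in the original): /dev/mem's mmap path applies the
 * protection chosen above before remapping, roughly:
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */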
int __init
efi_uart_console_only(void)
{
	efi_status_t status;
	char *s, name[] = "ConOut";
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	efi_char16_t *utf16, name_utf16[32];
	unsigned char data[1024];
	unsigned long size = sizeof(data);
	struct efi_generic_dev_path *hdr, *end_addr;
	int uart = 0;

	/* Convert to UTF-16 */
	utf16 = name_utf16;
	s = name;
	while (*s)
		*utf16++ = *s++ & 0x7f;
	*utf16 = 0;

	status = efi.get_variable(name_utf16, &guid, NULL, &size, data);
	if (status != EFI_SUCCESS) {
		printk(KERN_ERR "No EFI %s variable?\n", name);
		return 0;
	}

	hdr = (struct efi_generic_dev_path *) data;
	end_addr = (struct efi_generic_dev_path *) ((u8 *) data + size);
	while (hdr < end_addr) {
		if (hdr->type == EFI_DEV_MSG &&
		    hdr->sub_type == EFI_DEV_MSG_UART)
			uart = 1;
		else if (hdr->type == EFI_DEV_END_PATH ||
			 hdr->type == EFI_DEV_END_PATH2) {
			if (!uart)
				return 0;
			if (hdr->sub_type == EFI_DEV_END_ENTIRE)
				return 1;
			uart = 0;
		}
		hdr = (struct efi_generic_dev_path *) ((u8 *) hdr + hdr->length);
	}
	printk(KERN_ERR "Malformed %s value\n", name);
	return 0;
}
/*
 * Look for the first granule-aligned memory descriptor that is big
 * enough to hold the EFI memory map.  Make sure this descriptor is at
 * least granule-sized so it does not get trimmed.
 */
struct kern_memdesc *
find_memmap_space (void)
{
	u64	contig_low=0, contig_high=0;
	u64	as = 0, ae;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64	space_needed, efi_desc_size;
	unsigned long total_mem = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	/*
	 * Worst case: we need 3 kernel descriptors for each efi descriptor
	 * (if every entry has a WB part in the middle, and UC head and tail),
	 * plus one for the end marker.
	 */
	space_needed = sizeof(kern_memdesc_t) *
		(3 * (ia64_boot_param->efi_memmap_size/efi_desc_size) + 1);

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md))
			continue;

		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md) || md->type == EFI_LOADER_DATA)
			continue;

		/* Round ends inward to granule boundaries */
		as = max(contig_low, md->phys_addr);
		ae = min(contig_high, efi_md_end(md));

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;

		if (ae - as > space_needed)
			break;
	}
	if (p >= efi_map_end)
		panic("Can't allocate space for kernel memory descriptors");

	return __va(as);
}
/*
 * Walk the EFI memory map and gather all memory available for kernel
 * to use.  We can allocate partial granules only if the unavailable
 * parts exist, and are WB.
 */
void
efi_memmap_init(unsigned long *s, unsigned long *e)
{
	struct kern_memdesc *k, *prev = NULL;
	u64	contig_low=0, contig_high=0;
	u64	as, ae, lim;
	void *efi_map_start, *efi_map_end, *p, *q;
	efi_memory_desc_t *md, *pmd = NULL, *check_md;
	u64	efi_desc_size;
	unsigned long total_mem = 0;

	k = kern_memmap = find_memmap_space();

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; pmd = md, p += efi_desc_size) {
		md = p;
		if (!efi_wb(md)) {
			if (efi_uc(md) && (md->type == EFI_CONVENTIONAL_MEMORY ||
					   md->type == EFI_BOOT_SERVICES_DATA)) {
				k->attribute = EFI_MEMORY_UC;
				k->start = md->phys_addr;
				k->num_pages = md->num_pages;
				k++;
			}
			continue;
		}

		/* this works around a problem in the ski bootloader */
		if (running_on_sim && md->type != EFI_CONVENTIONAL_MEMORY)
			continue;

		if (pmd == NULL || !efi_wb(pmd) || efi_md_end(pmd) != md->phys_addr) {
			contig_low = GRANULEROUNDUP(md->phys_addr);
			contig_high = efi_md_end(md);
			for (q = p + efi_desc_size; q < efi_map_end; q += efi_desc_size) {
				check_md = q;
				if (!efi_wb(check_md))
					break;
				if (contig_high != check_md->phys_addr)
					break;
				contig_high = efi_md_end(check_md);
			}
			contig_high = GRANULEROUNDDOWN(contig_high);
		}
		if (!is_memory_available(md))
			continue;

#ifdef CONFIG_CRASH_DUMP
		/* saved_max_pfn should ignore max_addr= command line arg */
		if (saved_max_pfn < (efi_md_end(md) >> PAGE_SHIFT))
			saved_max_pfn = (efi_md_end(md) >> PAGE_SHIFT);
#endif
		/*
		 * Round ends inward to granule boundaries
		 * Give trimmings to uncached allocator
		 */
		if (md->phys_addr < contig_low) {
			lim = min(efi_md_end(md), contig_low);
			if (efi_uc(md)) {
				if (k > kern_memmap && (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages += (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = md->phys_addr;
					k->num_pages = (lim - md->phys_addr) >> EFI_PAGE_SHIFT;
					k++;
				}
			}
			as = contig_low;
		} else
			as = md->phys_addr;

		if (efi_md_end(md) > contig_high) {
			lim = max(md->phys_addr, contig_high);
			if (efi_uc(md)) {
				if (lim == md->phys_addr && k > kern_memmap &&
				    (k-1)->attribute == EFI_MEMORY_UC &&
				    kmd_end(k-1) == md->phys_addr) {
					(k-1)->num_pages += md->num_pages;
				} else {
					k->attribute = EFI_MEMORY_UC;
					k->start = lim;
					k->num_pages = (efi_md_end(md) - lim) >> EFI_PAGE_SHIFT;
					k++;
				}
			}
			ae = contig_high;
		} else
			ae = efi_md_end(md);

		/* keep within max_addr= and min_addr= command line arg */
		as = max(as, min_addr);
		ae = min(ae, max_addr);
		if (ae <= as)
			continue;

		/* avoid going over mem= command line arg */
		if (total_mem + (ae - as) > mem_limit)
			ae -= total_mem + (ae - as) - mem_limit;

		if (ae <= as)
			continue;
		if (prev && kmd_end(prev) == md->phys_addr) {
			prev->num_pages += (ae - as) >> EFI_PAGE_SHIFT;
			total_mem += ae - as;
			continue;
		}
		k->attribute = EFI_MEMORY_WB;
		k->start = as;
		k->num_pages = (ae - as) >> EFI_PAGE_SHIFT;
		total_mem += ae - as;
		prev = k++;
	}
	k->start = ~0L; /* end-marker */

	/* reserve the memory we are using for kern_memmap */
	*s = (u64)kern_memmap;
	*e = (u64)++k;
}
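/*
 * Usage note (illustrative, not from the original source):
 * efi_memmap_init() runs once during early boot.  It returns, via *s
 * and *e, the bounds of the kern_memmap array itself so the caller can
 * reserve that range from the boot allocator, e.g.:
 *
 *	unsigned long s, e;
 *
 *	efi_memmap_init(&s, &e);
 *	reserve_memory_range(s, e);	// hypothetical reservation helper
 */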
void __init
efi_initialize_iomem_resources(struct resource *code_resource,
			       struct resource *data_resource)
{
	struct resource *res;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	char *name;
	unsigned long flags;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;

		if (md->num_pages == 0) /* should not happen */
			continue;

		flags = IORESOURCE_MEM;
		switch (md->type) {

			case EFI_MEMORY_MAPPED_IO:
			case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
				continue;

			case EFI_LOADER_CODE:
			case EFI_LOADER_DATA:
			case EFI_BOOT_SERVICES_DATA:
			case EFI_BOOT_SERVICES_CODE:
			case EFI_CONVENTIONAL_MEMORY:
				if (md->attribute & EFI_MEMORY_WP) {
					name = "System ROM";
					flags |= IORESOURCE_READONLY;
				} else {
					name = "System RAM";
				}
				break;

			case EFI_ACPI_MEMORY_NVS:
				name = "ACPI Non-volatile Storage";
				flags |= IORESOURCE_BUSY;
				break;

			case EFI_UNUSABLE_MEMORY:
				name = "reserved";
				flags |= IORESOURCE_BUSY | IORESOURCE_DISABLED;
				break;

			case EFI_RESERVED_TYPE:
			case EFI_RUNTIME_SERVICES_CODE:
			case EFI_RUNTIME_SERVICES_DATA:
			case EFI_ACPI_RECLAIM_MEMORY:
			default:
				name = "reserved";
				flags |= IORESOURCE_BUSY;
				break;
		}

		if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
			printk(KERN_ERR "failed to allocate resource for iomem\n");
			return;
		}

		res->name = name;
		res->start = md->phys_addr;
		res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
		res->flags = flags;

		if (insert_resource(&iomem_resource, res) < 0)
			kfree(res);
		else {
			/*
			 * We don't know which region contains
			 * kernel data so we try it repeatedly and
			 * let the resource manager test it.
			 */
			insert_resource(res, code_resource);
			insert_resource(res, data_resource);
#ifdef CONFIG_KEXEC
			insert_resource(res, &efi_memmap_res);
			insert_resource(res, &boot_param_res);
			if (crashk_res.end > crashk_res.start)
				insert_resource(res, &crashk_res);
#endif
		}
	}
}
#if defined(CONFIG_KEXEC) || defined(XEN)
/* find a block of memory aligned to 64M, excluding reserved regions;
   rsvd_regions are sorted */
unsigned long __init
kdump_find_rsvd_region (unsigned long size,
			struct rsvd_region *r, int n)
{
	int i;
	u64 start, end;
	u64 alignment = 1UL << _PAGE_SIZE_64M;
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (!efi_wb(md))
			continue;
		start = ALIGN(md->phys_addr, alignment);
		end = efi_md_end(md);
		for (i = 0; i < n; i++) {
			if (__pa(r[i].start) >= start && __pa(r[i].end) < end) {
				if (__pa(r[i].start) > start + size)
					return start;
				start = ALIGN(__pa(r[i].end), alignment);
				if (i < n-1 && __pa(r[i+1].start) < start + size)
					continue;
				else
					break;
			}
		}
		if (end > start + size)
			return start;
	}

	printk(KERN_WARNING "Cannot reserve 0x%lx byte of memory for crashdump\n",
	       size);
	return ~0UL;
}
#endif
#ifdef CONFIG_PROC_VMCORE
/* locate the size of the EFI memory descriptor at a certain address */
unsigned long __init
vmcore_find_descriptor_size (unsigned long address)
{
	void *efi_map_start, *efi_map_end, *p;
	efi_memory_desc_t *md;
	u64 efi_desc_size;
	unsigned long ret = 0;

	efi_map_start = __va(ia64_boot_param->efi_memmap);
	efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
	efi_desc_size = ia64_boot_param->efi_memdesc_size;

	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
		md = p;
		if (efi_wb(md) && md->type == EFI_LOADER_DATA
		    && md->phys_addr == address) {
			ret = efi_md_size(md);
			break;
		}
	}

	if (ret == 0)
		printk(KERN_WARNING "Cannot locate EFI vmcore descriptor\n");

	return ret;
}
#endif