/*
 * Xen domain builder -- i386 and x86_64 bits.
 *
 * Most architecture-specific code for x86 goes here.
 *   - prepare page tables.
 *   - fill architecture-specific structs.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * written 2006 by Gerd Hoffmann <kraxel@suse.de>.
 */
32
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>

/* NOTE(review): reconstructed include block -- some original lines were lost
 * in extraction; the stdio/stdlib/inttypes headers are assumed from the
 * printf/PRIx64/malloc usage below. Verify against the original file. */
#include <xen/foreign/x86_32.h>
#include <xen/foreign/x86_64.h>
#include <xen/hvm/hvm_info_table.h>
#include <xen/io/protocols.h>

#include "xg_private.h"
#include "xc_dom.h"
41
/* ------------------------------------------------------------------------ */
43
/* A superpage covers 2^9 = 512 ordinary 4k frames. */
#define SUPERPAGE_PFN_SHIFT  9
#define SUPERPAGE_NR_PFNS    (1UL << SUPERPAGE_PFN_SHIFT)

/* Mask with the low (bits) bits set, e.g. bits_to_mask(12) == 0xfff. */
#define bits_to_mask(bits)       (((xen_vaddr_t)1 << (bits))-1)
#define round_down(addr, mask)   ((addr) & ~(mask))
/* NB: "rounds up" to the last byte of the region, i.e. all mask bits set,
 * one below the next boundary -- callers rely on this inclusive form. */
#define round_up(addr, mask)     ((addr) | (mask))
51
/*
 * Count how many page-table pages are needed at one paging level to map
 * the virtual range [start, end], where each table at this level covers
 * 2^bits of virtual address space.  bits == 0 means the level is unused;
 * bits == word size means this is the root (pgd), of which exactly one
 * is needed.
 */
static int nr_page_tables(struct xc_dom_image *dom,
                          xen_vaddr_t start, xen_vaddr_t end,
                          unsigned long bits)
{
    xen_vaddr_t mask = bits_to_mask(bits);
    int tables;

    if ( bits == 0 )
        return 0; /* unused */

    if ( bits == (8 * sizeof(unsigned long)) )
    {
        /* must be pgd, need one */
        start = 0;
        end = -1;
        tables = 1;
    }
    else
    {
        /* align the range to table boundaries, then count tables */
        start = round_down(start, mask);
        end = round_up(end, mask);
        tables = ((end - start) >> bits) + 1;
    }

    DOMPRINTF("%s: 0x%016" PRIx64 "/%ld: 0x%016" PRIx64
              " -> 0x%016" PRIx64 ", %d table(s)",
              __FUNCTION__, mask, bits, start, end, tables);
    return tables;
}
80
static int count_pgtables(struct xc_dom_image *dom, int pae,
81
int l4_bits, int l3_bits, int l2_bits, int l1_bits)
83
int pages, extra_pages;
84
xen_vaddr_t try_virt_end;
86
extra_pages = dom->alloc_bootstack ? 1 : 0;
87
extra_pages += dom->extra_pages;
88
extra_pages += 128; /* 512kB padding */
92
try_virt_end = round_up(dom->virt_alloc_end + pages * PAGE_SIZE_X86,
93
bits_to_mask(22)); /* 4MB alignment */
95
nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l4_bits);
97
nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l3_bits);
99
nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l2_bits);
101
nr_page_tables(dom, dom->parms.virt_base, try_virt_end, l1_bits);
102
if (pae && try_virt_end < 0xc0000000)
104
DOMPRINTF("%s: PAE: extra l2 page table for l3#3",
108
dom->pgtables = dom->pg_l4 + dom->pg_l3 + dom->pg_l2 + dom->pg_l1;
109
pages = dom->pgtables + extra_pages;
110
if ( dom->virt_alloc_end + pages * PAGE_SIZE_X86 <= try_virt_end + 1 )
113
dom->virt_pgtab_end = try_virt_end + 1;
117
/* ------------------------------------------------------------------------ */
118
/* i386 pagetables */
120
/* PTE permission bits for the i386 (2-level and PAE) page tables.
 * PAE l3 entries only carry the present bit. */
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT)
124
static int count_pgtables_x86_32(struct xc_dom_image *dom)
126
return count_pgtables(dom, 0, 0, 0, 32, L2_PAGETABLE_SHIFT_I386);
129
static int count_pgtables_x86_32_pae(struct xc_dom_image *dom)
131
return count_pgtables(dom, 1, 0, 32,
132
L3_PAGETABLE_SHIFT_PAE, L2_PAGETABLE_SHIFT_PAE);
135
/* Convert a page frame number to a physical address. */
#define pfn_to_paddr(pfn) ((xen_paddr_t)(pfn) << PAGE_SHIFT_X86)
137
static int setup_pgtables_x86_32(struct xc_dom_image *dom)
139
xen_pfn_t l2pfn = dom->pgtables_seg.pfn;
140
xen_pfn_t l1pfn = dom->pgtables_seg.pfn + dom->pg_l2;
141
l2_pgentry_32_t *l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
142
l1_pgentry_32_t *l1tab = NULL;
143
unsigned long l2off, l1off;
150
for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
151
addr += PAGE_SIZE_X86 )
155
/* get L1 tab, make L2 entry */
156
l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
159
l2off = l2_table_offset_i386(addr);
161
pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
166
l1off = l1_table_offset_i386(addr);
167
pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
169
pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
170
if ( (addr >= dom->pgtables_seg.vstart) &&
171
(addr < dom->pgtables_seg.vend) )
172
l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
173
if ( l1off == (L1_PAGETABLE_ENTRIES_I386 - 1) )
179
xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
180
"%s: xc_dom_pfn_to_ptr failed", __FUNCTION__);
185
* Move the l3 page table page below 4G for guests which do not
186
* support the extended-cr3 format. The l3 is currently empty so we
187
* do not need to preserve the current contents.
189
static xen_pfn_t move_l3_below_4G(struct xc_dom_image *dom,
197
mmu = xc_alloc_mmu_updates(dom->xch, dom->guest_domid);
200
DOMPRINTF("%s: failed at %d", __FUNCTION__, __LINE__);
204
xc_dom_unmap_one(dom, l3pfn);
206
new_l3mfn = xc_make_page_below_4G(dom->xch, dom->guest_domid, l3mfn);
210
dom->p2m_host[l3pfn] = new_l3mfn;
211
if ( xc_dom_update_guest_p2m(dom) != 0 )
214
if ( xc_add_mmu_update(dom->xch, mmu,
215
(((unsigned long long)new_l3mfn)
216
<< XC_DOM_PAGE_SHIFT(dom)) |
217
MMU_MACHPHYS_UPDATE, l3pfn) )
220
if ( xc_flush_mmu_updates(dom->xch, mmu) )
224
* This ensures that the entire pgtables_seg is mapped by a single
225
* mmap region. arch_setup_bootlate() relies on this to be able to
226
* unmap and pin the pagetables.
228
if ( xc_dom_seg_to_ptr(dom, &dom->pgtables_seg) == NULL )
231
l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
234
DOMPRINTF("%s: xc_dom_pfn_to_ptr(dom, l3pfn, 1) => NULL",
236
return l3mfn; /* our one call site will call xc_dom_panic and fail */
238
memset(l3tab, 0, XC_DOM_PAGE_SIZE(dom));
240
DOMPRINTF("%s: successfully relocated L3 below 4G. "
241
"(L3 PFN %#"PRIpfn" MFN %#"PRIpfn"=>%#"PRIpfn")",
242
__FUNCTION__, l3pfn, l3mfn, new_l3mfn);
252
static int setup_pgtables_x86_32_pae(struct xc_dom_image *dom)
254
xen_pfn_t l3pfn = dom->pgtables_seg.pfn;
255
xen_pfn_t l2pfn = dom->pgtables_seg.pfn + dom->pg_l3;
256
xen_pfn_t l1pfn = dom->pgtables_seg.pfn + dom->pg_l3 + dom->pg_l2;
257
l3_pgentry_64_t *l3tab;
258
l2_pgentry_64_t *l2tab = NULL;
259
l1_pgentry_64_t *l1tab = NULL;
260
unsigned long l3off, l2off, l1off;
263
xen_pfn_t l3mfn = xc_dom_p2m_guest(dom, l3pfn);
265
if ( dom->parms.pae == 1 )
267
if ( l3mfn >= 0x100000 )
268
l3mfn = move_l3_below_4G(dom, l3pfn, l3mfn);
270
if ( l3mfn >= 0x100000 )
272
xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,"%s: cannot move L3"
273
" below 4G. extended-cr3 not supported by guest. "
274
"(L3 PFN %#"PRIpfn" MFN %#"PRIpfn")",
275
__FUNCTION__, l3pfn, l3mfn);
280
l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
284
for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
285
addr += PAGE_SIZE_X86 )
289
/* get L2 tab, make L3 entry */
290
l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
293
l3off = l3_table_offset_pae(addr);
295
pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
301
/* get L1 tab, make L2 entry */
302
l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
305
l2off = l2_table_offset_pae(addr);
307
pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
308
if ( l2off == (L2_PAGETABLE_ENTRIES_PAE - 1) )
314
l1off = l1_table_offset_pae(addr);
315
pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
317
pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
318
if ( (addr >= dom->pgtables_seg.vstart) &&
319
(addr < dom->pgtables_seg.vend) )
320
l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
321
if ( l1off == (L1_PAGETABLE_ENTRIES_PAE - 1) )
325
if ( dom->virt_pgtab_end <= 0xc0000000 )
327
DOMPRINTF("%s: PAE: extra l2 page table for l3#3", __FUNCTION__);
328
l3tab[3] = pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
333
xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
334
"%s: xc_dom_pfn_to_ptr failed", __FUNCTION__);
342
/* ------------------------------------------------------------------------ */
343
/* x86_64 pagetables */
345
static int count_pgtables_x86_64(struct xc_dom_image *dom)
347
return count_pgtables(dom, 0,
348
L4_PAGETABLE_SHIFT_X86_64 + 9,
349
L4_PAGETABLE_SHIFT_X86_64,
350
L3_PAGETABLE_SHIFT_X86_64,
351
L2_PAGETABLE_SHIFT_X86_64);
354
/* PTE permission bits for the x86_64 page tables.  Undefine the i386
 * versions first: L3_PROT differs, and redefining a macro with a
 * different body without #undef is a constraint violation. */
#undef L1_PROT
#undef L2_PROT
#undef L3_PROT
#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
359
static int setup_pgtables_x86_64(struct xc_dom_image *dom)
361
xen_pfn_t l4pfn = dom->pgtables_seg.pfn;
362
xen_pfn_t l3pfn = dom->pgtables_seg.pfn + dom->pg_l4;
363
xen_pfn_t l2pfn = dom->pgtables_seg.pfn + dom->pg_l4 + dom->pg_l3;
365
dom->pgtables_seg.pfn + dom->pg_l4 + dom->pg_l3 + dom->pg_l2;
366
l4_pgentry_64_t *l4tab = xc_dom_pfn_to_ptr(dom, l4pfn, 1);
367
l3_pgentry_64_t *l3tab = NULL;
368
l2_pgentry_64_t *l2tab = NULL;
369
l1_pgentry_64_t *l1tab = NULL;
370
uint64_t l4off, l3off, l2off, l1off;
377
for ( addr = dom->parms.virt_base; addr < dom->virt_pgtab_end;
378
addr += PAGE_SIZE_X86 )
382
/* get L3 tab, make L4 entry */
383
l3tab = xc_dom_pfn_to_ptr(dom, l3pfn, 1);
386
l4off = l4_table_offset_x86_64(addr);
388
pfn_to_paddr(xc_dom_p2m_guest(dom, l3pfn)) | L4_PROT;
394
/* get L2 tab, make L3 entry */
395
l2tab = xc_dom_pfn_to_ptr(dom, l2pfn, 1);
398
l3off = l3_table_offset_x86_64(addr);
400
pfn_to_paddr(xc_dom_p2m_guest(dom, l2pfn)) | L3_PROT;
401
if ( l3off == (L3_PAGETABLE_ENTRIES_X86_64 - 1) )
408
/* get L1 tab, make L2 entry */
409
l1tab = xc_dom_pfn_to_ptr(dom, l1pfn, 1);
412
l2off = l2_table_offset_x86_64(addr);
414
pfn_to_paddr(xc_dom_p2m_guest(dom, l1pfn)) | L2_PROT;
415
if ( l2off == (L2_PAGETABLE_ENTRIES_X86_64 - 1) )
421
l1off = l1_table_offset_x86_64(addr);
422
pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
424
pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
425
if ( (addr >= dom->pgtables_seg.vstart) &&
426
(addr < dom->pgtables_seg.vend) )
427
l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
428
if ( l1off == (L1_PAGETABLE_ENTRIES_X86_64 - 1) )
434
xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
435
"%s: xc_dom_pfn_to_ptr failed", __FUNCTION__);
444
/* ------------------------------------------------------------------------ */
446
static int alloc_magic_pages(struct xc_dom_image *dom)
448
size_t p2m_size = dom->total_pages * dom->arch_hooks->sizeof_pfn;
450
/* allocate phys2mach table */
451
if ( xc_dom_alloc_segment(dom, &dom->p2m_seg, "phys2mach", 0, p2m_size) )
453
dom->p2m_guest = xc_dom_seg_to_ptr(dom, &dom->p2m_seg);
454
if ( dom->p2m_guest == NULL )
457
/* allocate special pages */
458
dom->start_info_pfn = xc_dom_alloc_page(dom, "start info");
459
dom->xenstore_pfn = xc_dom_alloc_page(dom, "xenstore");
460
dom->console_pfn = xc_dom_alloc_page(dom, "console");
461
if ( xc_dom_feature_translated(dom) )
462
dom->shared_info_pfn = xc_dom_alloc_page(dom, "shared info");
463
dom->alloc_bootstack = 1;
468
/* ------------------------------------------------------------------------ */
470
static int start_info_x86_32(struct xc_dom_image *dom)
472
start_info_x86_32_t *start_info =
473
xc_dom_pfn_to_ptr(dom, dom->start_info_pfn, 1);
475
xc_dom_feature_translated(dom) ? dom->shared_info_pfn : dom->
478
DOMPRINTF_CALLED(dom->xch);
480
if ( start_info == NULL )
482
DOMPRINTF("%s: xc_dom_pfn_to_ptr failed on start_info", __FUNCTION__);
483
return -1; /* our caller throws away our return value :-/ */
486
memset(start_info, 0, sizeof(*start_info));
487
strncpy(start_info->magic, dom->guest_type, sizeof(start_info->magic));
488
start_info->magic[sizeof(start_info->magic) - 1] = '\0';
489
start_info->nr_pages = dom->total_pages;
490
start_info->shared_info = shinfo << PAGE_SHIFT_X86;
491
start_info->pt_base = dom->pgtables_seg.vstart;
492
start_info->nr_pt_frames = dom->pgtables;
493
start_info->mfn_list = dom->p2m_seg.vstart;
495
start_info->flags = dom->flags;
496
start_info->store_mfn = xc_dom_p2m_guest(dom, dom->xenstore_pfn);
497
start_info->store_evtchn = dom->xenstore_evtchn;
498
start_info->console.domU.mfn = xc_dom_p2m_guest(dom, dom->console_pfn);
499
start_info->console.domU.evtchn = dom->console_evtchn;
501
if ( dom->ramdisk_blob )
503
start_info->mod_start = dom->ramdisk_seg.vstart;
504
start_info->mod_len = dom->ramdisk_seg.vend - dom->ramdisk_seg.vstart;
509
strncpy((char *)start_info->cmd_line, dom->cmdline, MAX_GUEST_CMDLINE);
510
start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = '\0';
516
static int start_info_x86_64(struct xc_dom_image *dom)
518
start_info_x86_64_t *start_info =
519
xc_dom_pfn_to_ptr(dom, dom->start_info_pfn, 1);
521
xc_dom_feature_translated(dom) ? dom->shared_info_pfn : dom->
524
DOMPRINTF_CALLED(dom->xch);
526
if ( start_info == NULL )
528
DOMPRINTF("%s: xc_dom_pfn_to_ptr failed on start_info", __FUNCTION__);
529
return -1; /* our caller throws away our return value :-/ */
532
memset(start_info, 0, sizeof(*start_info));
533
strncpy(start_info->magic, dom->guest_type, sizeof(start_info->magic));
534
start_info->magic[sizeof(start_info->magic) - 1] = '\0';
535
start_info->nr_pages = dom->total_pages;
536
start_info->shared_info = shinfo << PAGE_SHIFT_X86;
537
start_info->pt_base = dom->pgtables_seg.vstart;
538
start_info->nr_pt_frames = dom->pgtables;
539
start_info->mfn_list = dom->p2m_seg.vstart;
541
start_info->flags = dom->flags;
542
start_info->store_mfn = xc_dom_p2m_guest(dom, dom->xenstore_pfn);
543
start_info->store_evtchn = dom->xenstore_evtchn;
544
start_info->console.domU.mfn = xc_dom_p2m_guest(dom, dom->console_pfn);
545
start_info->console.domU.evtchn = dom->console_evtchn;
547
if ( dom->ramdisk_blob )
549
start_info->mod_start = dom->ramdisk_seg.vstart;
550
start_info->mod_len = dom->ramdisk_seg.vend - dom->ramdisk_seg.vstart;
555
strncpy((char *)start_info->cmd_line, dom->cmdline, MAX_GUEST_CMDLINE);
556
start_info->cmd_line[MAX_GUEST_CMDLINE - 1] = '\0';
562
static int shared_info_x86_32(struct xc_dom_image *dom, void *ptr)
564
shared_info_x86_32_t *shared_info = ptr;
567
DOMPRINTF_CALLED(dom->xch);
569
memset(shared_info, 0, sizeof(*shared_info));
570
for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
571
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
575
static int shared_info_x86_64(struct xc_dom_image *dom, void *ptr)
577
shared_info_x86_64_t *shared_info = ptr;
580
DOMPRINTF_CALLED(dom->xch);
582
memset(shared_info, 0, sizeof(*shared_info));
583
for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
584
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
588
/* ------------------------------------------------------------------------ */
590
static int vcpu_x86_32(struct xc_dom_image *dom, void *ptr)
592
vcpu_guest_context_x86_32_t *ctxt = ptr;
595
DOMPRINTF_CALLED(dom->xch);
597
/* clear everything */
598
memset(ctxt, 0, sizeof(*ctxt));
600
ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_32;
601
ctxt->user_regs.es = FLAT_KERNEL_DS_X86_32;
602
ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_32;
603
ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_32;
604
ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_32;
605
ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_32;
606
ctxt->user_regs.eip = dom->parms.virt_entry;
607
ctxt->user_regs.esp =
608
dom->parms.virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
609
ctxt->user_regs.esi =
610
dom->parms.virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
611
ctxt->user_regs.eflags = 1 << 9; /* Interrupt Enable */
613
ctxt->kernel_ss = ctxt->user_regs.ss;
614
ctxt->kernel_sp = ctxt->user_regs.esp;
616
ctxt->flags = VGCF_in_kernel_X86_32 | VGCF_online_X86_32;
617
if ( dom->parms.pae == 2 /* extended_cr3 */ ||
618
dom->parms.pae == 3 /* bimodal */ )
619
ctxt->vm_assist |= (1UL << VMASST_TYPE_pae_extended_cr3);
621
cr3_pfn = xc_dom_p2m_guest(dom, dom->pgtables_seg.pfn);
622
ctxt->ctrlreg[3] = xen_pfn_to_cr3_x86_32(cr3_pfn);
623
DOMPRINTF("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "",
624
__FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);
629
static int vcpu_x86_64(struct xc_dom_image *dom, void *ptr)
631
vcpu_guest_context_x86_64_t *ctxt = ptr;
634
DOMPRINTF_CALLED(dom->xch);
636
/* clear everything */
637
memset(ctxt, 0, sizeof(*ctxt));
639
ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_64;
640
ctxt->user_regs.es = FLAT_KERNEL_DS_X86_64;
641
ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_64;
642
ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_64;
643
ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_64;
644
ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_64;
645
ctxt->user_regs.rip = dom->parms.virt_entry;
646
ctxt->user_regs.rsp =
647
dom->parms.virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
648
ctxt->user_regs.rsi =
649
dom->parms.virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
650
ctxt->user_regs.rflags = 1 << 9; /* Interrupt Enable */
652
ctxt->kernel_ss = ctxt->user_regs.ss;
653
ctxt->kernel_sp = ctxt->user_regs.esp;
655
ctxt->flags = VGCF_in_kernel_X86_64 | VGCF_online_X86_64;
656
cr3_pfn = xc_dom_p2m_guest(dom, dom->pgtables_seg.pfn);
657
ctxt->ctrlreg[3] = xen_pfn_to_cr3_x86_64(cr3_pfn);
658
DOMPRINTF("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "",
659
__FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);
664
/* ------------------------------------------------------------------------ */
666
static struct xc_dom_arch xc_dom_32 = {
667
.guest_type = "xen-3.0-x86_32",
668
.native_protocol = XEN_IO_PROTO_ABI_X86_32,
669
.page_shift = PAGE_SHIFT_X86,
671
.alloc_magic_pages = alloc_magic_pages,
672
.count_pgtables = count_pgtables_x86_32,
673
.setup_pgtables = setup_pgtables_x86_32,
674
.start_info = start_info_x86_32,
675
.shared_info = shared_info_x86_32,
678
static struct xc_dom_arch xc_dom_32_pae = {
679
.guest_type = "xen-3.0-x86_32p",
680
.native_protocol = XEN_IO_PROTO_ABI_X86_32,
681
.page_shift = PAGE_SHIFT_X86,
683
.alloc_magic_pages = alloc_magic_pages,
684
.count_pgtables = count_pgtables_x86_32_pae,
685
.setup_pgtables = setup_pgtables_x86_32_pae,
686
.start_info = start_info_x86_32,
687
.shared_info = shared_info_x86_32,
691
static struct xc_dom_arch xc_dom_64 = {
692
.guest_type = "xen-3.0-x86_64",
693
.native_protocol = XEN_IO_PROTO_ABI_X86_64,
694
.page_shift = PAGE_SHIFT_X86,
696
.alloc_magic_pages = alloc_magic_pages,
697
.count_pgtables = count_pgtables_x86_64,
698
.setup_pgtables = setup_pgtables_x86_64,
699
.start_info = start_info_x86_64,
700
.shared_info = shared_info_x86_64,
704
/* Register all three x86 guest flavours with the generic domain builder. */
static void __init register_arch_hooks(void)
{
    xc_dom_register_arch_hooks(&xc_dom_32);
    xc_dom_register_arch_hooks(&xc_dom_32_pae);
    xc_dom_register_arch_hooks(&xc_dom_64);
}
711
/*
 * Tell the hypervisor the guest's address size (32 for PAE, 64 for
 * x86_64).  Non-PAE 32-bit guests need no domctl and return 0 early.
 * Returns the domctl result; a failure is logged as a warning only.
 */
static int x86_compat(xc_interface *xch, domid_t domid, char *guest_type)
{
    static const struct {
        char *guest;
        uint32_t size;
    } types[] = {
        { "xen-3.0-x86_32p", 32 },
        { "xen-3.0-x86_64",  64 },
    };
    DECLARE_DOMCTL;
    int i, rc;

    memset(&domctl, 0, sizeof(domctl));
    domctl.domain = domid;
    domctl.cmd    = XEN_DOMCTL_set_address_size;
    for ( i = 0; i < sizeof(types)/sizeof(types[0]); i++ )
        if ( !strcmp(types[i].guest, guest_type) )
            domctl.u.address_size.size = types[i].size;
    if ( domctl.u.address_size.size == 0 )
        return 0; /* nothing to do */

    xc_dom_printf(xch, "%s: guest %s, address size %" PRId32 "", __FUNCTION__,
                  guest_type, domctl.u.address_size.size);
    rc = do_domctl(xch, &domctl);
    if ( rc != 0 )
        xc_dom_printf(xch, "%s: warning: failed (rc=%d)",
                      __FUNCTION__, rc);
    return rc;
}
743
/*
 * Enable shadow paging (refcount + translate) for auto-translated
 * guests.  Panics on failure; returns the xc_shadow_control result.
 */
static int x86_shadow(xc_interface *xch, domid_t domid)
{
    int rc, mode;

    DOMPRINTF_CALLED(xch);

    mode = XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
        XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE;

    rc = xc_shadow_control(xch, domid,
                           XEN_DOMCTL_SHADOW_OP_ENABLE,
                           NULL, 0, NULL, mode, NULL);
    if ( rc != 0 )
    {
        xc_dom_panic(xch, XC_INTERNAL_ERROR,
                     "%s: SHADOW_OP_ENABLE (mode=0x%x) failed (rc=%d)",
                     __FUNCTION__, mode, rc);
        return rc;
    }
    xc_dom_printf(xch, "%s: shadow enabled (mode=0x%x)", __FUNCTION__, mode);
    return rc;
}
766
int arch_setup_meminit(struct xc_dom_image *dom)
769
xen_pfn_t pfn, allocsz, i, j, mfn;
771
rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
774
if ( xc_dom_feature_translated(dom) )
776
dom->shadow_enabled = 1;
777
rc = x86_shadow(dom->xch, dom->guest_domid);
782
dom->p2m_host = xc_dom_malloc(dom, sizeof(xen_pfn_t) * dom->total_pages);
783
if ( dom->superpages )
785
int count = dom->total_pages >> SUPERPAGE_PFN_SHIFT;
786
xen_pfn_t extents[count];
788
DOMPRINTF("Populating memory with %d superpages", count);
789
for ( pfn = 0; pfn < count; pfn++ )
790
extents[pfn] = pfn << SUPERPAGE_PFN_SHIFT;
791
rc = xc_domain_populate_physmap_exact(dom->xch, dom->guest_domid,
792
count, SUPERPAGE_PFN_SHIFT, 0,
797
/* Expand the returned mfn into the p2m array */
799
for ( i = 0; i < count; i++ )
802
for ( j = 0; j < SUPERPAGE_NR_PFNS; j++, pfn++ )
803
dom->p2m_host[pfn] = mfn + j;
808
/* setup initial p2m */
809
for ( pfn = 0; pfn < dom->total_pages; pfn++ )
810
dom->p2m_host[pfn] = pfn;
812
/* allocate guest memory */
813
for ( i = rc = allocsz = 0;
814
(i < dom->total_pages) && !rc;
817
allocsz = dom->total_pages - i;
818
if ( allocsz > 1024*1024 )
820
rc = xc_domain_populate_physmap_exact(
821
dom->xch, dom->guest_domid, allocsz,
822
0, 0, &dom->p2m_host[i]);
829
/* x86 needs no early boot-time setup; hook exists for other arches. */
int arch_setup_bootearly(struct xc_dom_image *dom)
{
    DOMPRINTF("%s: doing nothing", __FUNCTION__);
    return 0;
}
835
int arch_setup_bootlate(struct xc_dom_image *dom)
837
static const struct {
839
unsigned long pgd_type;
841
{ "xen-3.0-x86_32", MMUEXT_PIN_L2_TABLE},
842
{ "xen-3.0-x86_32p", MMUEXT_PIN_L3_TABLE},
843
{ "xen-3.0-x86_64", MMUEXT_PIN_L4_TABLE},
845
unsigned long pgd_type = 0;
846
shared_info_t *shared_info;
850
for ( i = 0; i < sizeof(types) / sizeof(types[0]); i++ )
851
if ( !strcmp(types[i].guest, dom->guest_type) )
852
pgd_type = types[i].pgd_type;
854
if ( !xc_dom_feature_translated(dom) )
856
/* paravirtualized guest */
857
xc_dom_unmap_one(dom, dom->pgtables_seg.pfn);
858
rc = pin_table(dom->xch, pgd_type,
859
xc_dom_p2m_host(dom, dom->pgtables_seg.pfn),
863
xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
864
"%s: pin_table failed (pfn 0x%" PRIpfn ", rc=%d)",
865
__FUNCTION__, dom->pgtables_seg.pfn, rc);
868
shinfo = dom->shared_info_mfn;
872
/* paravirtualized guest with auto-translation */
875
/* Map shared info frame into guest physmap. */
876
rc = xc_domain_add_to_physmap(dom->xch, dom->guest_domid,
877
XENMAPSPACE_shared_info,
878
0, dom->shared_info_pfn);
881
xc_dom_panic(dom->xch, XC_INTERNAL_ERROR, "%s: mapping"
882
" shared_info failed (pfn=0x%" PRIpfn ", rc=%d)",
883
__FUNCTION__, dom->shared_info_pfn, rc);
887
/* Map grant table frames into guest physmap. */
890
rc = xc_domain_add_to_physmap(dom->xch, dom->guest_domid,
891
XENMAPSPACE_grant_table,
892
i, dom->total_pages + i);
895
if ( (i > 0) && (errno == EINVAL) )
897
DOMPRINTF("%s: %d grant tables mapped", __FUNCTION__, i);
900
xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
901
"%s: mapping grant tables failed " "(pfn=0x%"
902
PRIpfn ", rc=%d)", __FUNCTION__, dom->total_pages + i, rc);
906
shinfo = dom->shared_info_pfn;
909
/* setup shared_info page */
910
DOMPRINTF("%s: shared_info: pfn 0x%" PRIpfn ", mfn 0x%" PRIpfn "",
911
__FUNCTION__, dom->shared_info_pfn, dom->shared_info_mfn);
912
shared_info = xc_map_foreign_range(dom->xch, dom->guest_domid,
914
PROT_READ | PROT_WRITE,
916
if ( shared_info == NULL )
918
dom->arch_hooks->shared_info(dom, shared_info);
919
munmap(shared_info, PAGE_SIZE_X86);
/*
 * Local variables:
 * mode: C
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */