1
/******************************************************************************
11
#include "xg_private.h"
12
#include "xc_private.h"
14
#include <xen/foreign/x86_32.h>
15
#include <xen/foreign/x86_64.h>
16
#include <xen/hvm/hvm_info_table.h>
17
#include <xen/hvm/params.h>
18
#include <xen/hvm/e820.h>
20
#include <xen/libelf/libelf.h>
22
#define SUPERPAGE_PFN_SHIFT 9
23
#define SUPERPAGE_NR_PFNS (1UL << SUPERPAGE_PFN_SHIFT)
25
#define SPECIALPAGE_BUFIOREQ 0
26
#define SPECIALPAGE_XENSTORE 1
27
#define SPECIALPAGE_IOREQ 2
28
#define SPECIALPAGE_IDENT_PT 3
29
#define SPECIALPAGE_SHINFO 4
30
#define NR_SPECIAL_PAGES 5
31
#define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
33
static void build_hvm_info(void *hvm_info_page, uint64_t mem_size)
35
struct hvm_info_table *hvm_info = (struct hvm_info_table *)
36
(((unsigned char *)hvm_info_page) + HVM_INFO_OFFSET);
37
uint64_t lowmem_end = mem_size, highmem_end = 0;
41
if ( lowmem_end > HVM_BELOW_4G_RAM_END )
43
highmem_end = lowmem_end + (1ull<<32) - HVM_BELOW_4G_RAM_END;
44
lowmem_end = HVM_BELOW_4G_RAM_END;
47
memset(hvm_info_page, 0, PAGE_SIZE);
49
/* Fill in the header. */
50
strncpy(hvm_info->signature, "HVM INFO", 8);
51
hvm_info->length = sizeof(struct hvm_info_table);
53
/* Sensible defaults: these can be overridden by the caller. */
54
hvm_info->acpi_enabled = 1;
55
hvm_info->apic_mode = 1;
56
hvm_info->nr_vcpus = 1;
58
/* Memory parameters. */
59
hvm_info->low_mem_pgend = lowmem_end >> PAGE_SHIFT;
60
hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
61
hvm_info->reserved_mem_pgstart = special_pfn(0);
63
/* Finish with the checksum. */
64
for ( i = 0, sum = 0; i < hvm_info->length; i++ )
65
sum += ((uint8_t *)hvm_info)[i];
66
hvm_info->checksum = -sum;
69
static int loadelfimage(
70
struct elf_binary *elf, int xch, uint32_t dom, unsigned long *parray)
72
privcmd_mmap_entry_t *entries = NULL;
73
size_t pages = (elf->pend - elf->pstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
76
/* Map address space for initial elf image. */
77
entries = calloc(pages, sizeof(privcmd_mmap_entry_t));
78
if ( entries == NULL )
81
for ( i = 0; i < pages; i++ )
82
entries[i].mfn = parray[(elf->pstart >> PAGE_SHIFT) + i];
84
elf->dest = xc_map_foreign_ranges(
85
xch, dom, pages << PAGE_SHIFT, PROT_READ | PROT_WRITE, 1 << PAGE_SHIFT,
87
if ( elf->dest == NULL )
90
/* Load the initial elf image. */
94
munmap(elf->dest, pages << PAGE_SHIFT);
103
static int setup_guest(int xc_handle,
104
uint32_t dom, int memsize, int target,
105
char *image, unsigned long image_size)
107
xen_pfn_t *page_array = NULL;
108
unsigned long i, nr_pages = (unsigned long)memsize << (20 - PAGE_SHIFT);
109
unsigned long target_pages = (unsigned long)target << (20 - PAGE_SHIFT);
110
unsigned long entry_eip, cur_pages;
111
struct xen_add_to_physmap xatp;
112
struct shared_info *shared_info;
115
struct elf_binary elf;
116
uint64_t v_start, v_end;
118
xen_capabilities_info_t caps;
122
/* An HVM guest must be initialised with at least 2MB memory. */
123
if ( memsize < 2 || target < 2 )
126
if ( memsize > target )
129
memset(&elf, 0, sizeof(elf));
130
if ( elf_init(&elf, image, image_size) != 0 )
132
elf_parse_binary(&elf);
134
v_end = (unsigned long long)memsize << 20;
136
if ( xc_version(xc_handle, XENVER_capabilities, &caps) != 0 )
138
PERROR("Could not get Xen capabilities\n");
142
if ( (elf.pstart & (PAGE_SIZE - 1)) != 0 )
144
PERROR("Guest OS must load to a page boundary.\n");
148
IPRINTF("VIRTUAL MEMORY ARRANGEMENT:\n"
149
" Loader: %016"PRIx64"->%016"PRIx64"\n"
150
" TOTAL: %016"PRIx64"->%016"PRIx64"\n"
151
" ENTRY ADDRESS: %016"PRIx64"\n",
152
elf.pstart, elf.pend,
154
elf_uval(&elf, elf.ehdr, e_entry));
156
if ( (page_array = malloc(nr_pages * sizeof(xen_pfn_t))) == NULL )
158
PERROR("Could not allocate memory.\n");
162
for ( i = 0; i < nr_pages; i++ )
164
for ( i = HVM_BELOW_4G_RAM_END >> PAGE_SHIFT; i < nr_pages; i++ )
165
page_array[i] += HVM_BELOW_4G_MMIO_LENGTH >> PAGE_SHIFT;
168
* Allocate memory for HVM guest, skipping VGA hole 0xA0000-0xC0000.
169
* We allocate pages in batches of no more than 8MB to ensure that
170
* we can be preempted and hence dom0 remains responsive.
172
rc = xc_domain_memory_populate_physmap(
173
xc_handle, dom, 0xa0, 0, 0, &page_array[0x00]);
175
while ( (rc == 0) && (nr_pages > cur_pages) )
177
/* Clip count to maximum 8MB extent. */
178
unsigned long count = nr_pages - cur_pages;
182
/* Clip partial superpage extents to superpage boundaries. */
183
if ( ((cur_pages & (SUPERPAGE_NR_PFNS-1)) != 0) &&
184
(count > (-cur_pages & (SUPERPAGE_NR_PFNS-1))) )
185
count = -cur_pages & (SUPERPAGE_NR_PFNS-1); /* clip s.p. tail */
186
else if ( ((count & (SUPERPAGE_NR_PFNS-1)) != 0) &&
187
(count > SUPERPAGE_NR_PFNS) )
188
count &= ~(SUPERPAGE_NR_PFNS - 1); /* clip non-s.p. tail */
190
/* Attempt to allocate superpage extents. */
191
if ( ((count | cur_pages) & (SUPERPAGE_NR_PFNS - 1)) == 0 )
194
xen_pfn_t sp_extents[count >> SUPERPAGE_PFN_SHIFT];
195
struct xen_memory_reservation sp_req = {
196
.nr_extents = count >> SUPERPAGE_PFN_SHIFT,
197
.extent_order = SUPERPAGE_PFN_SHIFT,
202
sp_req.mem_flags = XENMEMF_populate_on_demand;
204
set_xen_guest_handle(sp_req.extent_start, sp_extents);
205
for ( i = 0; i < sp_req.nr_extents; i++ )
206
sp_extents[i] = page_array[cur_pages+(i<<SUPERPAGE_PFN_SHIFT)];
207
done = xc_memory_op(xc_handle, XENMEM_populate_physmap, &sp_req);
210
done <<= SUPERPAGE_PFN_SHIFT;
216
/* Fall back to 4kB extents. */
219
rc = xc_domain_memory_populate_physmap(
220
xc_handle, dom, count, 0, 0, &page_array[cur_pages]);
225
/* Subtract 0x20 from target_pages for the VGA "hole". Xen will
226
* adjust the PoD cache size so that domain tot_pages will be
227
* target_pages - 0x20 after this call. */
229
rc = xc_domain_memory_set_pod_target(xc_handle,
236
PERROR("Could not allocate memory for HVM guest.\n");
240
if ( loadelfimage(&elf, xc_handle, dom, page_array) != 0 )
243
if ( (hvm_info_page = xc_map_foreign_range(
244
xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
245
HVM_INFO_PFN)) == NULL )
247
build_hvm_info(hvm_info_page, v_end);
248
munmap(hvm_info_page, PAGE_SIZE);
250
/* Map and initialise shared_info page. */
252
xatp.space = XENMAPSPACE_shared_info;
254
xatp.gpfn = special_pfn(SPECIALPAGE_SHINFO);
255
if ( (xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp) != 0) ||
256
((shared_info = xc_map_foreign_range(
257
xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
258
special_pfn(SPECIALPAGE_SHINFO))) == NULL) )
260
memset(shared_info, 0, PAGE_SIZE);
261
/* NB. evtchn_upcall_mask is unused: leave as zero. */
262
memset(&shared_info->evtchn_mask[0], 0xff,
263
sizeof(shared_info->evtchn_mask));
264
munmap(shared_info, PAGE_SIZE);
266
/* Allocate and clear special pages. */
267
for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
269
xen_pfn_t pfn = special_pfn(i);
270
if ( i == SPECIALPAGE_SHINFO )
272
rc = xc_domain_memory_populate_physmap(xc_handle, dom, 1, 0, 0, &pfn);
275
PERROR("Could not allocate %d'th special page.\n", i);
278
if ( xc_clear_domain_page(xc_handle, dom, special_pfn(i)) )
282
xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN,
283
special_pfn(SPECIALPAGE_XENSTORE));
284
xc_set_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN,
285
special_pfn(SPECIALPAGE_BUFIOREQ));
286
xc_set_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN,
287
special_pfn(SPECIALPAGE_IOREQ));
290
* Identity-map page table is required for running with CR0.PG=0 when
291
* using Intel EPT. Create a 32-bit non-PAE page directory of superpages.
293
if ( (ident_pt = xc_map_foreign_range(
294
xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
295
special_pfn(SPECIALPAGE_IDENT_PT))) == NULL )
297
for ( i = 0; i < PAGE_SIZE / sizeof(*ident_pt); i++ )
298
ident_pt[i] = ((i << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
299
_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
300
munmap(ident_pt, PAGE_SIZE);
301
xc_set_hvm_param(xc_handle, dom, HVM_PARAM_IDENT_PT,
302
special_pfn(SPECIALPAGE_IDENT_PT) << PAGE_SHIFT);
304
/* Insert JMP <rel32> instruction at address 0x0 to reach entry point. */
305
entry_eip = elf_uval(&elf, elf.ehdr, e_entry);
306
if ( entry_eip != 0 )
308
char *page0 = xc_map_foreign_range(
309
xc_handle, dom, PAGE_SIZE, PROT_READ | PROT_WRITE, 0);
313
*(uint32_t *)&page0[1] = entry_eip - 5;
314
munmap(page0, PAGE_SIZE);
325
static int xc_hvm_build_internal(int xc_handle,
330
unsigned long image_size)
332
if ( (image == NULL) || (image_size == 0) )
334
ERROR("Image required");
338
return setup_guest(xc_handle, domid, memsize, target, image, image_size);
342
/* xc_hvm_build:
 * Create a domain for a virtualized Linux, using files/filenames.
 * The image file is read (and inflated if necessary) into memory and
 * freed before returning.
 */
int xc_hvm_build(int xc_handle,
                 uint32_t domid,
                 int memsize,
                 const char *image_name)
{
    char *image;
    int sts;
    unsigned long image_size;

    if ( (image_name == NULL) ||
         ((image = xc_read_image(image_name, &image_size)) == NULL) )
        return -1;

    /* target == memsize: no populate-on-demand. */
    sts = xc_hvm_build_internal(xc_handle, domid, memsize, memsize, image, image_size);

    free(image);

    return sts;
}
364
/* xc_hvm_build_target_mem:
 * Create a domain for a pre-ballooned virtualized Linux, using
 * files/filenames. If target < memsize, domain is created with
 * memsize pages marked populate-on-demand, and with a PoD cache size
 * of target. If target == memsize, pages are populated normally.
 */
int xc_hvm_build_target_mem(int xc_handle,
                            uint32_t domid,
                            int memsize,
                            int target,
                            const char *image_name)
{
    char *image;
    int sts;
    unsigned long image_size;

    if ( (image_name == NULL) ||
         ((image = xc_read_image(image_name, &image_size)) == NULL) )
        return -1;

    sts = xc_hvm_build_internal(xc_handle, domid, memsize, target, image, image_size);

    free(image);

    return sts;
}
392
/* xc_hvm_build_mem:
 * Create a domain for a virtualized Linux, using memory buffers.
 * The buffer is inflated if compressed; the inflated copy (if any)
 * is freed before returning.
 */
int xc_hvm_build_mem(int xc_handle,
                     uint32_t domid,
                     int memsize,
                     const char *image_buffer,
                     unsigned long image_size)
{
    int sts;
    char *img;
    unsigned long img_len;

    /* Validate that there is a kernel buffer */
    if ( (image_buffer == NULL) || (image_size == 0) )
    {
        ERROR("kernel image buffer not present");
        return -1;
    }

    img = xc_inflate_buffer(image_buffer, image_size, &img_len);
    if ( img == NULL )
    {
        ERROR("unable to inflate ram disk buffer");
        return -1;
    }

    sts = xc_hvm_build_internal(xc_handle, domid, memsize, memsize,
                                img, img_len);

    /* xc_inflate_buffer may return the original buffer pointer (for
       for already inflated buffers), so exercise some care in freeing */

    if ( (img != NULL) && (img != image_buffer) )
        free(img);

    return sts;
}
437
* indent-tabs-mode: nil