/******************************************************************************
 * include/asm-x86/p2m.h
 *
 * physical-to-machine mappings for automatically-translated domains.
 *
 * Copyright (c) 2007 Advanced Micro Devices (Wei Huang)
 * Parts of this code are Copyright (c) 2006-2007 by XenSource Inc.
 * Parts of this code are Copyright (c) 2006 by Michael A Fetterman
 * Parts based on earlier work by Michael A Fetterman, Ian Pratt et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef _XEN_P2M_H
#define _XEN_P2M_H

#include <xen/config.h>
#include <xen/paging.h>
#include <asm/mem_sharing.h>
/*
 * The phys_to_machine_mapping maps guest physical frame numbers
 * to machine frame numbers.  It only exists for paging_mode_translate
 * guests.  It is organised in page-table format, which:
 *
 * (1) allows us to use it directly as the second pagetable in hardware-
 *     assisted paging and (hopefully) iommu support; and
 * (2) lets us map it directly into the guest vcpus' virtual address space
 *     as a linear pagetable, so we can read and write it easily.
 *
 * For (2) we steal the address space that would have normally been used
 * by the read-only MPT map in a non-translated guest.  (For
 * paging_mode_external() guests this mapping is in the monitor table.)
 */
#define phys_to_machine_mapping ((l1_pgentry_t *)RO_MPT_VIRT_START)
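
/*
 * Illustrative sketch, not part of the interface: because the p2m is
 * mapped as a linear pagetable, a lookup on a vcpu of a translated guest
 * can be a simple array access using the standard l1e accessors:
 *
 *     l1_pgentry_t l1e = phys_to_machine_mapping[gfn];
 *     if ( l1e_get_flags(l1e) & _PAGE_PRESENT )
 *         mfn = _mfn(l1e_get_pfn(l1e));
 *
 * The lookup functions below go through the get_entry* hooks instead,
 * which also recover the type and handle PoD and paged-out entries.
 */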
/* The grant-map p2m types below need more than the three type bits
 * available on 32-bit, so the feature is only advertised on 64-bit. */
#ifdef __x86_64__
#define HAVE_GRANT_MAP_P2M
#endif
/*
 * The upper levels of the p2m pagetable always contain full rights; all
 * variation in the access control bits is made in the level-1 PTEs.
 *
 * In addition to the phys-to-machine translation, each p2m PTE contains
 * *type* information about the gfn it translates, helping Xen to decide
 * on the correct course of action when handling a page-fault to that
 * guest frame.  We store the type in the "available" bits of the PTEs
 * in the table, which gives us 8 possible types on 32-bit systems.
 * Further expansions of the type system will only be supported on
 * 64-bit Xen.
 */
typedef enum {
    p2m_invalid = 0,            /* Nothing mapped here */
    p2m_ram_rw = 1,             /* Normal read/write guest RAM */
    p2m_ram_logdirty = 2,       /* Temporarily read-only for log-dirty */
    p2m_ram_ro = 3,             /* Read-only; writes are silently dropped */
    p2m_mmio_dm = 4,            /* Reads and writes go to the device model */
    p2m_mmio_direct = 5,        /* Read/write mapping of genuine MMIO area */
    p2m_populate_on_demand = 6, /* Place-holder for empty memory */

    /* Note that these can only be used if HAVE_GRANT_MAP_P2M is
       defined.  They get defined anyway so as to avoid lots of
       #ifdef's everywhere else. */
    p2m_grant_map_rw = 7,       /* Read/write grant mapping */
    p2m_grant_map_ro = 8,       /* Read-only grant mapping */

    p2m_ram_paging_out = 9,       /* Memory that is being paged out */
    p2m_ram_paged = 10,           /* Memory that has been paged out */
    p2m_ram_paging_in = 11,       /* Memory that is being paged in */
    p2m_ram_paging_in_start = 12, /* Memory that is being paged in */

    p2m_ram_shared = 13,          /* Shared or sharable memory */
} p2m_type_t;
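
/*
 * Illustrative sketch, not part of the interface: since the type lives in
 * the "available" PTE bits (bit 9 upwards), encoding a type into PTE flags
 * is the mirror image of p2m_flags_to_type() defined below:
 *
 *     unsigned long flags = (unsigned long)p2m_ram_rw << 9;
 *     ASSERT(p2m_flags_to_type(flags) == p2m_ram_rw);
 */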
typedef enum {
    p2m_query = 0,              /* Do not populate PoD entries        */
    p2m_alloc = 1,              /* Automatically populate PoD entries */
    p2m_guest = 2,              /* Guest demand-fault; implies alloc  */
} p2m_query_t;
/* We use bitmaps and masks to handle groups of types */
#define p2m_to_mask(_t) (1UL << (_t))
/* RAM types, which map to real machine frames */
#define P2M_RAM_TYPES (p2m_to_mask(p2m_ram_rw)                \
                       | p2m_to_mask(p2m_ram_logdirty)        \
                       | p2m_to_mask(p2m_ram_ro)              \
                       | p2m_to_mask(p2m_ram_paging_out)      \
                       | p2m_to_mask(p2m_ram_paged)           \
                       | p2m_to_mask(p2m_ram_paging_in_start) \
                       | p2m_to_mask(p2m_ram_paging_in)       \
                       | p2m_to_mask(p2m_ram_shared))
/* Grant mapping types, which map to a real machine frame in another
 * VM */
#define P2M_GRANT_TYPES (p2m_to_mask(p2m_grant_map_rw)  \
                         | p2m_to_mask(p2m_grant_map_ro) )
/* MMIO types, which don't have to map to anything in the frametable */
#define P2M_MMIO_TYPES (p2m_to_mask(p2m_mmio_dm)        \
                        | p2m_to_mask(p2m_mmio_direct))
/* Read-only types, which must have the _PAGE_RW bit clear in their PTEs */
#define P2M_RO_TYPES (p2m_to_mask(p2m_ram_logdirty)     \
                      | p2m_to_mask(p2m_ram_ro)         \
                      | p2m_to_mask(p2m_grant_map_ro)   \
                      | p2m_to_mask(p2m_ram_shared) )

#define P2M_MAGIC_TYPES (p2m_to_mask(p2m_populate_on_demand))

#define P2M_PAGEABLE_TYPES (p2m_to_mask(p2m_ram_rw))
#define P2M_PAGING_TYPES (p2m_to_mask(p2m_ram_paging_out)        \
                          | p2m_to_mask(p2m_ram_paged)           \
                          | p2m_to_mask(p2m_ram_paging_in_start) \
                          | p2m_to_mask(p2m_ram_paging_in))

#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
/* XXX: Sharable types could include p2m_ram_ro too, but we would need to
 * reinit the type correctly after fault */
#define P2M_SHARABLE_TYPES (p2m_to_mask(p2m_ram_rw))
#define P2M_SHARED_TYPES   (p2m_to_mask(p2m_ram_shared))
/* Useful predicates */
#define p2m_is_ram(_t)      (p2m_to_mask(_t) & P2M_RAM_TYPES)
#define p2m_is_mmio(_t)     (p2m_to_mask(_t) & P2M_MMIO_TYPES)
#define p2m_is_readonly(_t) (p2m_to_mask(_t) & P2M_RO_TYPES)
#define p2m_is_magic(_t)    (p2m_to_mask(_t) & P2M_MAGIC_TYPES)
#define p2m_is_grant(_t)    (p2m_to_mask(_t) & P2M_GRANT_TYPES)
/* Grant types are *not* considered valid, because they can be
   unmapped at any time and, unless you happen to be the shadow or p2m
   implementations, there's no way of synchronising against that. */
#define p2m_is_valid(_t)    (p2m_to_mask(_t) & (P2M_RAM_TYPES | P2M_MMIO_TYPES))
#define p2m_has_emt(_t)     (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct)))
#define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES)
#define p2m_is_paging(_t)   (p2m_to_mask(_t) & P2M_PAGING_TYPES)
#define p2m_is_paged(_t)    (p2m_to_mask(_t) & P2M_PAGED_TYPES)
#define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
#define p2m_is_shared(_t)   (p2m_to_mask(_t) & P2M_SHARED_TYPES)
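
/*
 * Illustrative sketch, not part of the interface: the predicates take a
 * p2m_type_t, typically one just returned by a lookup such as gfn_to_mfn()
 * (defined below), and test it against a group mask; e.g. a hypothetical
 * write path might check:
 *
 *     p2m_type_t t;
 *     mfn_t mfn = gfn_to_mfn(d, gfn, &t);
 *     if ( !p2m_is_ram(t) || p2m_is_readonly(t) )
 *         return -EINVAL;
 */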
/* Populate-on-demand */
#define POPULATE_ON_DEMAND_MFN  (1<<9)
#define POD_PAGE_ORDER 9

#define PAGING_MFN  INVALID_MFN
struct p2m_domain {
    /* Lock that protects updates to the p2m */
    spinlock_t         lock;
    int                locker;          /* processor which holds the lock */
    const char        *locker_function; /* Func that took it */

    /* Pages used to construct the p2m */
    struct page_list_head pages;

    /* Functions to call to get or free pages for the p2m */
    struct page_info * (*alloc_page  )(struct domain *d);
    void               (*free_page   )(struct domain *d,
                                       struct page_info *pg);
    int                (*set_entry   )(struct domain *d, unsigned long gfn,
                                       mfn_t mfn, unsigned int page_order,
                                       p2m_type_t p2mt);
    mfn_t              (*get_entry   )(struct domain *d, unsigned long gfn,
                                       p2m_type_t *p2mt,
                                       p2m_query_t q);
    mfn_t              (*get_entry_current)(unsigned long gfn,
                                            p2m_type_t *p2mt,
                                            p2m_query_t q);
    void               (*change_entry_type_global)(struct domain *d,
                                                   p2m_type_t ot,
                                                   p2m_type_t nt);

    /* Highest guest frame that's ever been mapped in the p2m */
    unsigned long max_mapped_pfn;

    /* Populate-on-demand variables
     * NB on locking.  {super,single,count} are
     * covered by d->page_alloc_lock, since they're almost always used in
     * conjunction with that functionality.  {entry_count} is covered by
     * the domain p2m lock, since it's almost always used in conjunction
     * with changing the p2m tables.
     *
     * At this point, both locks are held in two places.  In both,
     * the order is [p2m,page_alloc]:
     * + p2m_pod_decrease_reservation() calls p2m_pod_cache_add(),
     *   which grabs page_alloc
     * + p2m_pod_demand_populate() grabs both; the p2m lock to avoid
     *   double-demand-populating of pages, the page_alloc lock to
     *   protect moving stuff from the PoD cache to the domain page list. */
    struct {
        struct page_list_head super,       /* List of superpages            */
                              single;      /* Non-super lists               */
        int                   count,       /* # of pages in cache lists     */
                              entry_count; /* # of pages in p2m marked pod  */
        unsigned              reclaim_super;  /* Last gpfn of a scan */
        unsigned              reclaim_single; /* Last gpfn of a scan */
        unsigned              max_guest;   /* gpfn of max guest demand-populate */
    } pod;
};
/*
 * The P2M lock.  This protects all updates to the p2m table.
 * Updates are expected to be safe against concurrent reads,
 * which do *not* require the lock.
 *
 * Locking discipline: always acquire this lock before the shadow or HAP one
 */

#define p2m_lock_init(_p2m)                     \
    do {                                        \
        spin_lock_init(&(_p2m)->lock);          \
        (_p2m)->locker = -1;                    \
        (_p2m)->locker_function = "nobody";     \
    } while (0)

#define p2m_lock(_p2m)                                          \
    do {                                                        \
        if ( unlikely((_p2m)->locker == current->processor) )   \
        {                                                       \
            printk("Error: p2m lock held by %s\n",              \
                   (_p2m)->locker_function);                    \
            BUG();                                              \
        }                                                       \
        spin_lock(&(_p2m)->lock);                               \
        ASSERT((_p2m)->locker == -1);                           \
        (_p2m)->locker = current->processor;                    \
        (_p2m)->locker_function = __func__;                     \
    } while (0)

#define p2m_unlock(_p2m)                                \
    do {                                                \
        ASSERT((_p2m)->locker == current->processor);   \
        (_p2m)->locker = -1;                            \
        (_p2m)->locker_function = "nobody";             \
        spin_unlock(&(_p2m)->lock);                     \
    } while (0)

#define p2m_locked_by_me(_p2m)            \
    (current->processor == (_p2m)->locker)
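
/*
 * Illustrative sketch, not part of the interface: a typical update path
 * takes the p2m lock around the entry update, acquiring it before (never
 * after) the shadow/HAP and page_alloc locks:
 *
 *     p2m_lock(d->arch.p2m);
 *     // ... update entries via d->arch.p2m->set_entry() ...
 *     p2m_unlock(d->arch.p2m);
 */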
/* Extract the type from the PTE flags that store it */
static inline p2m_type_t p2m_flags_to_type(unsigned long flags)
{
    /* Type is stored in the "available" bits */
#ifdef __x86_64__
    return (flags >> 9) & 0x3fff;
#else
    return (flags >> 9) & 0x7;
#endif
}
/* Read the current domain's p2m table.  Do not populate PoD pages. */
static inline mfn_t gfn_to_mfn_type_current(unsigned long gfn, p2m_type_t *t,
                                            p2m_query_t q)
{
    return current->domain->arch.p2m->get_entry_current(gfn, t, q);
}
/* Read another domain's P2M table, mapping pages as we go.
 * Do not populate PoD pages. */
static inline
mfn_t gfn_to_mfn_type_foreign(struct domain *d, unsigned long gfn, p2m_type_t *t,
                              p2m_query_t q)
{
    return d->arch.p2m->get_entry(d, gfn, t, q);
}
/* General conversion function from gfn to mfn */
static inline mfn_t _gfn_to_mfn_type(struct domain *d,
                                     unsigned long gfn, p2m_type_t *t,
                                     p2m_query_t q)
{
    if ( !paging_mode_translate(d) )
    {
        /* Not necessarily true, but for non-translated guests, we claim
         * it's the most generic kind of memory */
        *t = p2m_ram_rw;
        return _mfn(gfn);
    }
    if ( likely(current->domain == d) )
        return gfn_to_mfn_type_current(gfn, t, q);
    else
        return gfn_to_mfn_type_foreign(d, gfn, t, q);
}
#define gfn_to_mfn(d, g, t)         _gfn_to_mfn_type((d), (g), (t), p2m_alloc)
#define gfn_to_mfn_query(d, g, t)   _gfn_to_mfn_type((d), (g), (t), p2m_query)
#define gfn_to_mfn_guest(d, g, t)   _gfn_to_mfn_type((d), (g), (t), p2m_guest)

#define gfn_to_mfn_current(g, t)    gfn_to_mfn_type_current((g), (t), p2m_alloc)
#define gfn_to_mfn_foreign(d, g, t) gfn_to_mfn_type_foreign((d), (g), (t), p2m_alloc)
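
/*
 * Illustrative sketch, not part of the interface: the wrappers differ only
 * in how populate-on-demand entries are treated.  A path servicing a guest
 * access wants PoD pages populated; a query path must leave them alone:
 *
 *     p2m_type_t t;
 *     mfn_t mfn = gfn_to_mfn_guest(d, gfn, &t);   // populate PoD entries
 *     mfn_t m2  = gfn_to_mfn_query(d, gfn, &t);   // never populate
 */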
static inline mfn_t gfn_to_mfn_unshare(struct domain *d,
                                       unsigned long gfn,
                                       p2m_type_t *p2mt,
                                       int must_succeed)
{
    mfn_t mfn;
    int ret;

    mfn = gfn_to_mfn(d, gfn, p2mt);
    if ( p2m_is_shared(*p2mt) )
    {
        ret = mem_sharing_unshare_page(d, gfn,
                must_succeed ? MEM_SHARING_MUST_SUCCEED : 0);
        if ( ret )
        {
            BUG_ON(must_succeed);
            return mfn;
        }
        mfn = gfn_to_mfn(d, gfn, p2mt);
    }

    return mfn;
}
/* Compatibility function exporting the old untyped interface */
static inline unsigned long gmfn_to_mfn(struct domain *d, unsigned long gpfn)
{
    mfn_t mfn;
    p2m_type_t t;

    mfn = gfn_to_mfn(d, gpfn, &t);
    if ( p2m_is_valid(t) )
        return mfn_x(mfn);
    return INVALID_MFN;
}
/* General conversion function from mfn to gfn */
static inline unsigned long mfn_to_gfn(struct domain *d, mfn_t mfn)
{
    if ( paging_mode_translate(d) )
        return get_gpfn_from_mfn(mfn_x(mfn));
    else
        return mfn_x(mfn);
}
/* Init the datastructures for later use by the p2m code */
int p2m_init(struct domain *d);
/* Allocate a new p2m table for a domain.
 *
 * The alloc_page and free_page functions will be used to get memory to
 * build the p2m, and to release it again at the end of day.
 *
 * Returns 0 for success or -errno. */
int p2m_alloc_table(struct domain *d,
                    struct page_info * (*alloc_page)(struct domain *d),
                    void (*free_page)(struct domain *d, struct page_info *pg));
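
/*
 * Illustrative sketch with hypothetical callbacks, not in this file: in
 * practice the paging modes pass hooks backed by their page pools, but a
 * minimal domheap-backed pair would look like:
 *
 *     static struct page_info *example_alloc(struct domain *d)
 *     {
 *         return alloc_domheap_page(NULL, 0);
 *     }
 *     static void example_free(struct domain *d, struct page_info *pg)
 *     {
 *         free_domheap_page(pg);
 *     }
 *
 *     rc = p2m_alloc_table(d, example_alloc, example_free);
 */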
/* Return all the p2m resources to Xen. */
void p2m_teardown(struct domain *d);
void p2m_final_teardown(struct domain *d);

/* Dump PoD information about the domain */
void p2m_pod_dump_data(struct domain *d);
/* Move all pages from the populate-on-demand cache to the domain page_list
 * (usually in preparation for domain destruction) */
void p2m_pod_empty_cache(struct domain *d);

/* Set populate-on-demand cache size so that the total memory allocated to a
 * domain matches target */
int p2m_pod_set_mem_target(struct domain *d, unsigned long target);

/* Call when decreasing memory reservation to handle PoD entries properly.
 * Will return '1' if all entries were handled and nothing more need be done.*/
int
p2m_pod_decrease_reservation(struct domain *d,
                             xen_pfn_t gpfn,
                             unsigned int order);

/* Called by p2m code when demand-populating a PoD page */
int
p2m_pod_demand_populate(struct domain *d, unsigned long gfn,
                        unsigned int order,
                        p2m_query_t q);
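
/*
 * Illustrative sketch, not part of the interface: the decrease-reservation
 * hypercall path is the expected caller, letting PoD absorb the released
 * range before any real pages are freed:
 *
 *     if ( p2m_pod_decrease_reservation(d, gpfn, order) )
 *         return;  // all entries were PoD; nothing more to do
 *     // otherwise fall through and free the real backing pages
 */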
/* Add a page to a domain's p2m table */
int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                            unsigned long mfn, unsigned int page_order,
                            p2m_type_t t);

/* Set a p2m range as populate-on-demand */
int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
                                          unsigned int order);
/* Untyped version for RAM only, for compatibility
 *
 * Return 0 for success
 */
static inline int guest_physmap_add_page(struct domain *d, unsigned long gfn,
                                         unsigned long mfn,
                                         unsigned int page_order)
{
    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}
/* Remove a page from a domain's p2m table */
void guest_physmap_remove_page(struct domain *d, unsigned long gfn,
                               unsigned long mfn, unsigned int page_order);
/* Change types across all p2m entries in a domain */
void p2m_change_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);
void p2m_change_entry_type_global(struct domain *d, p2m_type_t ot, p2m_type_t nt);

/* Compare-exchange the type of a single p2m entry */
p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
                           p2m_type_t ot, p2m_type_t nt);
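
/*
 * Illustrative sketch, not part of the interface: log-dirty tracking is a
 * typical user, flipping all RAM read-only to trap writes and restoring
 * each frame as the guest touches it:
 *
 *     p2m_change_entry_type_global(d, p2m_ram_rw, p2m_ram_logdirty);
 *     // ...later, on a write fault to gfn:
 *     p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
 */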
/* Set mmio addresses in the p2m table (for pass-through) */
int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);

/* Modify p2m table for shared gfn */
int
set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
/* Check if a nominated gfn is valid to be paged out */
int p2m_mem_paging_nominate(struct domain *d, unsigned long gfn);
/* Evict a frame */
int p2m_mem_paging_evict(struct domain *d, unsigned long gfn);
/* Start populating a paged out frame */
void p2m_mem_paging_populate(struct domain *d, unsigned long gfn);
/* Prepare the p2m for paging a frame in */
int p2m_mem_paging_prep(struct domain *d, unsigned long gfn);
/* Resume normal operation (in case a domain was paused) */
void p2m_mem_paging_resume(struct domain *d);
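
/*
 * Illustrative sketch, not part of the interface: the expected lifecycle
 * for paging a frame out and back in, driven by an external pager, is:
 *
 *     p2m_mem_paging_nominate(d, gfn);  // mark gfn p2m_ram_paging_out
 *     p2m_mem_paging_evict(d, gfn);     // free the frame; gfn is paged
 *     // ...later, when the guest touches gfn:
 *     p2m_mem_paging_populate(d, gfn);  // ask the pager for the page
 *     p2m_mem_paging_prep(d, gfn);      // allocate a frame to page into
 *     p2m_mem_paging_resume(d);         // unpause and retry the access
 */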
#endif /* _XEN_P2M_H */

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * indent-tabs-mode: nil
 * End:
 */