#ifndef __ASM_X86_MM_H__
#define __ASM_X86_MM_H__

#include <xen/config.h>
#include <xen/spinlock.h>
#include <asm/uaccess.h>

/*
 * Per-page-frame information.
 *
 * Every architecture must ensure the following:
 * 1. 'struct page_info' contains a 'struct page_list_entry list'.
 * 2. Provide a PFN_ORDER() macro for accessing the order of a free page.
 */
#define PFN_ORDER(_pfn) ((_pfn)->v.free.order)
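/*
 * Illustrative note (editor's example, not in the original header): for the
 * head page of a free chunk, PFN_ORDER() gives the chunk's order, so the
 * chunk spans 1UL << PFN_ORDER(pg) pages:
 *
 *     unsigned long chunk_pages = 1UL << PFN_ORDER(pg);
 */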
/*
 * This definition is solely for the use in struct page_info (and
 * struct page_list_head), intended to allow easy adjustment once x86-64
 * wants to support more than 16TB.
 * 'unsigned long' should be used for MFNs everywhere else.
 */
#define __pdx_t unsigned int
#undef page_list_entry
struct page_list_entry
{
    __pdx_t next, prev;
};

struct page_sharing_info;
/* Each frame can be threaded onto a doubly-linked list.
 *
 * For unused shadow pages, a list of free shadow pages;
 * for multi-page shadows, links to the other pages in this shadow;
 * for pinnable shadows, if pinned, a list of all pinned shadows
 * (see sh_type_is_pinnable() for the definition of "pinnable"
 * shadow types). N.B. a shadow may be both pinnable and multi-page.
 * In that case the pages are inserted in order in the list of
 * pinned shadows and walkers of that list must be prepared
 * to keep them all together during updates.
 */
struct page_list_entry list;
/* For non-pinnable single-page shadows, a higher entry that points
 * at us. */
/* For shared/sharable pages, we use a doubly-linked list
 * of all the {pfn,domain} pairs that map this page. We also include
 * an opaque handle, which is effectively a version, so that clients
 * of sharing share the version they expect to.
 * This list is allocated and freed when a page is shared/unshared.
 */
struct page_sharing_info *sharing;

/* Reference count and various PGC_xxx flags and fields. */
unsigned long count_info;

/* Context-dependent fields follow... */

/* Page is in use: ((count_info & PGC_count_mask) != 0). */
/* Type reference count and various PGT_xxx flags and fields. */
unsigned long type_info;

/* Page is in use as a shadow: count_info == 0. */
unsigned long type:5;   /* What kind of shadow is this? */
unsigned long pinned:1; /* Is the shadow pinned? */
unsigned long head:1;   /* Is this the first page of the shadow? */
unsigned long count:25; /* Reference count */

/* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
/* Do TLBs need flushing for safety before next page use? */

/* Page is in use, but not as a shadow. */
/* Owner of this page (zero if page is anonymous). */

/* Page is in use as a shadow. */
/* GMFN of guest page we're a shadow of. */

/* Page is on a free list. */
/* Order-size of the free chunk this page is the head of. */

/*
 * Timestamp from 'TLB clock', used to avoid extra safety flushes.
 * Only valid for: a) free pages, and b) pages with zero type count
 * (except page table pages when the guest is in shadow mode).
 */
u32 tlbflush_timestamp;
/*
 * When PGT_partial is true then this field is valid and indicates
 * that PTEs in the range [0, @nr_validated_ptes) have been validated.
 * An extra page reference must be acquired (or not dropped) whenever
 * PGT_partial gets set, and it must be dropped when the flag gets
 * cleared. This is so that a get() leaving a page in partially
 * validated state (where the caller would drop the reference acquired
 * due to the getting of the type [apparently] failing [-EAGAIN])
 * would not accidentally result in a page left with zero general
 * reference count, but non-zero type reference count (possible when
 * the partial get() is followed immediately by domain destruction).
 * Likewise, the ownership of the single type reference for partially
 * (in-)validated pages is tied to this flag, i.e. the instance
 * setting the flag must not drop that reference, whereas the instance
 * clearing it will have to.
 *
 * If @partial_pte is positive then PTE at @nr_validated_ptes+1 has
 * been partially validated. This implies that the general reference
 * to the page (acquired from get_page_from_lNe()) would be dropped
 * (again due to the apparent failure) and hence must be re-acquired
 * when resuming the validation, but must not be dropped when picking
 * up the page for invalidation.
 *
 * If @partial_pte is negative then PTE at @nr_validated_ptes+1 has
 * been partially invalidated. This is basically the opposite case of
 * above, i.e. the general reference to the page was not dropped in
 * put_page_from_lNe() (due to the apparent failure), and hence it
 * must be dropped when the put operation is resumed (and completes),
 * but it must not be acquired if picking up the page for validation.
 */
u16 nr_validated_ptes;
/*
 * Guest pages with a shadow. This does not conflict with
 * tlbflush_timestamp since page table pages are explicitly not
 * tracked for TLB-flush avoidance when a guest runs in shadow mode.
 */

/* When in use as a shadow, next shadow in this hash chain. */

#define PG_shift(idx)   (BITS_PER_LONG - (idx))
#define PG_mask(x, idx) (x ## UL << PG_shift(idx))
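/*
 * Worked example (editor's note, not in the original source): with
 * BITS_PER_LONG == 64, PG_shift(4) is 60, so PG_mask(1, 4) expands to
 * 1UL << 60 and PG_mask(15, 4) covers bits 60-63. The PGT_/PGC_ flag
 * fields below therefore occupy the top bits of type_info/count_info,
 * leaving the low bits for the reference counts.
 */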
/* The following page types are MUTUALLY EXCLUSIVE. */
#define PGT_none PG_mask(0, 4) /* no special uses of this page */
#define PGT_l1_page_table PG_mask(1, 4) /* using as an L1 page table? */
#define PGT_l2_page_table PG_mask(2, 4) /* using as an L2 page table? */
#define PGT_l3_page_table PG_mask(3, 4) /* using as an L3 page table? */
#define PGT_l4_page_table PG_mask(4, 4) /* using as an L4 page table? */
#define PGT_seg_desc_page PG_mask(5, 4) /* using this page in a GDT/LDT? */
#define PGT_writable_page PG_mask(7, 4) /* has writable mappings? */
#define PGT_shared_page PG_mask(8, 4) /* CoW sharable page */
#define PGT_type_mask PG_mask(15, 4) /* Bits 28-31 or 60-63. */

/* Owning guest has pinned this page to its current type? */
#define _PGT_pinned PG_shift(5)
#define PGT_pinned PG_mask(1, 5)
/* Has this page been validated for use as its current type? */
#define _PGT_validated PG_shift(6)
#define PGT_validated PG_mask(1, 6)
/* PAE only: is this an L2 page directory containing Xen-private mappings? */
#define _PGT_pae_xen_l2 PG_shift(7)
#define PGT_pae_xen_l2 PG_mask(1, 7)
/* Has this page been *partially* validated for use as its current type? */
#define _PGT_partial PG_shift(8)
#define PGT_partial PG_mask(1, 8)
/* Page is locked? */
#define _PGT_locked PG_shift(9)
#define PGT_locked PG_mask(1, 9)

/* Count of uses of this frame as its current type. */
#define PGT_count_width PG_shift(9)
#define PGT_count_mask ((1UL<<PGT_count_width)-1)
/* Cleared when the owning guest 'frees' this page. */
#define _PGC_allocated PG_shift(1)
#define PGC_allocated PG_mask(1, 1)
/* Page is Xen heap? */
#define _PGC_xen_heap PG_shift(2)
#define PGC_xen_heap PG_mask(1, 2)
/* Set when the page is in use as a page table. */
#define _PGC_page_table PG_shift(3)
#define PGC_page_table PG_mask(1, 3)
/* 3-bit PAT/PCD/PWT cache-attribute hint. */
#define PGC_cacheattr_base PG_shift(6)
#define PGC_cacheattr_mask PG_mask(7, 6)
/* Page is broken? */
#define _PGC_broken PG_shift(7)
#define PGC_broken PG_mask(1, 7)
/* Mutually-exclusive page states: { inuse, offlining, offlined, free }. */
#define PGC_state PG_mask(3, 9)
#define PGC_state_inuse PG_mask(0, 9)
#define PGC_state_offlining PG_mask(1, 9)
#define PGC_state_offlined PG_mask(2, 9)
#define PGC_state_free PG_mask(3, 9)
#define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st)
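/*
 * Illustrative usage (editor's example, not from the original header):
 *
 *     if ( page_state_is(pg, free) )
 *         ...the frame is on the free lists...
 *
 * page_state_is(pg, free) expands to
 * (((pg)->count_info & PGC_state) == PGC_state_free).
 */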
/* Count of references to this frame. */
#define PGC_count_width PG_shift(9)
#define PGC_count_mask ((1UL<<PGC_count_width)-1)

unsigned long type_info;

/* The following page types are MUTUALLY EXCLUSIVE. */
#define SGT_none PG_mask(0, 2) /* superpage not in use */
#define SGT_mark PG_mask(1, 2) /* Marked as a superpage */
#define SGT_dynamic PG_mask(2, 2) /* has been dynamically mapped as a superpage */
#define SGT_type_mask PG_mask(3, 2) /* Bits 30-31 or 62-63. */

/* Count of uses of this superpage as its current type. */
#define SGT_count_width PG_shift(3)
#define SGT_count_mask ((1UL<<SGT_count_width)-1)
#if defined(__i386__)
#define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
#define is_xen_heap_mfn(mfn) ({ \
    unsigned long _mfn = (mfn); \
    (_mfn < paddr_to_pfn(xenheap_phys_end)); \
})
#define is_xen_fixed_mfn(mfn) is_xen_heap_mfn(mfn)

#define is_xen_heap_page(page) ((page)->count_info & PGC_xen_heap)
#define is_xen_heap_mfn(mfn) \
    (__mfn_valid(mfn) && is_xen_heap_page(__mfn_to_page(mfn)))
#define is_xen_fixed_mfn(mfn) \
    ((((mfn) << PAGE_SHIFT) >= __pa(&_start)) && \
     (((mfn) << PAGE_SHIFT) <= __pa(&_end)))
#if defined(__i386__)
#define PRtype_info "08lx" /* should only be used for printk's */
#elif defined(__x86_64__)
#define PRtype_info "016lx" /* should only be used for printk's */

/* The number of out-of-sync shadows we allow per vcpu (prime, please) */
#define SHADOW_OOS_PAGES 3

/* OOS fixup entries */
#define SHADOW_OOS_FIXUPS 2
#define page_get_owner(_p) \
    ((struct domain *)((_p)->v.inuse._domain ? \
                       pdx_to_virt((_p)->v.inuse._domain) : NULL))
#define page_set_owner(_p,_d) \
    ((_p)->v.inuse._domain = (_d) ? virt_to_pdx(_d) : 0)

#define maddr_get_owner(ma) (page_get_owner(maddr_to_page((ma))))
#define vaddr_get_owner(va) (page_get_owner(virt_to_page((va))))
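/*
 * Example (editor's illustration): the owning domain of the frame behind a
 * Xen-heap virtual address can be looked up with
 *
 *     struct domain *owner = vaddr_get_owner(va);
 *
 * i.e. page_get_owner(virt_to_page(va)); a NULL result means the page is
 * anonymous (no recorded owner).
 */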
#define XENSHARE_writable 0
#define XENSHARE_readonly 1
extern void share_xen_page_with_guest(
    struct page_info *page, struct domain *d, int readonly);
extern void share_xen_page_with_privileged_guests(
    struct page_info *page, int readonly);
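/*
 * Typical call pattern (editor's sketch): to expose a Xen-owned frame 'pg'
 * read-only to domain 'd':
 *
 *     share_xen_page_with_guest(pg, d, XENSHARE_readonly);
 *
 * Passing XENSHARE_writable instead allows the guest to map it writably.
 */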
#define frame_table ((struct page_info *)FRAMETABLE_VIRT_START)
#define spage_table ((struct spage_info *)SPAGETABLE_VIRT_START)
int get_superpage(unsigned long mfn, struct domain *d);
extern unsigned long max_page;
extern unsigned long total_pages;
void init_frametable(void);

#define PDX_GROUP_COUNT ((1 << L2_PAGETABLE_SHIFT) / \
                         (sizeof(*frame_table) & -sizeof(*frame_table)))
extern unsigned long pdx_group_valid[];
/* Convert between Xen-heap virtual addresses and page-info structures. */
static inline struct page_info *__virt_to_page(const void *v)
{
    unsigned long va = (unsigned long)v;

    ASSERT(va >= XEN_VIRT_START);
    ASSERT(va < DIRECTMAP_VIRT_END);
    if ( va < XEN_VIRT_END )
        va += DIRECTMAP_VIRT_START - XEN_VIRT_START + xen_phys_start;
    ASSERT(va >= DIRECTMAP_VIRT_START);
    ASSERT(va - DIRECTMAP_VIRT_START < DIRECTMAP_VIRT_END);
    return frame_table + ((va - DIRECTMAP_VIRT_START) >> PAGE_SHIFT);
}
static inline void *__page_to_virt(const struct page_info *pg)
{
    ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_VIRT_END);
    /*
     * (sizeof(*pg) & -sizeof(*pg)) selects the LS bit of sizeof(*pg). The
     * division and re-multiplication avoids one shift when sizeof(*pg) is a
     * power of two (otherwise there would be a right shift followed by a
     * left shift, which the compiler can't know it can fold into one).
     */
    return (void *)(DIRECTMAP_VIRT_START +
                    ((unsigned long)pg - FRAMETABLE_VIRT_START) /
                    (sizeof(*pg) / (sizeof(*pg) & -sizeof(*pg))) *
                    (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg))));
}
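/*
 * Worked example (editor's note, values hypothetical): if sizeof(*pg) were
 * 40 bytes, its lowest set bit is 8, so the frame-table offset is divided
 * by 5 (40/8) and multiplied by PAGE_SIZE/8 (512 for 4K pages), which
 * equals offset/40*4096 without a divide followed by a shift. When
 * sizeof(*pg) is a power of two the divisor is 1 and the offset is simply
 * multiplied by PAGE_SIZE/sizeof(*pg).
 */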
int free_page_type(struct page_info *page, unsigned long type,

int is_iomem_page(unsigned long mfn);

void clear_superpage_mark(struct page_info *page);
/*
 * page_lock() is used for two purposes: pte serialization, and memory sharing.
 *
 * All users of page lock for pte serialization live in mm.c, use it
 * to lock a page table page during pte updates, do not take other locks within
 * the critical section delimited by page_lock/unlock, and perform no
 * nesting.
 *
 * All users of page lock for memory sharing live in mm/mem_sharing.c. page_lock()
 * is used in memory sharing to protect addition (share) and removal (unshare)
 * of (gfn,domain) tuples to a list of gfns that the shared page is currently
 * backing. Nesting may happen when sharing (and locking) two pages -- deadlock
 * is avoided by locking pages in increasing order.
 * All memory sharing code paths take the p2m lock of the affected gfn before
 * taking the lock for the underlying page. We enforce ordering between page_lock
 * and p2m_lock using an mm-locks.h construct.
 *
 * These two users (pte serialization and memory sharing) do not collide, since
 * sharing is only supported for hvm guests, which do not perform pv pte updates.
 */
int page_lock(struct page_info *page);
void page_unlock(struct page_info *page);
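/*
 * Sketch of the pte-serialization pattern described above (editor's
 * illustration, error handling abbreviated):
 *
 *     if ( !page_lock(page) )
 *         return 0;      (lock refused, e.g. the page changed type)
 *     ...update the PTEs of this page table page, taking no other locks...
 *     page_unlock(page);
 */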
struct domain *page_get_owner_and_reference(struct page_info *page);
void put_page(struct page_info *page);
int get_page(struct page_info *page, struct domain *domain);
void put_page_type(struct page_info *page);
int get_page_type(struct page_info *page, unsigned long type);
int put_page_type_preemptible(struct page_info *page);
int get_page_type_preemptible(struct page_info *page, unsigned long type);
int get_page_from_l1e(
    l1_pgentry_t l1e, struct domain *l1e_owner, struct domain *pg_owner);
void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner);

static inline void put_page_and_type(struct page_info *page)

static inline int put_page_and_type_preemptible(struct page_info *page,

    rc = put_page_type_preemptible(page);
    if ( likely(rc == 0) )

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,

    int rc = get_page(page, domain);
    if ( likely(rc) && unlikely(!get_page_type(page, type)) )
#define ASSERT_PAGE_IS_TYPE(_p, _t) \
    ASSERT(((_p)->u.inuse.type_info & PGT_type_mask) == (_t)); \
    ASSERT(((_p)->u.inuse.type_info & PGT_count_mask) != 0)
#define ASSERT_PAGE_IS_DOMAIN(_p, _d) \
    ASSERT(((_p)->count_info & PGC_count_mask) != 0); \
    ASSERT(page_get_owner(_p) == (_d))
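/*
 * Example (editor's illustration): before operating on a page that must
 * already be validated as an L1 pagetable owned by domain 'd':
 *
 *     ASSERT_PAGE_IS_TYPE(page, PGT_l1_page_table);
 *     ASSERT_PAGE_IS_DOMAIN(page, d);
 */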
// Quick test for whether a given page can be represented directly in CR3.
#if CONFIG_PAGING_LEVELS == 3
#define MFN_FITS_IN_CR3(_MFN) !(mfn_x(_MFN) >> 20)

/* returns a lowmem machine address of the copied L3 root table */
pae_copy_root(struct vcpu *v, l3_pgentry_t *l3tab);
#endif /* CONFIG_PAGING_LEVELS == 3 */

int check_descriptor(const struct domain *, struct desc_struct *d);

extern bool_t opt_allow_superpage;
extern bool_t mem_hotplug;
/******************************************************************************
 * With shadow pagetables, the different kinds of address start
 * to get confusing.
 *
 * Virtual addresses are what they usually are: the addresses that are used
 * to access memory while the guest is running. The MMU translates from
 * virtual addresses to machine addresses.
 *
 * (Pseudo-)physical addresses are the abstraction of physical memory the
 * guest uses for allocation and so forth. For the purposes of this code,
 * we can largely ignore them.
 *
 * Guest frame numbers (gfns) are the entries that the guest puts in its
 * pagetables. For normal paravirtual guests, they are actual frame numbers,
 * with the translation done by the guest.
 *
 * Machine frame numbers (mfns) are the entries that the hypervisor puts
 * in the shadow page tables.
 *
 * Elsewhere in the xen code base, the name "gmfn" is generally used to refer
 * to a "machine frame number, from the guest's perspective", or in other
 * words, pseudo-physical frame numbers. However, in the shadow code, the
 * term "gmfn" means "the mfn of a guest page"; this combines naturally with
 * other terms such as "smfn" (the mfn of a shadow page), gl2mfn (the mfn of a
 * guest L2 page), etc...
 */
/* With this defined, we do some ugly things to force the compiler to
 * give us type safety between mfns and gfns and other integers.
 * TYPE_SAFE(int foo) defines a foo_t, and _foo() and foo_x() functions
 * that translate between int and foo_t.
 *
 * It does have some performance cost because the types now have
 * a different storage attribute, so we may not want it on all the time. */

#define TYPE_SAFETY 1

#define TYPE_SAFE(_type,_name) \
typedef struct { _type _name; } _name##_t; \
static inline _name##_t _##_name(_type n) { return (_name##_t) { n }; } \
static inline _type _name##_x(_name##_t n) { return n._name; }

#define TYPE_SAFE(_type,_name) \
typedef _type _name##_t; \
static inline _name##_t _##_name(_type n) { return n; } \
static inline _type _name##_x(_name##_t n) { return n; }
TYPE_SAFE(unsigned long,mfn);

/* Macro for printk formats: use as printk("%"PRI_mfn"\n", mfn_x(foo)); */
#define PRI_mfn "05lx"
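/*
 * Example (editor's illustration): with TYPE_SAFETY enabled, mfn_t is a
 * distinct struct type, so conversions must be explicit:
 *
 *     mfn_t mfn = _mfn(raw_mfn);              wrap a raw unsigned long
 *     unsigned long raw = mfn_x(mfn);         unwrap it again
 *     printk("mfn %"PRI_mfn"\n", mfn_x(mfn));
 */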
/*
 * The MPT (machine->physical mapping table) is an array of word-sized
 * values, indexed on machine frame number. It is expected that guest OSes
 * will use it to store a "physical" frame number to give the appearance of
 * contiguous (or near contiguous) physical memory.
 */
#undef machine_to_phys_mapping
#define machine_to_phys_mapping ((unsigned long *)RDWR_MPT_VIRT_START)
#define INVALID_M2P_ENTRY (~0UL)
#define VALID_M2P(_e) (!((_e) & (1UL<<(BITS_PER_LONG-1))))
#define SHARED_M2P_ENTRY (~0UL - 1UL)
#define SHARED_M2P(_e) ((_e) == SHARED_M2P_ENTRY)
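/*
 * Illustrative M2P lookup (editor's sketch): both the "no translation" and
 * the "shared page" encodings have the top bit set, so check them before
 * trusting the returned frame number:
 *
 *     unsigned long gpfn = machine_to_phys_mapping[mfn];
 *     if ( SHARED_M2P(gpfn) )
 *         ...page is shared, no single backing gfn...
 *     else if ( !VALID_M2P(gpfn) )
 *         ...no translation recorded for this mfn...
 */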
#define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
#define _set_gpfn_from_mfn(mfn, pfn) ({ \
    struct domain *d = page_get_owner(__mfn_to_page(mfn)); \
    unsigned long entry = (d && (d == dom_cow)) ? \
        SHARED_M2P_ENTRY : (pfn); \
    ((void)((mfn) >= (RDWR_COMPAT_MPT_VIRT_END - RDWR_COMPAT_MPT_VIRT_START) / 4 || \
            (compat_machine_to_phys_mapping[(mfn)] = (unsigned int)(entry))), \
     machine_to_phys_mapping[(mfn)] = (entry)); \
})

#define _set_gpfn_from_mfn(mfn, pfn) ({ \
    struct domain *d = page_get_owner(__mfn_to_page(mfn)); \
    if ( d && (d == dom_cow) ) \
        machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY; \
    else \
        machine_to_phys_mapping[(mfn)] = (pfn); \
})
/*
 * Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until
 * the machine_to_phys_mapping is actually set up.
 */
extern bool_t machine_to_phys_mapping_valid;
#define set_gpfn_from_mfn(mfn, pfn) do { \
    if ( machine_to_phys_mapping_valid ) \
        _set_gpfn_from_mfn(mfn, pfn); \
} while (0)

#define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)])
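/*
 * Editor's illustration of the intended round trip (assumes the M2P is
 * already initialised, i.e. machine_to_phys_mapping_valid is set, and the
 * page is not owned by dom_cow):
 *
 *     set_gpfn_from_mfn(mfn, gpfn);
 *     ASSERT(get_gpfn_from_mfn(mfn) == gpfn);
 *
 * For dom_cow-owned (shared) pages the stored value is SHARED_M2P_ENTRY
 * rather than the passed gpfn.
 */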
#define mfn_to_gmfn(_d, mfn) \
    ( (paging_mode_translate(_d)) \
      ? get_gpfn_from_mfn(mfn) \
      : (mfn) )

#define INVALID_MFN (~0UL)

#define compat_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define compat_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
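/*
 * Worked example (editor's note): the compat CR3 format keeps the low 20
 * bits of the pfn in bits 12-31 and rotates the high pfn bits into bits
 * 0-11, so a pfn above 2^20 still fits in 32 bits. For pfn 0x123456:
 *
 *     compat_pfn_to_cr3(0x123456)   == 0x23456001
 *     compat_cr3_to_pfn(0x23456001) == 0x123456
 */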
void memguard_init(void);
void memguard_guard_range(void *p, unsigned long l);
void memguard_unguard_range(void *p, unsigned long l);

#define memguard_init() ((void)0)
#define memguard_guard_range(_p,_l) ((void)0)
#define memguard_unguard_range(_p,_l) ((void)0)

void memguard_guard_stack(void *p);
void memguard_unguard_stack(void *p);

int ptwr_do_page_fault(struct vcpu *, unsigned long,
                       struct cpu_user_regs *);
int mmio_ro_do_page_fault(struct vcpu *, unsigned long,
                          struct cpu_user_regs *);

int audit_adjust_pgtables(struct domain *d, int dir, int noisy);

extern int pagefault_by_memadd(unsigned long addr, struct cpu_user_regs *regs);
extern int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs);

static inline int pagefault_by_memadd(unsigned long addr,
                                      struct cpu_user_regs *regs)

static inline int handle_memadd_fault(unsigned long addr,
                                      struct cpu_user_regs *regs)
#define AUDIT_SHADOW_ALREADY_LOCKED ( 1u << 0 )
#define AUDIT_ERRORS_OK ( 1u << 1 )
#define AUDIT_QUIET ( 1u << 2 )

void _audit_domain(struct domain *d, int flags);
#define audit_domain(_d) _audit_domain((_d), AUDIT_ERRORS_OK)
void audit_domains(void);

#define _audit_domain(_d, _f) ((void)0)
#define audit_domain(_d) ((void)0)
#define audit_domains() ((void)0)

int new_guest_cr3(unsigned long pfn);
void make_cr3(struct vcpu *v, unsigned long mfn);
void update_cr3(struct vcpu *v);
void propagate_page_fault(unsigned long addr, u16 error_code);
void *do_page_walk(struct vcpu *v, unsigned long addr);

int __sync_local_execstate(void);

/* Arch-specific portion of memory_op hypercall. */
long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
long subarch_memory_op(int op, XEN_GUEST_HANDLE(void) arg);
int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void));
int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE(void));
    struct domain *d, struct page_info *page, unsigned int memflags);

    struct domain *d, struct page_info *page, unsigned int memflags);

int map_ldt_shadow_page(unsigned int);

extern int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm);

static inline int memory_add(uint64_t spfn, uint64_t epfn, uint32_t pxm)

void domain_set_alloc_bitsize(struct domain *d);
unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);

# define domain_set_alloc_bitsize(d) ((void)0)
# define domain_clamp_alloc_bitsize(d, b) (b)

unsigned long domain_get_maximum_gpfn(struct domain *d);

void mem_event_cleanup(struct domain *d);

static inline void mem_event_cleanup(struct domain *d) {}

extern struct domain *dom_xen, *dom_io, *dom_cow; /* for vmcoreinfo */

/* Definition of an mm lock: spinlock with extra fields for debugging */
typedef struct mm_lock {
    int locker; /* processor which holds the lock */
    const char *locker_function; /* func that took it */

typedef struct mm_rwlock {
    int locker; /* CPU that holds the write lock */
    const char *locker_function; /* func that took it */

#endif /* __ASM_X86_MM_H__ */