2
#ifndef __X86_64_PAGE_H__
#define __X86_64_PAGE_H__

/*
 * Page-table geometry for 4-level x86-64 paging with 4kB base pages:
 * the shift is the bit position in a virtual address where each level's
 * index field starts (L1 = leaf PTEs, L4 = root).
 */
#define L1_PAGETABLE_SHIFT 12
#define L2_PAGETABLE_SHIFT 21
#define L3_PAGETABLE_SHIFT 30
#define L4_PAGETABLE_SHIFT 39
#define PAGE_SHIFT L1_PAGETABLE_SHIFT
#define ROOT_PAGETABLE_SHIFT L4_PAGETABLE_SHIFT

/* Every level decodes 9 bits of virtual address => 512 entries per table. */
#define PAGETABLE_ORDER 9
#define L1_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
#define L2_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
#define L3_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
#define L4_PAGETABLE_ENTRIES (1<<PAGETABLE_ORDER)
#define ROOT_PAGETABLE_ENTRIES L4_PAGETABLE_ENTRIES
19
/* Linear offset of the direct map / the Xen image within virtual space. */
#define __PAGE_OFFSET DIRECTMAP_VIRT_START
#define __XEN_VIRT_START XEN_VIRT_START

/* These are architectural limits. Current CPUs support only 40-bit phys. */
#define PADDR_BITS 52
#define VADDR_BITS 48
#define PADDR_MASK ((1UL << PADDR_BITS)-1)
#define VADDR_MASK ((1UL << VADDR_BITS)-1)

/*
 * A 48-bit virtual address is canonical iff bits 63:47 all match, i.e.
 * sign-extending bit 47 reproduces the value (arithmetic right shifts
 * of both 47 and 63 then agree).
 */
#define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))
32
#ifndef __ASSEMBLY__

#include <xen/config.h>
#include <asm/types.h>

/*
 * PFN<->PDX compression state: a hole of always-zero PFN bits is squeezed
 * out so the frame table stays dense.  The masks/shift below select the
 * bits below the hole, the hole itself, and the bits above it; the ma_*
 * variants are the same masks scaled to machine addresses.  All are
 * defined and initialised in arch code (see pfn_pdx_hole_setup()).
 */
extern unsigned long max_pdx;
extern unsigned long pfn_pdx_bottom_mask, ma_va_bottom_mask;
extern unsigned int pfn_pdx_hole_shift;
extern unsigned long pfn_hole_mask;
extern unsigned long pfn_top_mask, ma_top_mask;
extern void pfn_pdx_hole_setup(unsigned long);
42
/* struct page_info* <-> page-data index (pdx): frame_table is pdx-indexed. */
#define page_to_pdx(pg) ((pg) - frame_table)
#define pdx_to_page(pdx) (frame_table + (pdx))
/*
 * Note: These are solely for the use by page_{get,set}_owner(), and
 * therefore don't need to handle the XEN_VIRT_{START,END} range.
 */
#define virt_to_pdx(va) (((unsigned long)(va) - DIRECTMAP_VIRT_START) >> \
                         PAGE_SHIFT)
#define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \
                          ((unsigned long)(pdx) << PAGE_SHIFT)))

/* Non-trivial because of the PFN compression hole; defined in arch code. */
extern int __mfn_valid(unsigned long mfn);
55
static inline unsigned long pfn_to_pdx(unsigned long pfn)
57
return (pfn & pfn_pdx_bottom_mask) |
58
((pfn & pfn_top_mask) >> pfn_pdx_hole_shift);
61
static inline unsigned long pdx_to_pfn(unsigned long pdx)
63
return (pdx & pfn_pdx_bottom_mask) |
64
((pdx << pfn_pdx_hole_shift) & pfn_top_mask);
67
static inline unsigned long __virt_to_maddr(unsigned long va)
69
ASSERT(va >= XEN_VIRT_START);
70
ASSERT(va < DIRECTMAP_VIRT_END);
71
if ( va >= DIRECTMAP_VIRT_START )
72
va -= DIRECTMAP_VIRT_START;
75
ASSERT(va < XEN_VIRT_END);
76
va += xen_phys_start - XEN_VIRT_START;
78
return (va & ma_va_bottom_mask) |
79
((va << pfn_pdx_hole_shift) & ma_top_mask);
82
static inline void *__maddr_to_virt(unsigned long ma)
84
ASSERT(ma < DIRECTMAP_VIRT_END - DIRECTMAP_VIRT_START);
85
return (void *)(DIRECTMAP_VIRT_START +
86
((ma & ma_va_bottom_mask) |
87
((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
90
/* read access (should only be used for debug printk's) */
/* A raw 64-bit page-table entry.  (Upstream spells this u64; the typedef
 * was lost in this copy — unsigned long is identical on x86-64.) */
typedef unsigned long intpte_t;
#define PRIpte "016lx"

/* One wrapper struct per level so the l?e_* accessors are type-checked. */
typedef struct { intpte_t l1; } l1_pgentry_t;
typedef struct { intpte_t l2; } l2_pgentry_t;
typedef struct { intpte_t l3; } l3_pgentry_t;
typedef struct { intpte_t l4; } l4_pgentry_t;
typedef l4_pgentry_t root_pgentry_t;
100
#endif /* !__ASSEMBLY__ */
102
/*
 * A PTE is a single aligned word on x86-64, so plain loads/stores suffice
 * for the "atomic" accessors — no cmpxchg or split access needed.
 */
#define pte_read_atomic(ptep) (*(ptep))
#define pte_write_atomic(ptep, pte) (*(ptep) = (pte))
#define pte_write(ptep, pte) (*(ptep) = (pte))

/* Given a virtual address, get an entry offset into a linear page table. */
#define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)
#define l2_linear_offset(_a) (((_a) & VADDR_MASK) >> L2_PAGETABLE_SHIFT)
#define l3_linear_offset(_a) (((_a) & VADDR_MASK) >> L3_PAGETABLE_SHIFT)
#define l4_linear_offset(_a) (((_a) & VADDR_MASK) >> L4_PAGETABLE_SHIFT)
112
/* Every L1 and L3 slot may be controlled by the guest. */
#define is_guest_l1_slot(_s) (1)
/*
 * 32-bit PV (PAE) domains may not touch the Xen-reserved tail of an L2
 * page that carries the PGT_pae_xen_l2 type.
 */
#define is_guest_l2_slot(_d, _t, _s)                       \
    ( !is_pv_32bit_domain(_d) ||                           \
      !((_t) & PGT_pae_xen_l2) ||                          \
      ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
#define is_guest_l3_slot(_s) (1)
/*
 * L4: 64-bit guests own all slots outside the Xen-reserved range; the
 * 32-bit-PV arm was reconstructed as slot 0 only (upstream form) —
 * NOTE(review): confirm against the original source.
 */
#define is_guest_l4_slot(_d, _s)                    \
    ( is_pv_32bit_domain(_d)                        \
      ? ((_s) == 0)                                 \
      : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||  \
         ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
124
/* The "root" page table on x86-64 is simply the L4 table: alias the
 * generic root_* accessor names onto the l4e_* implementations. */
#define root_get_pfn l4e_get_pfn
#define root_get_flags l4e_get_flags
#define root_get_intpte l4e_get_intpte
#define root_empty l4e_empty
#define root_from_paddr l4e_from_paddr
#define PGT_root_page_table PGT_l4_page_table
133
/*
 * PTE layout as seen by these helpers:
 * 40-bit pfn = (pte[51:12])
 * 24-bit flags = (pte[63:52],pte[11:0])
 */

/* Extract flags into 24-bit integer, or turn 24-bit flags into a pte mask. */
#define get_pte_flags(x) (((int)((x) >> 40) & ~0xFFF) | ((int)(x) & 0xFFF))
#define put_pte_flags(x) (((intpte_t)((x) & ~0xFFF) << 40) | ((x) & 0xFFF))

/* Bit 23 of a 24-bit flag mask. This corresponds to bit 63 of a pte.*/
#define _PAGE_NX_BIT (1U<<23)
/* Only honour NX when the CPU actually supports it. */
#define _PAGE_NX (cpu_has_nx ? _PAGE_NX_BIT : 0U)

/* Bit 22 of a 24-bit flag mask. This corresponds to bit 62 of a pte.*/
#define _PAGE_GNTTAB (1U<<22)
149
/*
 * Disallow unused flag bits plus PAT/PSE, PCD, PWT and GLOBAL.
 * Permit the NX bit if the hardware supports it.
 * Note that range [62:52] is available for software use on x86/64.
 */
#define BASE_DISALLOW_MASK (0xFF800198U & ~_PAGE_NX)

/* Per-level refinements of the base mask (flag bits a guest may not set). */
#define L1_DISALLOW_MASK (BASE_DISALLOW_MASK | _PAGE_GNTTAB)
#define L2_DISALLOW_MASK (BASE_DISALLOW_MASK & ~_PAGE_PSE)
#define L3_DISALLOW_MASK (BASE_DISALLOW_MASK)
#define L4_DISALLOW_MASK (BASE_DISALLOW_MASK)

/* Compat (32-bit PAE) guest L3 entries: nearly all flag bits must be clear. */
#define COMPAT_L3_DISALLOW_MASK 0xFFFFF198U
162
/* Xen's own mappings are GLOBAL so they survive guest CR3 switches. */
#define PAGE_HYPERVISOR (__PAGE_HYPERVISOR | _PAGE_GLOBAL)
#define PAGE_HYPERVISOR_NOCACHE (__PAGE_HYPERVISOR_NOCACHE | _PAGE_GLOBAL)

#define USER_MAPPINGS_ARE_GLOBAL
#ifdef USER_MAPPINGS_ARE_GLOBAL
/*
 * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
 * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
 * is asserted for both.
 */
#define _PAGE_GUEST_KERNEL (1U<<12)
/* Global bit is allowed to be set on L1 PTEs. Intended for user mappings. */
#undef L1_DISALLOW_MASK
#define L1_DISALLOW_MASK ((BASE_DISALLOW_MASK | _PAGE_GNTTAB) & ~_PAGE_GLOBAL)
#else
#define _PAGE_GUEST_KERNEL 0
#endif
180
#endif /* __X86_64_PAGE_H__ */
188
* indent-tabs-mode: nil