/*
 * Xen reserves a memory page of GDT entries.
 * No guest GDT entries exist beyond the Xen reserved area.
 */
#define NR_RESERVED_GDT_PAGES   1
#define NR_RESERVED_GDT_BYTES   (NR_RESERVED_GDT_PAGES * PAGE_SIZE)
#define NR_RESERVED_GDT_ENTRIES (NR_RESERVED_GDT_BYTES / 8)

#define LAST_RESERVED_GDT_PAGE  \
    (FIRST_RESERVED_GDT_PAGE + NR_RESERVED_GDT_PAGES - 1)
#define LAST_RESERVED_GDT_BYTE  \
    (FIRST_RESERVED_GDT_BYTE + NR_RESERVED_GDT_BYTES - 1)
#define LAST_RESERVED_GDT_ENTRY \
    (FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
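
/* With the usual 4 KiB PAGE_SIZE this reserves 512 eight-byte descriptors. */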

#define LDT_ENTRY_SIZE 8

#if defined(__x86_64__)

#define FLAT_COMPAT_RING1_CS 0xe019  /* GDT index 259 */
#define FLAT_COMPAT_RING1_DS 0xe021  /* GDT index 260 */
#define FLAT_COMPAT_RING1_SS 0xe021  /* GDT index 260 */
#define FLAT_COMPAT_RING3_CS 0xe02b  /* GDT index 261 */
#define FLAT_COMPAT_RING3_DS 0xe033  /* GDT index 262 */
#define FLAT_COMPAT_RING3_SS 0xe033  /* GDT index 262 */

#define FLAT_COMPAT_KERNEL_DS FLAT_COMPAT_RING1_DS
#define FLAT_COMPAT_KERNEL_CS FLAT_COMPAT_RING1_CS
#define FLAT_COMPAT_KERNEL_SS FLAT_COMPAT_RING1_SS
#define FLAT_COMPAT_USER_DS   FLAT_COMPAT_RING3_DS
#define FLAT_COMPAT_USER_CS   FLAT_COMPAT_RING3_CS
#define FLAT_COMPAT_USER_SS   FLAT_COMPAT_RING3_SS
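
/*
 * A 32-bit compat PV kernel runs in ring 1 (see GUEST_KERNEL_RPL below), so
 * its kernel selectors alias the RING1 set and its user selectors the RING3
 * set.
 */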

#define TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
#define LDT_ENTRY (TSS_ENTRY + 2)
#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 2)
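/*
 * In long mode TSS and LDT descriptors are 16 bytes, so each occupies two
 * 8-byte GDT slots; hence the +2 spacing here (the i386 layout below uses +1).
 */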

#elif defined(__i386__)

#define FLAT_COMPAT_KERNEL_CS FLAT_KERNEL_CS
#define FLAT_COMPAT_KERNEL_DS FLAT_KERNEL_DS
#define FLAT_COMPAT_KERNEL_SS FLAT_KERNEL_SS
#define FLAT_COMPAT_USER_CS   FLAT_USER_CS
#define FLAT_COMPAT_USER_DS   FLAT_USER_DS
#define FLAT_COMPAT_USER_SS   FLAT_USER_SS

#define DOUBLEFAULT_TSS_ENTRY FIRST_RESERVED_GDT_ENTRY

#define TSS_ENTRY (FIRST_RESERVED_GDT_ENTRY + 8)
#define LDT_ENTRY (TSS_ENTRY + 1)
#define PER_CPU_GDT_ENTRY (LDT_ENTRY + 1)

#endif

#ifndef __ASSEMBLY__

#if defined(__x86_64__)
#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
#elif defined(__i386__)
#define GUEST_KERNEL_RPL(d) ((void)(d), 1)
#endif

/* Fix up the RPL of a guest segment selector. */
#define __fixup_guest_selector(d, sel)                             \
({                                                                 \
    uint16_t _rpl = GUEST_KERNEL_RPL(d);                           \
    (sel) = (((sel) & 3) >= _rpl) ? (sel) : (((sel) & ~3) | _rpl); \
})
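
/*
 * For example, a 64-bit PV guest (GUEST_KERNEL_RPL == 3) loading selector
 * 0x0810 (RPL 0) has it rewritten to 0x0813; a selector already at RPL 3 is
 * left unchanged.
 */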

/* Stack selectors don't need fixing up if the kernel runs in ring 0. */
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
#define fixup_guest_stack_selector(d, ss) ((void)0)
#else
#define fixup_guest_stack_selector(d, ss) __fixup_guest_selector(d, ss)
#endif

/*
 * Code selectors are always fixed up. This allows the Xen exit stub to detect
 * a return to guest context, even when the guest kernel runs in ring 0.
 */
#define fixup_guest_code_selector(d, cs) __fixup_guest_selector(d, cs)

/*
 * We need this check because enforcing the correct guest kernel RPL is
 * insufficient if the selector is poked into an interrupt, trap or call gate.
 * The selector RPL is ignored when a gate is accessed. We must therefore make
 * sure that the selector does not reference a Xen-private segment.
 *
 * Note that selectors used only by IRET do not need to be checked. If the
 * descriptor DPL differs from CS RPL then we'll #GP.
 *
 * Stack and data selectors do not need to be checked. If the DPL of DS, ES,
 * FS or GS is below CPL then they'll be cleared automatically. If SS RPL or
 * DPL differs from CS RPL then we'll #GP.
 */
#define guest_gate_selector_okay(d, sel)                                \
    ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
     ((sel) == (!is_pv_32on64_domain(d) ?                               \
                FLAT_KERNEL_CS :                /* Xen default seg? */  \
                FLAT_COMPAT_KERNEL_CS)) ||                              \
     ((sel) & 4))                               /* LDT seg? */
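
/*
 * In other words: a selector that indexes the guest's own GDT range (below
 * the reserved area), the flat kernel CS appropriate to the domain's ABI, or
 * any LDT-based selector is accepted; anything else pointing into Xen's
 * reserved GDT page is rejected.
 */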

#endif /* __ASSEMBLY__ */

/* These are bitmasks for the high 32 bits of a descriptor table entry. */
#define _SEGMENT_TYPE    (15<< 8)
#define _SEGMENT_WR      ( 1<< 9) /* Writeable (data) or Readable (code)
                                     segment */
#define _SEGMENT_EC      ( 1<<10) /* Expand-down or Conforming segment */
#define _SEGMENT_CODE    ( 1<<11) /* Code (vs data) segment for non-system
                                     descriptors */
#define _SEGMENT_S       ( 1<<12) /* System descriptor (yes iff S==0) */
#define _SEGMENT_DPL     ( 3<<13) /* Descriptor Privilege Level */
#define _SEGMENT_P       ( 1<<15) /* Segment Present */
#define _SEGMENT_L       ( 1<<21) /* 64-bit segment */
#define _SEGMENT_DB      ( 1<<22) /* 16- or 32-bit segment */
#define _SEGMENT_G       ( 1<<23) /* Granularity */
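
/*
 * For example, a flat 64-bit ring-0 code descriptor such as
 * 0x00af9a000000ffff has high word 0x00af9a00, i.e.
 * _SEGMENT_G | _SEGMENT_L | _SEGMENT_P | _SEGMENT_S | _SEGMENT_CODE |
 * _SEGMENT_WR, plus limit bits 19:16.
 */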

#ifndef __ASSEMBLY__

#if defined(__x86_64__)

#define _set_gate(gate_addr,type,dpl,addr)               \
do {                                                     \
    (gate_addr)->a = 0;                                  \
    wmb(); /* disable gate /then/ rewrite */             \
    (gate_addr)->b =                                     \
        ((unsigned long)(addr) >> 32);                   \
    wmb(); /* rewrite /then/ enable gate */              \
    (gate_addr)->a =                                     \
        (((unsigned long)(addr) & 0xFFFF0000UL) << 32) | \
        ((unsigned long)(dpl) << 45) |                   \
        ((unsigned long)(type) << 40) |                  \
        ((unsigned long)(addr) & 0xFFFFUL) |             \
        ((unsigned long)__HYPERVISOR_CS64 << 16) |       \
        (1UL << 47);                                     \
} while (0)
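
/*
 * The two wmb()s ensure the gate is never live while half-written: the low
 * word (which holds the Present bit, 1UL<<47) is cleared first, the high word
 * is rewritten, and only then is the low word written back with P set.
 */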

#define _set_tssldt_desc(desc,addr,limit,type)           \
do {                                                     \
    (desc)[0].b = (desc)[1].b = 0;                       \
    wmb(); /* disable entry /then/ rewrite */            \
    (desc)[0].a =                                        \
        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF);   \
    (desc)[1].a = (u32)(((unsigned long)(addr)) >> 32);  \
    wmb(); /* rewrite /then/ enable entry */             \
    (desc)[0].b =                                        \
        ((u32)(addr) & 0xFF000000U) |                    \
        ((u32)(type) << 8) | 0x8000U |                   \
        (((u32)(addr) & 0x00FF0000U) >> 16);             \
} while (0)
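
/*
 * A long-mode TSS/LDT descriptor spans two consecutive 8-byte slots: desc[0]
 * carries the limit, base[31:0], type and Present bit (0x8000U), while
 * desc[1].a carries base[63:32].
 */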

#elif defined(__i386__)

typedef struct desc_struct idt_entry_t;

#define _set_gate(gate_addr,type,dpl,addr)       \
do {                                             \
    (gate_addr)->b = 0;                          \
    wmb(); /* disable gate /then/ rewrite */     \
    (gate_addr)->a =                             \
        ((unsigned long)(addr) & 0xFFFFUL) |     \
        ((unsigned long)__HYPERVISOR_CS << 16);  \
    wmb(); /* rewrite /then/ enable gate */      \
    (gate_addr)->b =                             \
        ((unsigned long)(addr) & 0xFFFF0000UL) | \
        ((unsigned long)(dpl) << 13) |           \
        ((unsigned long)(type) << 8) |           \
        (1UL << 15);                             \
} while (0)

#define _set_tssldt_desc(desc,addr,limit,type)         \
do {                                                   \
    (desc)->b = 0;                                     \
    wmb(); /* disable entry /then/ rewrite */          \
    (desc)->a =                                        \
        ((u32)(addr) << 16) | ((u32)(limit) & 0xFFFF); \
    wmb(); /* rewrite /then/ enable entry */           \
    (desc)->b =                                        \
        ((u32)(addr) & 0xFF000000U) |                  \
        ((u32)(type) << 8) | 0x8000U |                 \
        (((u32)(addr) & 0x00FF0000U) >> 16);           \
} while (0)
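
/*
 * On i386 a TSS/LDT descriptor fits in a single 8-byte slot, which is why the
 * TSS_ENTRY/LDT_ENTRY layout above advances by 1 rather than 2.
 */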

DECLARE_PER_CPU(struct tss_struct *, doublefault_tss);

#endif

struct desc_ptr {
    unsigned short limit;
    unsigned long base;
} __attribute__((__packed__)) ;
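
/*
 * This packed {limit, base} pair is the pseudo-descriptor format consumed by
 * lgdt/lidt (and produced by sgdt/sidt).
 */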

extern struct desc_struct boot_cpu_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, gdt_table);

#ifdef CONFIG_COMPAT
extern struct desc_struct boot_cpu_compat_gdt_table[];
DECLARE_PER_CPU(struct desc_struct *, compat_gdt_table);
#else
# define boot_cpu_compat_gdt_table boot_cpu_gdt_table
# define per_cpu__compat_gdt_table per_cpu__gdt_table
#endif

extern void set_intr_gate(unsigned int irq, void * addr);
extern void load_TR(void);
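
/*
 * set_intr_gate() is expected to install an interrupt gate (type 14, DPL 0)
 * for the given vector, e.g. by applying _set_gate() to the matching IDT
 * entry; load_TR() loads this CPU's TSS selector into the task register.
 */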

#endif /* !__ASSEMBLY__ */

#endif /* __ARCH_DESC_H */