/*
 * include/asm-i386/i387.h
 *
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */
#ifndef __ASM_I386_I387_H
#define __ASM_I386_I387_H

#include <xen/sched.h>
#include <asm/processor.h>

/* Size in bytes of the XSAVE save area (probed at boot by xsave_init()). */
extern unsigned int xsave_cntxt_size;

/*
 * Low/high 32 bits of the supported xfeature mask.
 * NOTE(review): presumably CPUID.0xD EDX:EAX — confirm in xsave_init().
 */
extern u32 xfeature_low, xfeature_high;

extern void xsave_init(void);
extern void xsave_init_save_area(void *save_area);
/* Feature bits, as used in XCR0 and in the save area's xstate_bv vector. */
#define XSTATE_FP     (1 << 0)                  /* x87 FPU/MMX state */
#define XSTATE_SSE    (1 << 1)                  /* SSE (XMM) state */
#define XSTATE_YMM    (1 << 2)                  /* AVX (YMM) state */
#define XSTATE_FP_SSE (XSTATE_FP | XSTATE_SSE)  /* always-enabled legacy set */
#define XCNTXT_MASK   (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)

/* YMM state lives right after the 512-byte legacy area + 64-byte header. */
#define XSTATE_YMM_OFFSET  (512 + 64)
#define XSTATE_YMM_SIZE    256
struct { char x[512]; } fpu_sse; /* FPU/MMX, SSE */
38
} xsave_hdr; /* The 64-byte header */
40
struct { char x[XSTATE_YMM_SIZE]; } ymm; /* YMM */
41
char data[]; /* Future new states */
42
} __attribute__ ((packed, aligned (64)));
44
/* Index of XCR0, the extended-feature-enable control register. */
#define XCR_XFEATURE_ENABLED_MASK 0

/*
 * 64-bit builds need a REX.W prefix on XSAVE/XRSTOR to use the 64-bit
 * form; 32-bit builds must not emit it.
 */
#ifdef __x86_64__
#define REX_PREFIX "0x48, "
#else
#define REX_PREFIX
#endif
/*
 * Write @xfeature_mask to extended control register @index (XSETBV).
 * Encoded as raw bytes since older assemblers lack the mnemonic.
 * Privileged: faults outside ring 0.
 */
static inline void xsetbv(u32 index, u64 xfeature_mask)
{
    u32 hi = xfeature_mask >> 32;
    u32 lo = (u32)xfeature_mask;

    asm volatile ( ".byte 0x0f,0x01,0xd1" :: "c" (index),
                   "a" (lo), "d" (hi) );
}
/* Convenience wrapper: load XCR0 with @xfeature_mask. */
static inline void set_xcr0(u64 xfeature_mask)
{
    xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeature_mask);
}
static inline void xsave(struct vcpu *v)
68
u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
69
u32 lo = mask, hi = mask >> 32;
70
struct xsave_struct *ptr;
72
ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
74
asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x27"
76
: "a" (lo), "d" (hi), "D"(ptr)
80
static inline void xrstor(struct vcpu *v)
82
u64 mask = v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE;
83
u32 lo = mask, hi = mask >> 32;
84
struct xsave_struct *ptr;
86
ptr =(struct xsave_struct *)v->arch.hvm_vcpu.xsave_area;
88
asm volatile (".byte " REX_PREFIX "0x0f,0xae,0x2f"
90
: "m" (*ptr), "a" (lo), "d" (hi), "D"(ptr));
93
/* Classic (FXSAVE/FXRSTOR-path) FPU state handlers, defined in i387.c. */
extern void init_fpu(void);
extern void save_init_fpu(struct vcpu *v);
extern void restore_fpu(struct vcpu *v);
/* Flush @v's FPU state to memory iff it has actually used the FPU. */
#define unlazy_fpu(v) do {                      \
    if ( (v)->fpu_dirtied )                     \
        save_init_fpu(v);                       \
} while ( 0 )
/*
 * Load MXCSR with @val. The 0xffbf mask clears bit 6 (DAZ), which is
 * reserved on CPUs lacking the DAZ feature: setting a reserved MXCSR
 * bit would raise #GP.
 */
#define load_mxcsr(val) do {                                    \
    unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf);    \
    __asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) );    \
} while ( 0 )
static inline void setup_fpu(struct vcpu *v)
109
/* Avoid recursion. */
112
if ( !v->fpu_dirtied )
115
if ( cpu_has_xsave && is_hvm_vcpu(v) )
117
if ( !v->fpu_initialised )
118
v->fpu_initialised = 1;
120
set_xcr0(v->arch.hvm_vcpu.xfeature_mask | XSTATE_FP_SSE);
122
set_xcr0(v->arch.hvm_vcpu.xfeature_mask);
126
if ( v->fpu_initialised )
134
#endif /* __ASM_I386_I387_H */