/*
 * entry.S: SVM architecture-specific entry/exit handling.
 * Copyright (c) 2005-2007, Advanced Micro Devices, Inc.
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
21
#include <xen/config.h>
22
#include <xen/errno.h>
23
#include <xen/softirq.h>
24
#include <asm/types.h>
25
#include <asm/asm_defns.h>
26
#include <asm/apicdef.h>
28
#include <public/xen.h>
30
/*
 * Hand-encoded AMD SVM instructions, for toolchains whose assembler does
 * not know the mnemonics (per the AMD APM Vol. 3 opcode listings:
 * VMRUN = 0F 01 D8, STGI = 0F 01 DC, CLGI = 0F 01 DD).
 *
 * NOTE(review): the bare numeric lines interleaved in this file ("31",
 * "32", ...) are extraction artifacts, not code — the file will not
 * assemble until it is restored from its pristine source.
 */
#define VMRUN .byte 0x0F,0x01,0xD8
31
#define STGI .byte 0x0F,0x01,0xDC
32
#define CLGI .byte 0x0F,0x01,0xDD
34
/*
 * get_current(reg): load the 'current' vcpu pointer into r(reg), derived
 * from the stack pointer (the pointer lives in the last BYTES_PER_LONG-
 * sized slot of the per-cpu stack — presumably; confirm against the
 * pristine source).
 * NOTE(review): this extraction is missing at least two continuation
 * lines of the macro (the one folding r(sp) into r(reg), and the final
 * dereference) — do not use as-is.  No comments are inserted between the
 * continuation lines below, as that would terminate the macro early.
 */
#define get_current(reg) \
35
mov $STACK_SIZE-BYTES_PER_LONG, r(reg); \
37
and $~(BYTES_PER_LONG-1),r(reg); \
40
/*
 * Width abstraction so the code below assembles for both x86-64 and i386
 * builds:
 *   r(reg)        - expands to the 64-bit (%r..) or 32-bit (%e..) name.
 *   addr_of(lbl)  - RIP-relative addressing on x86-64, absolute on i386.
 *   UREGS_r??     - on i386, aliased to the 32-bit cpu_user_regs fields
 *                   so the shared code can use the 64-bit field names.
 *   call_with_regs(fn) - calls fn; presumably passes a pointer to the
 *                   saved register frame per each ABI — TODO confirm,
 *                   since both macro bodies are missing from this
 *                   extraction (the trailing backslashes continue into
 *                   artifact lines).
 */
#if defined(__x86_64__)
41
#define r(reg) %r##reg
42
#define addr_of(lbl) lbl(%rip)
43
#define call_with_regs(fn) \
46
#else /* defined(__i386__) */
47
#define r(reg) %e##reg
48
#define addr_of(lbl) lbl
49
#define UREGS_rax UREGS_eax
50
#define UREGS_rip UREGS_eip
51
#define UREGS_rsp UREGS_esp
52
#define call_with_regs(fn) \
59
/*
 * svm_asm_do_resume: (re)enter the guest via VMRUN and handle the
 * subsequent #VMEXIT, looping until softirq work forces a bounce
 * through .Lsvm_process_softirqs.
 * On entry (per the code below): r(bx) = current vcpu, and r(sp) points
 * at the guest's saved cpu_user_regs frame.
 * NOTE(review): large parts of this routine are missing from this
 * extraction — the CLGI/VMRUN/STGI sequence, the per-arch GPR
 * save/restore bodies, the zeroing of %eax before the segment stores,
 * and the trace stub around svm_trace_vmentry.  The bare numeric lines
 * are artifacts.  Restore from the pristine source before assembling.
 */
ENTRY(svm_asm_do_resume)
65
/* Pending softirqs for this vcpu's processor?  irq_stat is indexed by
 * (processor << IRQSTAT_shift) bytes; a nonzero word means work to do. */
mov VCPU_processor(r(bx)),%eax
66
shl $IRQSTAT_shift,r(ax)
67
lea addr_of(irq_stat),r(dx)
68
testl $~0,(r(dx),r(ax),1)
69
jnz .Lsvm_process_softirqs
71
/* Assign/refresh the guest ASID before entering the guest. */
call svm_asid_handle_vmrun
73
/* Tracing enabled?  (The conditional branch that consumes this compare
 * is missing from this extraction.) */
cmpb $0,addr_of(tb_init_done)
77
/* Propagate the guest rAX/rIP/rSP/rFLAGS from the saved register frame
 * into the VMCB, which is what VMRUN actually loads. */
mov VCPU_svm_vmcb(r(bx)),r(cx)
78
mov UREGS_rax(r(sp)),r(ax)
79
mov r(ax),VMCB_rax(r(cx))
80
mov UREGS_rip(r(sp)),r(ax)
81
mov r(ax),VMCB_rip(r(cx))
82
mov UREGS_rsp(r(sp)),r(ax)
83
mov r(ax),VMCB_rsp(r(cx))
84
mov UREGS_eflags(r(sp)),r(ax)
85
mov r(ax),VMCB_rflags(r(cx))
87
/* VMRUN takes the VMCB's *physical* address in rAX. */
mov VCPU_svm_vmcb_pa(r(bx)),r(ax)
89
#if defined(__x86_64__)
100
add $8,%rsp /* Skip %rax: restored by VMRUN. */
105
#else /* defined(__i386__) */
116
#if defined(__x86_64__)
132
#else /* defined(__i386__) */
142
/* Post-#VMEXIT: the in-memory VMCB now differs from any cached copy... */
movb $0,VCPU_svm_vmcb_in_sync(r(bx))
143
/* ...so copy the guest rAX/rIP/rSP/rFLAGS back out of the VMCB into the
 * saved register frame for the C handler to inspect. */
mov VCPU_svm_vmcb(r(bx)),r(cx)
144
mov VMCB_rax(r(cx)),r(ax)
145
mov r(ax),UREGS_rax(r(sp))
146
mov VMCB_rip(r(cx)),r(ax)
147
mov r(ax),UREGS_rip(r(sp))
148
mov VMCB_rsp(r(cx)),r(ax)
149
mov r(ax),UREGS_rsp(r(sp))
150
mov VMCB_rflags(r(cx)),r(ax)
151
mov r(ax),UREGS_eflags(r(sp))
155
/* Clear the remaining frame fields.  NOTE(review): this assumes %ax == 0
 * here — the instruction establishing that is among the missing lines;
 * verify against the pristine source. */
mov %ax,UREGS_error_code(r(sp))
156
mov %ax,UREGS_entry_vector(r(sp))
157
mov %ax,UREGS_saved_upcall_mask(r(sp))
158
mov %ax,UREGS_cs(r(sp))
159
mov %ax,UREGS_ds(r(sp))
160
mov %ax,UREGS_es(r(sp))
161
mov %ax,UREGS_fs(r(sp))
162
mov %ax,UREGS_gs(r(sp))
163
mov %ax,UREGS_ss(r(sp))
167
/* Exported label — referenced from outside this file (the STGI that
 * presumably precedes the handler call is among the missing lines). */
.globl svm_stgi_label
169
/* Hand the exit to the C handler, then loop back to re-enter the guest. */
call_with_regs(svm_vmexit_handler)
170
jmp svm_asm_do_resume
172
.Lsvm_process_softirqs:
175
/* NOTE(review): the softirq dispatch (presumably sti + a do_softirq
 * call) is missing from this extraction before this re-entry jump. */
jmp svm_asm_do_resume
178
/* NOTE(review): orphaned tail of a trace stub whose label and
 * surrounding save/restore are missing from this extraction. */
call svm_trace_vmentry