~ubuntu-branches/ubuntu/utopic/xen/utopic

Viewing changes to xen/arch/x86/hvm/vmx/entry.S

  • Committer: Bazaar Package Importer
  • Author(s): Bastian Blank
  • Date: 2010-05-06 15:47:38 UTC
  • mto: (1.3.1) (15.1.1 sid) (4.1.1 experimental)
  • mto: This revision was merged to the branch mainline in revision 3.
  • Revision ID: james.westby@ubuntu.com-20100506154738-agoz0rlafrh1fnq7
Tags: upstream-4.0.0
Import upstream version 4.0.0

/*
 * entry.S: VMX architecture-specific entry/exit handling.
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/errno.h>
#include <xen/softirq.h>
#include <asm/types.h>
#include <asm/asm_defns.h>
#include <asm/apicdef.h>
#include <asm/page.h>
#include <public/xen.h>

/*
 * VMX instructions hand-encoded as raw opcode bytes so the file assembles
 * with toolchains that lack the VMX mnemonics.  VMREAD/VMWRITE take the
 * VMCS field selector in r(ax); the memory operand is a disp8 off r(di),
 * which callers point at UREGS_rip on the stack frame.
 */
#define VMRESUME     .byte 0x0f,0x01,0xc3
#define VMLAUNCH     .byte 0x0f,0x01,0xc2
#define VMREAD(off)  .byte 0x0f,0x78,0x47,((off)-UREGS_rip)
#define VMWRITE(off) .byte 0x0f,0x79,0x47,((off)-UREGS_rip)

/* VMCS field encodings */
#define GUEST_RSP    0x681c
#define GUEST_RIP    0x681e
#define GUEST_RFLAGS 0x6820

/* Fetch the current vcpu pointer from the topmost word of this CPU's stack. */
#define get_current(reg)                        \
        mov $STACK_SIZE-BYTES_PER_LONG, r(reg); \
        or  r(sp), r(reg);                      \
        and $~(BYTES_PER_LONG-1),r(reg);        \
        mov (r(reg)),r(reg);

#if defined(__x86_64__)
#define r(reg) %r##reg
#define addr_of(lbl) lbl(%rip)
#define call_with_regs(fn)                      \
        mov  %rsp,%rdi;                         \
        call fn;
#else /* defined(__i386__) */
#define r(reg) %e##reg
#define addr_of(lbl) lbl
#define UREGS_rip UREGS_eip
#define UREGS_rsp UREGS_esp
#define call_with_regs(fn)                      \
        mov  %esp,%eax;                         \
        push %eax;                              \
        call fn;                                \
        add  $4,%esp;
#endif

        ALIGN
.globl vmx_asm_vmexit_handler
vmx_asm_vmexit_handler:
#if defined(__x86_64__)
        push %rdi
        push %rsi
        push %rdx
        push %rcx
        push %rax
        push %r8
        push %r9
        push %r10
        push %r11
        push %rbx
        push %rbp
        push %r12
        push %r13
        push %r14
        push %r15
#else /* defined(__i386__) */
        push %eax
        push %ebp
        push %edi
        push %esi
        push %edx
        push %ecx
        push %ebx
#endif

        get_current(bx)

        /* The VMCS is now in the launched state: later entries use VMRESUME. */
        movb $1,VCPU_vmx_launched(r(bx))

        lea  UREGS_rip(r(sp)),r(di)
        mov  $GUEST_RIP,%eax
        /*VMREAD(UREGS_rip)*/
        .byte 0x0f,0x78,0x07  /* vmread r(ax),(r(di)) */
        mov  $GUEST_RSP,%eax
        VMREAD(UREGS_rsp)
        mov  $GUEST_RFLAGS,%eax
        VMREAD(UREGS_eflags)

        mov  %cr2,r(ax)
        mov  r(ax),VCPU_hvm_guest_cr2(r(bx))

#ifndef NDEBUG
        /* Debug builds: poison frame fields that a VM exit does not fill in. */
        mov  $0xbeef,%ax
        mov  %ax,UREGS_error_code(r(sp))
        mov  %ax,UREGS_entry_vector(r(sp))
        mov  %ax,UREGS_saved_upcall_mask(r(sp))
        mov  %ax,UREGS_cs(r(sp))
        mov  %ax,UREGS_ds(r(sp))
        mov  %ax,UREGS_es(r(sp))
        mov  %ax,UREGS_fs(r(sp))
        mov  %ax,UREGS_gs(r(sp))
        mov  %ax,UREGS_ss(r(sp))
#endif

        call_with_regs(vmx_vmexit_handler)

.globl vmx_asm_do_vmentry
vmx_asm_do_vmentry:
        call vmx_intr_assist

        get_current(bx)
        cli

        /* If this CPU has softirqs pending, service them before entering the guest. */
        mov  VCPU_processor(r(bx)),%eax
        shl  $IRQSTAT_shift,r(ax)
        lea  addr_of(irq_stat),r(dx)
        cmpl $0,(r(dx),r(ax),1)
        jnz  .Lvmx_process_softirqs

        testb $0xff,VCPU_vmx_emulate(r(bx))
        jnz .Lvmx_goto_emulator
        testb $0xff,VCPU_vmx_realmode(r(bx))
        jz .Lvmx_not_realmode
        cmpw $0,VCPU_vm86_seg_mask(r(bx))
        jnz .Lvmx_goto_emulator
        call_with_regs(vmx_enter_realmode)

.Lvmx_not_realmode:
        call vmx_vmenter_helper
        mov  VCPU_hvm_guest_cr2(r(bx)),r(ax)
        mov  r(ax),%cr2

        lea  UREGS_rip(r(sp)),r(di)
        mov  $GUEST_RIP,%eax
        /*VMWRITE(UREGS_rip)*/
        .byte 0x0f,0x79,0x07  /* vmwrite (r(di)),r(ax) */
        mov  $GUEST_RSP,%eax
        VMWRITE(UREGS_rsp)
        mov  $GUEST_RFLAGS,%eax
        VMWRITE(UREGS_eflags)

        /* The flags from this compare survive the register pops below. */
        cmpb $0,VCPU_vmx_launched(r(bx))
#if defined(__x86_64__)
        pop  %r15
        pop  %r14
        pop  %r13
        pop  %r12
        pop  %rbp
        pop  %rbx
        pop  %r11
        pop  %r10
        pop  %r9
        pop  %r8
        pop  %rax
        pop  %rcx
        pop  %rdx
        pop  %rsi
        pop  %rdi
#else /* defined(__i386__) */
        pop  %ebx
        pop  %ecx
        pop  %edx
        pop  %esi
        pop  %edi
        pop  %ebp
        pop  %eax
#endif
        je   .Lvmx_launch

/*.Lvmx_resume:*/
        VMRESUME
        sti
        call vm_resume_fail
        ud2

.Lvmx_launch:
        VMLAUNCH
        sti
        call vm_launch_fail
        ud2

.Lvmx_goto_emulator:
        sti
        call_with_regs(vmx_realmode)
        jmp  vmx_asm_do_vmentry

.Lvmx_process_softirqs:
        sti
        call do_softirq
        jmp  vmx_asm_do_vmentry