/*
 * intr.c: handling I/O, interrupts related VMX entry/exit
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2004-2007, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <xen/config.h>
#include <xen/init.h>
#include <xen/mm.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/trace.h>
#include <xen/event.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <asm/hvm/vmx/vmcs.h>
#include <asm/hvm/vpic.h>
#include <asm/hvm/vlapic.h>
#include <asm/hvm/nestedhvm.h>
#include <public/hvm/ioreq.h>
#include <asm/hvm/trace.h>

/*
 * A few notes on virtual NMI and INTR delivery, and interactions with
 * interruptibility states:
 *
 * We can only inject an ExtInt if EFLAGS.IF = 1 and no blocking by
 * STI nor MOV SS. Otherwise the VM entry fails. The 'virtual interrupt
 * pending' control causes a VM exit when all these checks succeed. It will
 * exit immediately after VM entry if the checks succeed at that point.
 *
 * We can only inject an NMI if no blocking by MOV SS (also, depending on
 * implementation, if no blocking by STI). If pin-based 'virtual NMIs'
 * control is specified then the NMI-blocking interruptibility flag is
 * also checked. The 'virtual NMI pending' control (available only in
 * conjunction with 'virtual NMIs') causes a VM exit when all these checks
 * succeed. It will exit immediately after VM entry if the checks succeed
 * at that point.
 *
 * Because a processor may or may not check blocking-by-STI when injecting
 * a virtual NMI, it will be necessary to convert that to blocking-by-MOV-SS
 * before specifying the 'virtual NMI pending' control. Otherwise we could
 * enter an infinite loop where we check blocking-by-STI in software and
 * thus delay delivery of a virtual NMI, but the processor causes immediate
 * VM exit because it does not check blocking-by-STI.
 *
 * Injecting a virtual NMI sets the NMI-blocking interruptibility flag only
 * if the 'virtual NMIs' control is set. Injecting *any* kind of event clears
 * the STI- and MOV-SS-blocking interruptibility-state flags.
 */

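/*
 * Arrange for a VM exit as soon as the guest can accept the pending
 * event: enable 'interrupt window' exiting or, when delivering an NMI
 * with the 'virtual NMIs' feature available, 'NMI window' exiting.
 */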
static void enable_intr_window(struct vcpu *v, struct hvm_intack intack)
{
    u32 ctl = CPU_BASED_VIRTUAL_INTR_PENDING;

    ASSERT(intack.source != hvm_intsrc_none);

    if ( unlikely(tb_init_done) )
    {
        unsigned int intr = __vmread(VM_ENTRY_INTR_INFO);
        HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
                    (intr & INTR_INFO_VALID_MASK) ? intr & 0xff : -1);
    }

    if ( (intack.source == hvm_intsrc_nmi) && cpu_has_vmx_vnmi )
    {
        /*
         * We set MOV-SS blocking in lieu of STI blocking when delivering an
         * NMI. This is because it is processor-specific whether STI-blocking
         * blocks NMIs. Hence we *must* check for STI-blocking on NMI delivery
         * (otherwise vmentry will fail on processors that check for STI-
         * blocking) but if the processor does not check for STI-blocking then
         * we may immediately vmexit and hence make no progress!
         * (see SDM 3B 21.3, "Other Causes of VM Exits").
         */
        u32 intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
        if ( intr_shadow & VMX_INTR_SHADOW_STI )
        {
            /* Having both STI-blocking and MOV-SS-blocking fails vmentry. */
            intr_shadow &= ~VMX_INTR_SHADOW_STI;
            intr_shadow |= VMX_INTR_SHADOW_MOV_SS;
            __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
        }
        ctl = CPU_BASED_VIRTUAL_NMI_PENDING;
    }

    if ( !(v->arch.hvm_vmx.exec_control & ctl) )
    {
        v->arch.hvm_vmx.exec_control |= ctl;
        vmx_update_cpu_exec_control(v);
    }
}

/*
 * Injecting interrupts for nested virtualization
 *
 *  When injecting virtual interrupts (originating from L0), there are
 *  two major cases: within L1 context and within L2 context.
 *   1. L1 context (in_nesting == 0)
 *      Everything is the same as in the non-nested case: check RFLAGS.IF
 *      to see if the injection can be done, and use the VMCS to inject
 *      the interrupt.
 *
 *   2. L2 context (in_nesting == 1)
 *      Causes a virtual VMExit; RFLAGS.IF is ignored, and whether to ack
 *      the irq follows intr_ack_on_exit. Injection shouldn't normally be
 *      blocked, except for:
 *    a. context transition
 *      The interrupt needs to be blocked at virtual VMEntry time.
 *    b. L2 idtv reinjection
 *      If an L2 idtv event is handled within L0 (e.g. an L0 shadow page
 *      fault), it needs to be reinjected without exiting to L1; interrupt
 *      injection should be blocked at this point as well.
 *
 *  Unfortunately, interrupt blocking in L2 won't work with a simple
 *  intr_window_open check (which depends on L2's IF). To solve this,
 *  the following algorithm can be used:
 *   v->arch.hvm_vmx.exec_control.VIRTUAL_INTR_PENDING now denotes
 *   only the L0 control; the physical control may differ from it.
 *   - if in L1, it behaves normally: the intr window is written
 *     to the physical control as it is
 *   - if in L2, replace it with MTF (or the NMI window) if possible
 *   - if the MTF/NMI window is not used, the intr window can still be
 *     used, but may have a negative impact on interrupt performance.
 */

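/*
 * Report whether nested state currently forbids event injection, e.g.
 * because a virtual VMExit/VMEntry is pending or in progress, or an
 * event is already queued for injection on this entry.
 */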
enum hvm_intblk nvmx_intr_blocked(struct vcpu *v)
{
    int r = hvm_intblk_none;
    struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);

    if ( nestedhvm_vcpu_in_guestmode(v) )
    {
        if ( nvcpu->nv_vmexit_pending ||
             nvcpu->nv_vmswitch_in_progress ||
             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
            r = hvm_intblk_rflags_ie;
    }
    else if ( nvcpu->nv_vmentry_pending )
        r = hvm_intblk_rflags_ie;

    return r;
}

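/*
 * Handle a pending event on behalf of nested virtualization. Returns
 * nonzero if the event was consumed here, either by opening an
 * interrupt window or by pushing a PIC/LAPIC interrupt to the L1
 * guest, in which case vmx_intr_assist() need do nothing more.
 */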
static int nvmx_intr_intercept(struct vcpu *v, struct hvm_intack intack)
{
    u32 exit_ctrl;

    if ( nvmx_intr_blocked(v) != hvm_intblk_none )
    {
        enable_intr_window(v, intack);
        return 1;
    }

    if ( nestedhvm_vcpu_in_guestmode(v) )
    {
        if ( intack.source == hvm_intsrc_pic ||
             intack.source == hvm_intsrc_lapic )
        {
            vmx_inject_extint(intack.vector);

            exit_ctrl = __get_vvmcs(vcpu_nestedhvm(v).nv_vvmcx,
                                    VM_EXIT_CONTROLS);
            if ( exit_ctrl & VM_EXIT_ACK_INTR_ON_EXIT )
            {
                /* for now, duplicate the ack path in vmx_intr_assist */
                hvm_vcpu_ack_pending_irq(v, intack);
                pt_intr_post(v, intack);

                intack = hvm_vcpu_has_pending_irq(v);
                if ( unlikely(intack.source != hvm_intsrc_none) )
                    enable_intr_window(v, intack);
            }
            else
                enable_intr_window(v, intack);

            return 1;
        }
    }

    return 0;
}

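/*
 * Called on the VM-entry path: pick up the highest-priority pending
 * event, inject it if the guest can accept it, and otherwise request
 * an interrupt/NMI window so we are re-invoked once injection becomes
 * possible.
 */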
void vmx_intr_assist(void)
{
    struct hvm_intack intack;
    struct vcpu *v = current;
    unsigned int tpr_threshold = 0;
    enum hvm_intblk intblk;

    /* Block event injection when single-stepping with MTF. */
    if ( unlikely(v->arch.hvm_vcpu.single_step) )
    {
        v->arch.hvm_vmx.exec_control |= CPU_BASED_MONITOR_TRAP_FLAG;
        vmx_update_cpu_exec_control(v);
        return;
    }

    /* Crank the handle on interrupt state. */
    pt_update_irq(v);

    do {
        intack = hvm_vcpu_has_pending_irq(v);
        if ( likely(intack.source == hvm_intsrc_none) )
            goto out;

        if ( unlikely(nvmx_intr_intercept(v, intack)) )
            goto out;

        intblk = hvm_interrupt_blocked(v, intack);
        if ( intblk == hvm_intblk_tpr )
        {
            ASSERT(vlapic_enabled(vcpu_vlapic(v)));
            ASSERT(intack.source == hvm_intsrc_lapic);
            /* Blocked by TPR: record the vector's priority class. */
            tpr_threshold = intack.vector >> 4;
            goto out;
        }

        if ( (intblk != hvm_intblk_none) ||
             (__vmread(VM_ENTRY_INTR_INFO) & INTR_INFO_VALID_MASK) )
        {
            enable_intr_window(v, intack);
            goto out;
        }

        intack = hvm_vcpu_ack_pending_irq(v, intack);
    } while ( intack.source == hvm_intsrc_none );

    if ( intack.source == hvm_intsrc_nmi )
    {
        vmx_inject_nmi();
    }
    else if ( intack.source == hvm_intsrc_mce )
    {
        hvm_inject_hw_exception(TRAP_machine_check, HVM_DELIVER_NO_ERROR_CODE);
    }
    else
    {
        HVMTRACE_2D(INJ_VIRQ, intack.vector, /*fake=*/ 0);
        vmx_inject_extint(intack.vector);
        pt_intr_post(v, intack);
    }

    /* Is there another IRQ to queue up behind this one? */
    intack = hvm_vcpu_has_pending_irq(v);
    if ( unlikely(intack.source != hvm_intsrc_none) )
        enable_intr_window(v, intack);

 out:
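    /*
     * Program the TPR threshold: a nonzero value forces a VM exit once
     * the guest lowers its TPR enough for the highest-priority pending
     * LAPIC interrupt to become deliverable.
     */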
    if ( cpu_has_vmx_tpr_shadow )
        __vmwrite(TPR_THRESHOLD, tpr_threshold);
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */