/*
 * vlapic.c: virtualize LAPIC for HVM vcpus.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2006 Keir Fraser, XenSource Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <xen/config.h>
#include <xen/types.h>
#include <xen/xmalloc.h>
#include <xen/domain.h>
#include <xen/domain_page.h>
#include <xen/event.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
#include <asm/hvm/vmx/vmx.h>
#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>
#define VLAPIC_VERSION 0x00050014
42
#define VLAPIC_LVT_NUM 6
44
/* vlapic's frequence is 100 MHz */
45
#define APIC_BUS_CYCLE_NS 10
48
APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK
51
LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY |\
52
APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER
54
static unsigned int vlapic_lvt_mask[VLAPIC_LVT_NUM] =
57
LVT_MASK | APIC_LVT_TIMER_PERIODIC,
59
LVT_MASK | APIC_MODE_MASK,
61
LVT_MASK | APIC_MODE_MASK,
68
/* Following could belong in apicdef.h */
69
#define APIC_SHORT_MASK 0xc0000
70
#define APIC_DEST_NOSHORT 0x0
71
#define APIC_DEST_MASK 0x800
73
/* Vector field of an LVT register. */
#define vlapic_lvt_vector(vlapic, lvt_type)                     \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_VECTOR_MASK)

/* Delivery-mode field of an LVT register. */
#define vlapic_lvt_dm(vlapic, lvt_type)                         \
    (vlapic_get_reg(vlapic, lvt_type) & APIC_MODE_MASK)

/* Non-zero iff the LVT timer is in periodic mode. */
#define vlapic_lvtt_period(vlapic)                              \
    (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC)
/*
 * Generic APIC bitmap vector update & search routines.
 * The IRR/ISR/TMR registers are arrays of 32-bit words spaced 0x10 apart.
 */

#define VEC_POS(v) ((v)%32)
#define REG_POS(v) (((v)/32) * 0x10)
#define vlapic_test_and_set_vector(vec, bitmap)                 \
    test_and_set_bit(VEC_POS(vec),                              \
                     (unsigned long *)((bitmap) + REG_POS(vec)))
#define vlapic_test_and_clear_vector(vec, bitmap)               \
    test_and_clear_bit(VEC_POS(vec),                            \
                       (unsigned long *)((bitmap) + REG_POS(vec)))
#define vlapic_set_vector(vec, bitmap)                          \
    set_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
#define vlapic_clear_vector(vec, bitmap)                        \
    clear_bit(VEC_POS(vec), (unsigned long *)((bitmap) + REG_POS(vec)))
static int vlapic_find_highest_vector(void *bitmap)
102
uint32_t *word = bitmap;
103
int word_offset = MAX_VECTOR / 32;
105
/* Work backwards through the bitmap (first 32-bit word in every four). */
106
while ( (word_offset != 0) && (word[(--word_offset)*4] == 0) )
109
return (fls(word[word_offset*4]) - 1) + (word_offset * 32);
114
* IRR-specific bitmap update & search routines.
117
static int vlapic_test_and_set_irr(int vector, struct vlapic *vlapic)
119
return vlapic_test_and_set_vector(vector, &vlapic->regs->data[APIC_IRR]);
122
static void vlapic_clear_irr(int vector, struct vlapic *vlapic)
124
vlapic_clear_vector(vector, &vlapic->regs->data[APIC_IRR]);
127
static int vlapic_find_highest_irr(struct vlapic *vlapic)
129
return vlapic_find_highest_vector(&vlapic->regs->data[APIC_IRR]);
132
int vlapic_set_irq(struct vlapic *vlapic, uint8_t vec, uint8_t trig)
136
ret = !vlapic_test_and_set_irr(vec, vlapic);
138
vlapic_set_vector(vec, &vlapic->regs->data[APIC_TMR]);
140
/* We may need to wake up target vcpu, besides set pending bit here */
144
static int vlapic_find_highest_isr(struct vlapic *vlapic)
146
return vlapic_find_highest_vector(&vlapic->regs->data[APIC_ISR]);
149
static uint32_t vlapic_get_ppr(struct vlapic *vlapic)
151
uint32_t tpr, isrv, ppr;
154
tpr = vlapic_get_reg(vlapic, APIC_TASKPRI);
155
isr = vlapic_find_highest_isr(vlapic);
156
isrv = (isr != -1) ? isr : 0;
158
if ( (tpr & 0xf0) >= (isrv & 0xf0) )
163
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_INTERRUPT,
164
"vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
165
vlapic, ppr, isr, isrv);
170
/* Does logical destination @mda match this LAPIC's LDR under current DFR? */
static int vlapic_match_logical_addr(struct vlapic *vlapic, uint8_t mda)
{
    int result = 0;
    uint8_t logical_id;

    logical_id = GET_xAPIC_LOGICAL_ID(vlapic_get_reg(vlapic, APIC_LDR));

    switch ( vlapic_get_reg(vlapic, APIC_DFR) )
    {
    case APIC_DFR_FLAT:
        if ( logical_id & mda )
            result = 1;
        break;
    case APIC_DFR_CLUSTER:
        if ( ((logical_id >> 4) == (mda >> 0x4)) && (logical_id & mda & 0xf) )
            result = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "Bad DFR value for lapic of vcpu %d: %08x\n",
                 vlapic_vcpu(vlapic)->vcpu_id,
                 vlapic_get_reg(vlapic, APIC_DFR));
        break;
    }

    return result;
}
/* Is @target addressed by an IPI with the given shorthand/dest/dest_mode? */
bool_t vlapic_match_dest(
    struct vlapic *target, struct vlapic *source,
    int short_hand, uint8_t dest, uint8_t dest_mode)
{
    HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "target %p, source %p, dest 0x%x, "
                "dest_mode 0x%x, short_hand 0x%x",
                target, source, dest, dest_mode, short_hand);

    switch ( short_hand )
    {
    case APIC_DEST_NOSHORT:
        if ( dest_mode )
            return vlapic_match_logical_addr(target, dest);
        return ((dest == 0xFF) || (dest == VLAPIC_ID(target)));

    case APIC_DEST_SELF:
        return (target == source);

    case APIC_DEST_ALLINC:
        return 1;

    case APIC_DEST_ALLBUT:
        return (target != source);

    default:
        gdprintk(XENLOG_WARNING, "Bad dest shorthand value %x\n", short_hand);
        break;
    }

    return 0;
}
static int vlapic_vcpu_pause_async(struct vcpu *v)
231
vcpu_pause_nosync(v);
239
sync_vcpu_execstate(v);
243
static void vlapic_init_action(unsigned long _vcpu)
245
struct vcpu *v = (struct vcpu *)_vcpu;
246
struct domain *d = v->domain;
247
bool_t fpu_initialised;
249
/* If the VCPU is not on its way down we have nothing to do. */
250
if ( !test_bit(_VPF_down, &v->pause_flags) )
253
if ( !vlapic_vcpu_pause_async(v) )
255
tasklet_schedule(&vcpu_vlapic(v)->init_tasklet);
259
/* Reset necessary VCPU state. This does not include FPU state. */
261
fpu_initialised = v->fpu_initialised;
263
v->fpu_initialised = fpu_initialised;
264
vlapic_reset(vcpu_vlapic(v));
270
static int vlapic_accept_init(struct vcpu *v)
272
/* Nothing to do if the VCPU is already reset. */
273
if ( !v->is_initialised )
276
/* Asynchronously take the VCPU down and schedule reset work. */
278
tasklet_schedule(&vcpu_vlapic(v)->init_tasklet);
279
return X86EMUL_RETRY;
282
static int vlapic_accept_sipi(struct vcpu *v, int trampoline_vector)
284
/* If the VCPU is not on its way down we have nothing to do. */
285
if ( !test_bit(_VPF_down, &v->pause_flags) )
288
if ( !vlapic_vcpu_pause_async(v) )
289
return X86EMUL_RETRY;
291
hvm_vcpu_reset_state(v, trampoline_vector << 8, 0);
298
/* Add a pending IRQ into lapic. */
299
static int vlapic_accept_irq(struct vcpu *v, uint32_t icr_low)
301
struct vlapic *vlapic = vcpu_vlapic(v);
302
uint8_t vector = (uint8_t)icr_low;
303
int rc = X86EMUL_OKAY;
305
switch ( icr_low & APIC_MODE_MASK )
309
if ( vlapic_enabled(vlapic) &&
310
!vlapic_test_and_set_irr(vector, vlapic) )
315
gdprintk(XENLOG_WARNING, "Ignoring delivery mode 3\n");
319
gdprintk(XENLOG_WARNING, "Ignoring guest SMI\n");
323
if ( !test_and_set_bool(v->nmi_pending) )
328
/* No work on INIT de-assert for P4-type APIC. */
329
if ( (icr_low & (APIC_INT_LEVELTRIG | APIC_INT_ASSERT)) ==
332
rc = vlapic_accept_init(v);
335
case APIC_DM_STARTUP:
336
rc = vlapic_accept_sipi(v, vector);
340
gdprintk(XENLOG_ERR, "TODO: unsupported delivery mode in ICR %x\n",
342
domain_crash(v->domain);
348
/*
 * Round-robin lowest-priority arbitration: pick the matching, enabled
 * vlapic with the numerically lowest PPR, starting after the previously
 * chosen vcpu. Returns NULL if no vlapic matches.
 */
struct vlapic *vlapic_lowest_prio(
    struct domain *d, struct vlapic *source,
    int short_hand, uint8_t dest, uint8_t dest_mode)
{
    int old = d->arch.hvm_domain.irq.round_robin_prev_vcpu;
    uint32_t ppr, target_ppr = UINT_MAX;
    struct vlapic *vlapic, *target = NULL;
    struct vcpu *v;

    if ( unlikely(!d->vcpu) || unlikely((v = d->vcpu[old]) == NULL) )
        return NULL;

    do {
        v = v->next_in_list ? : d->vcpu[0];
        vlapic = vcpu_vlapic(v);
        if ( vlapic_match_dest(vlapic, source, short_hand, dest, dest_mode) &&
             vlapic_enabled(vlapic) &&
             ((ppr = vlapic_get_ppr(vlapic)) < target_ppr) )
        {
            target = vlapic;
            target_ppr = ppr;
        }
    } while ( v->vcpu_id != old );

    if ( target != NULL )
        d->arch.hvm_domain.irq.round_robin_prev_vcpu =
            vlapic_vcpu(target)->vcpu_id;

    return target;
}
void vlapic_EOI_set(struct vlapic *vlapic)
381
int vector = vlapic_find_highest_isr(vlapic);
383
/* Some EOI writes may not have a matching to an in-service interrupt. */
387
vlapic_clear_vector(vector, &vlapic->regs->data[APIC_ISR]);
389
if ( vlapic_test_and_clear_vector(vector, &vlapic->regs->data[APIC_TMR]) )
390
vioapic_update_EOI(vlapic_domain(vlapic), vector);
392
hvm_dpci_msi_eoi(current->domain, vector);
396
struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high)
398
unsigned int dest = GET_xAPIC_DEST_FIELD(icr_high);
399
unsigned int short_hand = icr_low & APIC_SHORT_MASK;
400
unsigned int dest_mode = !!(icr_low & APIC_DEST_MASK);
401
struct vlapic *target;
403
int rc = X86EMUL_OKAY;
405
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "icr = 0x%08x:%08x", icr_high, icr_low);
407
if ( (icr_low & APIC_MODE_MASK) == APIC_DM_LOWEST )
409
target = vlapic_lowest_prio(vlapic_domain(vlapic), vlapic,
410
short_hand, dest, dest_mode);
411
if ( target != NULL )
412
rc = vlapic_accept_irq(vlapic_vcpu(target), icr_low);
416
for_each_vcpu ( vlapic_domain(vlapic), v )
418
if ( vlapic_match_dest(vcpu_vlapic(v), vlapic,
419
short_hand, dest, dest_mode) )
420
rc = vlapic_accept_irq(v, icr_low);
421
if ( rc != X86EMUL_OKAY )
428
static uint32_t vlapic_get_tmcct(struct vlapic *vlapic)
430
struct vcpu *v = current;
431
uint32_t tmcct, tmict = vlapic_get_reg(vlapic, APIC_TMICT);
432
uint64_t counter_passed;
434
counter_passed = ((hvm_get_guest_time(v) - vlapic->timer_last_update)
435
/ APIC_BUS_CYCLE_NS / vlapic->hw.timer_divisor);
436
tmcct = tmict - counter_passed;
438
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
439
"timer initial count %d, timer current count %d, "
441
tmict, tmcct, counter_passed);
446
static void vlapic_set_tdcr(struct vlapic *vlapic, unsigned int val)
448
/* Only bits 0, 1 and 3 are settable; others are MBZ. */
450
vlapic_set_reg(vlapic, APIC_TDCR, val);
452
/* Update the demangled hw.timer_divisor. */
453
val = ((val & 3) | ((val & 8) >> 1)) + 1;
454
vlapic->hw.timer_divisor = 1 << (val & 7);
456
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER,
457
"timer_divisor: %d", vlapic->hw.timer_divisor);
460
static void vlapic_read_aligned(
461
struct vlapic *vlapic, unsigned int offset, unsigned int *result)
466
*result = vlapic_get_ppr(vlapic);
469
case APIC_TMCCT: /* Timer CCR */
470
*result = vlapic_get_tmcct(vlapic);
474
*result = vlapic_get_reg(vlapic, offset);
479
static int vlapic_read(
480
struct vcpu *v, unsigned long address,
481
unsigned long len, unsigned long *pval)
483
unsigned int alignment;
485
unsigned long result = 0;
486
struct vlapic *vlapic = vcpu_vlapic(v);
487
unsigned int offset = address - vlapic_base_address(vlapic);
489
if ( offset > (APIC_TDCR + 0x3) )
492
alignment = offset & 0x3;
494
vlapic_read_aligned(vlapic, offset & ~0x3, &tmp);
498
result = *((unsigned char *)&tmp + alignment);
502
if ( alignment == 3 )
503
goto unaligned_exit_and_crash;
504
result = *(unsigned short *)((unsigned char *)&tmp + alignment);
508
if ( alignment != 0 )
509
goto unaligned_exit_and_crash;
510
result = *(unsigned int *)((unsigned char *)&tmp + alignment);
514
gdprintk(XENLOG_ERR, "Local APIC read with len=0x%lx, "
515
"should be 4 instead.\n", len);
519
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset 0x%x with length 0x%lx, "
520
"and the result is 0x%lx", offset, len, result);
526
unaligned_exit_and_crash:
527
gdprintk(XENLOG_ERR, "Unaligned LAPIC read len=0x%lx at offset=0x%x.\n",
530
domain_crash(v->domain);
534
static void vlapic_pt_cb(struct vcpu *v, void *data)
536
*(s_time_t *)data = hvm_get_guest_time(v);
539
static int vlapic_write(struct vcpu *v, unsigned long address,
540
unsigned long len, unsigned long val)
542
struct vlapic *vlapic = vcpu_vlapic(v);
543
unsigned int offset = address - vlapic_base_address(vlapic);
544
int rc = X86EMUL_OKAY;
546
if ( offset != 0xb0 )
547
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
548
"offset 0x%x with length 0x%lx, and value is 0x%lx",
552
* According to the IA32 Manual, all accesses should be 32 bits.
553
* Some OSes do 8- or 16-byte accesses, however.
559
unsigned char alignment;
561
gdprintk(XENLOG_INFO, "Notice: Local APIC write with len = %lx\n",len);
563
alignment = offset & 0x3;
564
(void)vlapic_read_aligned(vlapic, offset & ~0x3, &tmp);
569
val = ((tmp & ~(0xff << (8*alignment))) |
570
((val & 0xff) << (8*alignment)));
575
goto unaligned_exit_and_crash;
576
val = ((tmp & ~(0xffff << (8*alignment))) |
577
((val & 0xffff) << (8*alignment)));
581
gdprintk(XENLOG_ERR, "Local APIC write with len = %lx, "
582
"should be 4 instead\n", len);
586
else if ( (offset & 0x3) != 0 )
587
goto unaligned_exit_and_crash;
594
vlapic_set_reg(vlapic, APIC_TASKPRI, val & 0xff);
598
vlapic_EOI_set(vlapic);
602
vlapic_set_reg(vlapic, APIC_LDR, val & APIC_LDR_MASK);
606
vlapic_set_reg(vlapic, APIC_DFR, val | 0x0FFFFFFF);
610
vlapic_set_reg(vlapic, APIC_SPIV, val & 0x3ff);
612
if ( !(val & APIC_SPIV_APIC_ENABLED) )
617
vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
619
for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
621
lvt_val = vlapic_get_reg(vlapic, APIC_LVTT + 0x10 * i);
622
vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i,
623
lvt_val | APIC_LVT_MASKED);
628
vlapic->hw.disabled &= ~VLAPIC_SW_DISABLED;
629
pt_may_unmask_irq(vlapic_domain(vlapic), &vlapic->pt);
638
val &= ~(1 << 12); /* always clear the pending bit */
639
rc = vlapic_ipi(vlapic, val, vlapic_get_reg(vlapic, APIC_ICR2));
640
if ( rc == X86EMUL_OKAY )
641
vlapic_set_reg(vlapic, APIC_ICR, val);
645
vlapic_set_reg(vlapic, APIC_ICR2, val & 0xff000000);
648
case APIC_LVTT: /* LVT Timer Reg */
649
vlapic->pt.irq = val & APIC_VECTOR_MASK;
650
case APIC_LVTTHMR: /* LVT Thermal Monitor */
651
case APIC_LVTPC: /* LVT Performance Counter */
652
case APIC_LVT0: /* LVT LINT0 Reg */
653
case APIC_LVT1: /* LVT Lint1 Reg */
654
case APIC_LVTERR: /* LVT Error Reg */
655
if ( vlapic_sw_disabled(vlapic) )
656
val |= APIC_LVT_MASKED;
657
val &= vlapic_lvt_mask[(offset - APIC_LVTT) >> 4];
658
vlapic_set_reg(vlapic, offset, val);
659
if ( offset == APIC_LVT0 )
661
vlapic_adjust_i8259_target(v->domain);
662
pt_may_unmask_irq(v->domain, NULL);
664
if ( (offset == APIC_LVTT) && !(val & APIC_LVT_MASKED) )
665
pt_may_unmask_irq(NULL, &vlapic->pt);
672
vlapic_set_reg(vlapic, APIC_TMICT, val);
675
destroy_periodic_time(&vlapic->pt);
679
period = ((uint64_t)APIC_BUS_CYCLE_NS *
680
(uint32_t)val * vlapic->hw.timer_divisor);
681
create_periodic_time(current, &vlapic->pt, period,
682
vlapic_lvtt_period(vlapic) ? period : 0,
683
vlapic->pt.irq, vlapic_pt_cb,
684
&vlapic->timer_last_update);
685
vlapic->timer_last_update = vlapic->pt.last_plt_gtime;
687
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
688
"bus cycle is %uns, "
689
"initial count %lu, period %"PRIu64"ns",
690
APIC_BUS_CYCLE_NS, val, period);
695
vlapic_set_tdcr(vlapic, val & 0xb);
696
HVM_DBG_LOG(DBG_LEVEL_VLAPIC_TIMER, "timer divisor is 0x%x",
697
vlapic->hw.timer_divisor);
701
gdprintk(XENLOG_DEBUG,
702
"Local APIC Write to read-only register 0x%x\n", offset);
708
unaligned_exit_and_crash:
709
gdprintk(XENLOG_ERR, "Unaligned LAPIC write len=0x%lx at offset=0x%x.\n",
712
domain_crash(v->domain);
716
static int vlapic_range(struct vcpu *v, unsigned long addr)
718
struct vlapic *vlapic = vcpu_vlapic(v);
719
unsigned long offset = addr - vlapic_base_address(vlapic);
720
return (!vlapic_hw_disabled(vlapic) && (offset < PAGE_SIZE));
723
const struct hvm_mmio_handler vlapic_mmio_handler = {
724
.check_handler = vlapic_range,
725
.read_handler = vlapic_read,
726
.write_handler = vlapic_write
729
/* Update the APIC base MSR; toggling the enable bit resets/disables the LAPIC. */
void vlapic_msr_set(struct vlapic *vlapic, uint64_t value)
{
    if ( (vlapic->hw.apic_base_msr ^ value) & MSR_IA32_APICBASE_ENABLE )
    {
        if ( value & MSR_IA32_APICBASE_ENABLE )
        {
            vlapic_reset(vlapic);
            vlapic->hw.disabled &= ~VLAPIC_HW_DISABLED;
            pt_may_unmask_irq(vlapic_domain(vlapic), &vlapic->pt);
        }
        else
        {
            vlapic->hw.disabled |= VLAPIC_HW_DISABLED;
            pt_may_unmask_irq(vlapic_domain(vlapic), NULL);
        }
    }

    vlapic->hw.apic_base_msr = value;

    vmx_vlapic_msr_changed(vlapic_vcpu(vlapic));

    HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
                "apic base msr is 0x%016"PRIx64, vlapic->hw.apic_base_msr);
}
static int __vlapic_accept_pic_intr(struct vcpu *v)
756
struct domain *d = v->domain;
757
struct vlapic *vlapic = vcpu_vlapic(v);
758
uint32_t lvt0 = vlapic_get_reg(vlapic, APIC_LVT0);
759
union vioapic_redir_entry redir0 = domain_vioapic(d)->redirtbl[0];
761
/* We deliver 8259 interrupts to the appropriate CPU as follows. */
762
return ((/* IOAPIC pin0 is unmasked and routing to this LAPIC? */
763
((redir0.fields.delivery_mode == dest_ExtINT) &&
764
!redir0.fields.mask &&
765
redir0.fields.dest_id == VLAPIC_ID(vlapic) &&
766
!vlapic_disabled(vlapic)) ||
767
/* LAPIC has LVT0 unmasked for ExtInts? */
768
((lvt0 & (APIC_MODE_MASK|APIC_LVT_MASKED)) == APIC_DM_EXTINT) ||
769
/* LAPIC is fully disabled? */
770
vlapic_hw_disabled(vlapic)));
773
int vlapic_accept_pic_intr(struct vcpu *v)
775
return ((v == v->domain->arch.hvm_domain.i8259_target) &&
776
__vlapic_accept_pic_intr(v));
779
void vlapic_adjust_i8259_target(struct domain *d)
783
for_each_vcpu ( d, v )
784
if ( __vlapic_accept_pic_intr(v) )
787
v = d->vcpu ? d->vcpu[0] : NULL;
790
if ( d->arch.hvm_domain.i8259_target == v )
792
d->arch.hvm_domain.i8259_target = v;
793
pt_adjust_global_vcpu_target(v);
796
/* Return the highest deliverable IRR vector, or -1 if nothing is injectable. */
int vlapic_has_pending_irq(struct vcpu *v)
{
    struct vlapic *vlapic = vcpu_vlapic(v);
    int irr, isr;

    if ( !vlapic_enabled(vlapic) )
        return -1;

    irr = vlapic_find_highest_irr(vlapic);
    if ( irr == -1 )
        return -1;

    /* An equal-or-higher-priority in-service interrupt blocks delivery. */
    isr = vlapic_find_highest_isr(vlapic);
    isr = (isr != -1) ? isr : 0;
    if ( (isr & 0xf0) >= (irr & 0xf0) )
        return -1;

    return irr;
}
int vlapic_ack_pending_irq(struct vcpu *v, int vector)
818
struct vlapic *vlapic = vcpu_vlapic(v);
820
vlapic_set_vector(vector, &vlapic->regs->data[APIC_ISR]);
821
vlapic_clear_irr(vector, vlapic);
826
/* Reset the VLPAIC back to its power-on/reset state. */
827
void vlapic_reset(struct vlapic *vlapic)
829
struct vcpu *v = vlapic_vcpu(vlapic);
832
vlapic_set_reg(vlapic, APIC_ID, (v->vcpu_id * 2) << 24);
833
vlapic_set_reg(vlapic, APIC_LVR, VLAPIC_VERSION);
835
for ( i = 0; i < 8; i++ )
837
vlapic_set_reg(vlapic, APIC_IRR + 0x10 * i, 0);
838
vlapic_set_reg(vlapic, APIC_ISR + 0x10 * i, 0);
839
vlapic_set_reg(vlapic, APIC_TMR + 0x10 * i, 0);
841
vlapic_set_reg(vlapic, APIC_ICR, 0);
842
vlapic_set_reg(vlapic, APIC_ICR2, 0);
843
vlapic_set_reg(vlapic, APIC_LDR, 0);
844
vlapic_set_reg(vlapic, APIC_TASKPRI, 0);
845
vlapic_set_reg(vlapic, APIC_TMICT, 0);
846
vlapic_set_reg(vlapic, APIC_TMCCT, 0);
847
vlapic_set_tdcr(vlapic, 0);
849
vlapic_set_reg(vlapic, APIC_DFR, 0xffffffffU);
851
for ( i = 0; i < VLAPIC_LVT_NUM; i++ )
852
vlapic_set_reg(vlapic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
854
vlapic_set_reg(vlapic, APIC_SPIV, 0xff);
855
vlapic->hw.disabled |= VLAPIC_SW_DISABLED;
857
destroy_periodic_time(&vlapic->pt);
860
/* rearm the actimer if needed, after a HVM restore */
861
static void lapic_rearm(struct vlapic *s)
863
unsigned long tmict = vlapic_get_reg(s, APIC_TMICT);
866
if ( (tmict = vlapic_get_reg(s, APIC_TMICT)) == 0 )
869
period = ((uint64_t)APIC_BUS_CYCLE_NS *
870
(uint32_t)tmict * s->hw.timer_divisor);
871
s->pt.irq = vlapic_get_reg(s, APIC_LVTT) & APIC_VECTOR_MASK;
872
create_periodic_time(vlapic_vcpu(s), &s->pt, period,
873
vlapic_lvtt_period(s) ? period : 0,
874
s->pt.irq, vlapic_pt_cb,
875
&s->timer_last_update);
876
s->timer_last_update = s->pt.last_plt_gtime;
879
/* Save the hidden (non-architectural) LAPIC state for every vcpu. */
static int lapic_save_hidden(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;
    int rc = 0;

    for_each_vcpu ( d, v )
    {
        s = vcpu_vlapic(v);
        if ( (rc = hvm_save_entry(LAPIC, v->vcpu_id, h, &s->hw)) != 0 )
            break;
    }

    return rc;
}
/* Save the architectural LAPIC register page for every vcpu. */
static int lapic_save_regs(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct vlapic *s;
    int rc = 0;

    for_each_vcpu ( d, v )
    {
        s = vcpu_vlapic(v);
        if ( (rc = hvm_save_entry(LAPIC_REGS, v->vcpu_id, h, s->regs)) != 0 )
            break;
    }

    return rc;
}
/* Restore the hidden LAPIC state for one vcpu from a save record. */
static int lapic_load_hidden(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC, h, &s->hw) != 0 )
        return -EINVAL;

    vmx_vlapic_msr_changed(v);

    return 0;
}
/* Restore the LAPIC register page for one vcpu and rearm its timer. */
static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
{
    uint16_t vcpuid;
    struct vcpu *v;
    struct vlapic *s;

    /* Which vlapic to load? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
        return -EINVAL;
    }
    s = vcpu_vlapic(v);

    if ( hvm_load_entry(LAPIC_REGS, h, s->regs) != 0 )
        return -EINVAL;

    vlapic_adjust_i8259_target(d);
    lapic_rearm(s);
    return 0;
}
/* NOTE(review): trailing registration arguments reconstructed -- confirm. */
HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
                          1, HVMSR_PER_VCPU);
HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
                          1, HVMSR_PER_VCPU);
int vlapic_init(struct vcpu *v)
964
struct vlapic *vlapic = vcpu_vlapic(v);
965
unsigned int memflags = MEMF_node(vcpu_to_node(v));
967
HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "%d", v->vcpu_id);
969
vlapic->pt.source = PTSRC_lapic;
972
/* 32-bit VMX may be limited to 32-bit physical addresses. */
973
if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
974
memflags |= MEMF_bits(32);
976
if (vlapic->regs_page == NULL)
978
vlapic->regs_page = alloc_domheap_page(NULL, memflags);
979
if ( vlapic->regs_page == NULL )
981
dprintk(XENLOG_ERR, "alloc vlapic regs error: %d/%d\n",
982
v->domain->domain_id, v->vcpu_id);
986
if (vlapic->regs == NULL)
988
vlapic->regs = __map_domain_page_global(vlapic->regs_page);
989
if ( vlapic->regs == NULL )
991
dprintk(XENLOG_ERR, "map vlapic regs error: %d/%d\n",
992
v->domain->domain_id, v->vcpu_id);
996
clear_page(vlapic->regs);
998
vlapic_reset(vlapic);
1000
vlapic->hw.apic_base_msr = (MSR_IA32_APICBASE_ENABLE |
1001
APIC_DEFAULT_PHYS_BASE);
1002
if ( v->vcpu_id == 0 )
1003
vlapic->hw.apic_base_msr |= MSR_IA32_APICBASE_BSP;
1005
tasklet_init(&vlapic->init_tasklet, vlapic_init_action, (unsigned long)v);
1010
void vlapic_destroy(struct vcpu *v)
1012
struct vlapic *vlapic = vcpu_vlapic(v);
1014
tasklet_kill(&vlapic->init_tasklet);
1015
destroy_periodic_time(&vlapic->pt);
1016
unmap_domain_page_global(vlapic->regs);
1017
free_domheap_page(vlapic->regs_page);