/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/mmu-44x.h>
#include <asm/asm-offsets.h>

#define VCPU_GPR(n)	(VCPU_GPRS + (n * 4))

/* The host stack layout: */
#define HOST_R1		0 /* Implied by stwu. */
#define HOST_CALLEE_LR	4
#define HOST_RUN	8
/* r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option. */
#define HOST_R2		12
#define HOST_NV_GPRS	16
#define HOST_NV_GPR(n)	(HOST_NV_GPRS + ((n - 14) * 4))
#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR	(HOST_STACK_SIZE + 4) /* In caller stack frame. */
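
/*
 * Resulting frame (offsets from r1), for reference:
 *   0   HOST_R1        back chain, written by the stwu in __kvmppc_vcpu_run
 *   4   HOST_CALLEE_LR
 *   8   HOST_RUN       kvm_run pointer
 *   12  HOST_R2
 *   16  HOST_NV_GPRS   r14..r31, one word each
 * The frame is padded to a 16-byte boundary; this function's LR save word
 * lives above it, in the caller's frame, at HOST_STACK_LR.
 */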

#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS) | \
                        (1<<BOOKE_INTERRUPT_DEBUG))

#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                        (1<<BOOKE_INTERRUPT_DTLB_MISS))

#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
                       (1<<BOOKE_INTERRUPT_DTLB_MISS))
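
/*
 * These masks are indexed by exit number (bit n for exit type n, matching
 * the 1 << exit-number value computed in kvmppc_resume_host below) and
 * select the exits for which the faulting instruction, DEAR, or ESR must
 * be captured before host code can clobber them.
 */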

.macro KVM_HANDLER ivor_nr
_GLOBAL(kvmppc_handler_\ivor_nr)
	/* Get pointer to vcpu and record exit number. */
	mtspr	SPRN_SPRG_WSCRATCH0, r4
	mfspr	r4, SPRN_SPRG_RVCPU
	stw	r5, VCPU_GPR(r5)(r4)
	stw	r6, VCPU_GPR(r6)(r4)
	mfctr	r5
	lis	r6, kvmppc_resume_host@h
	stw	r5, VCPU_CTR(r4)	/* Save guest CTR before we clobber it. */
	ori	r6, r6, kvmppc_resume_host@l
	mtctr	r6
	li	r5, \ivor_nr
	bctr
.endm

_GLOBAL(kvmppc_handlers_start)
KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
KVM_HANDLER BOOKE_INTERRUPT_FIT
KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
KVM_HANDLER BOOKE_INTERRUPT_DEBUG
KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA
KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND

_GLOBAL(kvmppc_handler_len)
	.long kvmppc_handler_1 - kvmppc_handler_0
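
/*
 * Every stub emitted by KVM_HANDLER is the same length, so this single
 * stride is all the host needs when it copies the stubs into its
 * exception vector area at initialization time.
 */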

/* Registers:
 *  SPRG_SCRATCH0: guest r4
 *  r4: vcpu pointer
 *  r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	stw	r3, VCPU_GPR(r3)(r4)
	mfcr	r3
	stw	r3, VCPU_CR(r4)
	stw	r7, VCPU_GPR(r7)(r4)
	stw	r8, VCPU_GPR(r8)(r4)
	stw	r9, VCPU_GPR(r9)(r4)

	li	r6, 1
	slw	r6, r6, r5	/* Convert exit number to a NEED_*_MASK bit. */

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
1:
	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	bne	1b
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif

	/* Save the faulting instruction and all GPRs for emulation. */
	andi.	r7, r6, NEED_INST_MASK
	beq	..skip_inst_copy
	mfspr	r9, SPRN_SRR0	/* Guest PC of the faulting instruction. */
	mfmsr	r8
	ori	r7, r8, MSR_DS
	mtmsr	r7		/* Fetch through the guest address space... */
	isync
	lwz	r9, 0(r9)
	mtmsr	r8		/* ...then restore the host MSR. */
	isync
	stw	r9, VCPU_LAST_INST(r4)

	stw	r15, VCPU_GPR(r15)(r4)
	stw	r16, VCPU_GPR(r16)(r4)
	stw	r17, VCPU_GPR(r17)(r4)
	stw	r18, VCPU_GPR(r18)(r4)
	stw	r19, VCPU_GPR(r19)(r4)
	stw	r20, VCPU_GPR(r20)(r4)
	stw	r21, VCPU_GPR(r21)(r4)
	stw	r22, VCPU_GPR(r22)(r4)
	stw	r23, VCPU_GPR(r23)(r4)
	stw	r24, VCPU_GPR(r24)(r4)
	stw	r25, VCPU_GPR(r25)(r4)
	stw	r26, VCPU_GPR(r26)(r4)
	stw	r27, VCPU_GPR(r27)(r4)
	stw	r28, VCPU_GPR(r28)(r4)
	stw	r29, VCPU_GPR(r29)(r4)
	stw	r30, VCPU_GPR(r30)(r4)
	stw	r31, VCPU_GPR(r31)(r4)
..skip_inst_copy:

	/* Also grab DEAR and ESR before the host can clobber them. */

	andi.	r7, r6, NEED_DEAR_MASK
	beq	..skip_dear
	mfspr	r9, SPRN_DEAR
	stw	r9, VCPU_FAULT_DEAR(r4)
..skip_dear:

	andi.	r7, r6, NEED_ESR_MASK
	beq	..skip_esr
	mfspr	r9, SPRN_ESR
	stw	r9, VCPU_FAULT_ESR(r4)
..skip_esr:

	/* Save remaining volatile guest register state to vcpu. */
	stw	r0, VCPU_GPR(r0)(r4)
	stw	r1, VCPU_GPR(r1)(r4)
	stw	r2, VCPU_GPR(r2)(r4)
	stw	r10, VCPU_GPR(r10)(r4)
	stw	r11, VCPU_GPR(r11)(r4)
	stw	r12, VCPU_GPR(r12)(r4)
	stw	r13, VCPU_GPR(r13)(r4)
	stw	r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
	mflr	r3
	stw	r3, VCPU_LR(r4)
	mfxer	r3
	stw	r3, VCPU_XER(r4)
	mfspr	r3, SPRN_SPRG_RSCRATCH0
	stw	r3, VCPU_GPR(r4)(r4)	/* Guest r4 was stashed in the scratch SPRG. */
	mfspr	r3, SPRN_SRR0
	stw	r3, VCPU_PC(r4)

	/* Restore host stack pointer and PID before IVPR, since the host
	 * exception handlers use them. */
	lwz	r1, VCPU_HOST_STACK(r4)
	lwz	r3, VCPU_HOST_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	/* we cheat and know that Linux doesn't use PID1, which is always 0 */
	lis	r3, 0
	mtspr	SPRN_PID1, r3
#endif

	/* Restore host IVPR before re-enabling interrupts. We cheat and know
	 * that Linux IVPR is always 0xc0000000. */
	lis	r3, 0xc000
	mtspr	SPRN_IVPR, r3

	/* Switch to kernel stack and jump to handler. */
	LOAD_REG_ADDR(r3, kvmppc_handle_exit)
	mtctr	r3
	lwz	r3, HOST_RUN(r1)
	lwz	r2, HOST_R2(r1)
	mr	r14, r4 /* Save vcpu pointer. */

	bctrl	/* kvmppc_handle_exit() */
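
	/*
	 * kvmppc_handle_exit() returns a RESUME_* code in r3; the low-order
	 * flag bits are tested below, and srawi strips them back off when a
	 * negative errno is being returned to the host.
	 */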

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	lwz	r14, VCPU_GPR(r14)(r4)

	/* Sometimes instruction emulation must restore complete GPR state. */
	andi.	r5, r3, RESUME_FLAG_NV
	beq	..skip_nv_load
	lwz	r15, VCPU_GPR(r15)(r4)
	lwz	r16, VCPU_GPR(r16)(r4)
	lwz	r17, VCPU_GPR(r17)(r4)
	lwz	r18, VCPU_GPR(r18)(r4)
	lwz	r19, VCPU_GPR(r19)(r4)
	lwz	r20, VCPU_GPR(r20)(r4)
	lwz	r21, VCPU_GPR(r21)(r4)
	lwz	r22, VCPU_GPR(r22)(r4)
	lwz	r23, VCPU_GPR(r23)(r4)
	lwz	r24, VCPU_GPR(r24)(r4)
	lwz	r25, VCPU_GPR(r25)(r4)
	lwz	r26, VCPU_GPR(r26)(r4)
	lwz	r27, VCPU_GPR(r27)(r4)
	lwz	r28, VCPU_GPR(r28)(r4)
	lwz	r29, VCPU_GPR(r29)(r4)
	lwz	r30, VCPU_GPR(r30)(r4)
	lwz	r31, VCPU_GPR(r31)(r4)
..skip_nv_load:

	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
	/* Not returning to guest. */

#ifdef CONFIG_SPE
	/* save guest SPEFSCR and load host SPEFSCR */
	mfspr	r9, SPRN_SPEFSCR
	stw	r9, VCPU_SPEFSCR(r4)
	lwz	r9, VCPU_HOST_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r9
#endif

	/* We already saved guest volatile register state; now save the
	 * non-volatiles. */
	stw	r15, VCPU_GPR(r15)(r4)
	stw	r16, VCPU_GPR(r16)(r4)
	stw	r17, VCPU_GPR(r17)(r4)
	stw	r18, VCPU_GPR(r18)(r4)
	stw	r19, VCPU_GPR(r19)(r4)
	stw	r20, VCPU_GPR(r20)(r4)
	stw	r21, VCPU_GPR(r21)(r4)
	stw	r22, VCPU_GPR(r22)(r4)
	stw	r23, VCPU_GPR(r23)(r4)
	stw	r24, VCPU_GPR(r24)(r4)
	stw	r25, VCPU_GPR(r25)(r4)
	stw	r26, VCPU_GPR(r26)(r4)
	stw	r27, VCPU_GPR(r27)(r4)
	stw	r28, VCPU_GPR(r28)(r4)
	stw	r29, VCPU_GPR(r29)(r4)
	stw	r30, VCPU_GPR(r30)(r4)
	stw	r31, VCPU_GPR(r31)(r4)

	/* Load host non-volatile register state from host stack. */
	lwz	r14, HOST_NV_GPR(r14)(r1)
	lwz	r15, HOST_NV_GPR(r15)(r1)
	lwz	r16, HOST_NV_GPR(r16)(r1)
	lwz	r17, HOST_NV_GPR(r17)(r1)
	lwz	r18, HOST_NV_GPR(r18)(r1)
	lwz	r19, HOST_NV_GPR(r19)(r1)
	lwz	r20, HOST_NV_GPR(r20)(r1)
	lwz	r21, HOST_NV_GPR(r21)(r1)
	lwz	r22, HOST_NV_GPR(r22)(r1)
	lwz	r23, HOST_NV_GPR(r23)(r1)
	lwz	r24, HOST_NV_GPR(r24)(r1)
	lwz	r25, HOST_NV_GPR(r25)(r1)
	lwz	r26, HOST_NV_GPR(r26)(r1)
	lwz	r27, HOST_NV_GPR(r27)(r1)
	lwz	r28, HOST_NV_GPR(r28)(r1)
	lwz	r29, HOST_NV_GPR(r29)(r1)
	lwz	r30, HOST_NV_GPR(r30)(r1)
	lwz	r31, HOST_NV_GPR(r31)(r1)

	/* Return to kvm_vcpu_run(). */
	lwz	r4, HOST_STACK_LR(r1)
	addi	r1, r1, HOST_STACK_SIZE
	mtlr	r4
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	stw	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	stw	r3, HOST_RUN(r1)
	mflr	r3
	stw	r3, HOST_STACK_LR(r1)

	/* Save host non-volatile register state to stack. */
	stw	r14, HOST_NV_GPR(r14)(r1)
	stw	r15, HOST_NV_GPR(r15)(r1)
	stw	r16, HOST_NV_GPR(r16)(r1)
	stw	r17, HOST_NV_GPR(r17)(r1)
	stw	r18, HOST_NV_GPR(r18)(r1)
	stw	r19, HOST_NV_GPR(r19)(r1)
	stw	r20, HOST_NV_GPR(r20)(r1)
	stw	r21, HOST_NV_GPR(r21)(r1)
	stw	r22, HOST_NV_GPR(r22)(r1)
	stw	r23, HOST_NV_GPR(r23)(r1)
	stw	r24, HOST_NV_GPR(r24)(r1)
	stw	r25, HOST_NV_GPR(r25)(r1)
	stw	r26, HOST_NV_GPR(r26)(r1)
	stw	r27, HOST_NV_GPR(r27)(r1)
	stw	r28, HOST_NV_GPR(r28)(r1)
	stw	r29, HOST_NV_GPR(r29)(r1)
	stw	r30, HOST_NV_GPR(r30)(r1)
	stw	r31, HOST_NV_GPR(r31)(r1)

	/* Load guest non-volatiles. */
	lwz	r14, VCPU_GPR(r14)(r4)
	lwz	r15, VCPU_GPR(r15)(r4)
	lwz	r16, VCPU_GPR(r16)(r4)
	lwz	r17, VCPU_GPR(r17)(r4)
	lwz	r18, VCPU_GPR(r18)(r4)
	lwz	r19, VCPU_GPR(r19)(r4)
	lwz	r20, VCPU_GPR(r20)(r4)
	lwz	r21, VCPU_GPR(r21)(r4)
	lwz	r22, VCPU_GPR(r22)(r4)
	lwz	r23, VCPU_GPR(r23)(r4)
	lwz	r24, VCPU_GPR(r24)(r4)
	lwz	r25, VCPU_GPR(r25)(r4)
	lwz	r26, VCPU_GPR(r26)(r4)
	lwz	r27, VCPU_GPR(r27)(r4)
	lwz	r28, VCPU_GPR(r28)(r4)
	lwz	r29, VCPU_GPR(r29)(r4)
	lwz	r30, VCPU_GPR(r30)(r4)
	lwz	r31, VCPU_GPR(r31)(r4)

lightweight_exit:
	stw	r2, HOST_R2(r1)

#ifdef CONFIG_SPE
	/* save host SPEFSCR and load guest SPEFSCR */
	mfspr	r3, SPRN_SPEFSCR
	stw	r3, VCPU_HOST_SPEFSCR(r4)
	lwz	r3, VCPU_SPEFSCR(r4)
	mtspr	SPRN_SPEFSCR, r3
#endif

	/* Save the host PID and switch to the guest's shadow PID. */
	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_SHADOW_PID(r4)
	mtspr	SPRN_PID, r3

#ifdef CONFIG_FSL_BOOKE
	lwz	r3, VCPU_SHADOW_PID1(r4)
	mtspr	SPRN_PID1, r3
#endif

	iccci	0, 0 /* XXX hack */

	/* Load some guest volatiles. */
	lwz	r0, VCPU_GPR(r0)(r4)
	lwz	r2, VCPU_GPR(r2)(r4)
	lwz	r9, VCPU_GPR(r9)(r4)
	lwz	r10, VCPU_GPR(r10)(r4)
	lwz	r11, VCPU_GPR(r11)(r4)
	lwz	r12, VCPU_GPR(r12)(r4)
	lwz	r13, VCPU_GPR(r13)(r4)
	lwz	r3, VCPU_LR(r4)
	mtlr	r3
	lwz	r3, VCPU_XER(r4)
	mtxer	r3

	/* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
	 * so how do we make sure the vcpu won't fault? */
	lis	r8, kvmppc_booke_handlers@ha
	lwz	r8, kvmppc_booke_handlers@l(r8)
	mtspr	SPRN_IVPR, r8

	/* Save vcpu pointer for the exception handlers. */
	mtspr	SPRN_SPRG_WVCPU, r4

	/* Can't switch the stack pointer until after IVPR is switched,
	 * because host interrupt handlers would get confused. */
	lwz	r1, VCPU_GPR(r1)(r4)

	/* Host interrupt handlers may have clobbered these guest-readable
	 * SPRGs, so we need to reload them here with the guest's values. */
	lwz	r3, VCPU_SPRG4(r4)
	mtspr	SPRN_SPRG4W, r3
	lwz	r3, VCPU_SPRG5(r4)
	mtspr	SPRN_SPRG5W, r3
	lwz	r3, VCPU_SPRG6(r4)
	mtspr	SPRN_SPRG6W, r3
	lwz	r3, VCPU_SPRG7(r4)
	mtspr	SPRN_SPRG7W, r3

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r7, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	bne	1b
	stw	r7, VCPU_TIMING_LAST_ENTER_TBL(r4)
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/* Finish loading guest volatiles and jump to guest. */
	lwz	r3, VCPU_CTR(r4)
	lwz	r5, VCPU_CR(r4)
	lwz	r6, VCPU_PC(r4)
	lwz	r7, VCPU_SHADOW_MSR(r4)
	mtctr	r3
	mtcr	r5
	mtsrr0	r6
	mtsrr1	r7
	lwz	r5, VCPU_GPR(r5)(r4)
	lwz	r6, VCPU_GPR(r6)(r4)
	lwz	r7, VCPU_GPR(r7)(r4)
	lwz	r8, VCPU_GPR(r8)(r4)

	/* Clear any debug events which occurred since we disabled MSR[DE].
	 * XXX This gives us a 3-instruction window in which a breakpoint
	 * intended for guest context could fire in the host instead. */
	lis	r3, 0xffff
	ori	r3, r3, 0xffff
	mtspr	SPRN_DBSR, r3

	lwz	r3, VCPU_GPR(r3)(r4)
	lwz	r4, VCPU_GPR(r4)(r4)
	rfi

#ifdef CONFIG_SPE
_GLOBAL(kvmppc_save_guest_spe)
	cmpi	0, r3, 0
	beqlr-
	SAVE_32EVRS(0, r4, r3, VCPU_EVR)
	evxor	evr6, evr6, evr6
	evmwumiaa evr6, evr6, evr6
	li	r4, VCPU_ACC
	evstddx	evr6, r4, r3		/* save acc */
	blr

_GLOBAL(kvmppc_load_guest_spe)
	cmpi	0, r3, 0
	beqlr-
	li	r4, VCPU_ACC
	evlddx	evr6, r4, r3
	evmra	evr6, evr6		/* load acc */
	REST_32EVRS(0, r4, r3, VCPU_EVR)
	blr
#endif