118
124
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
119
125
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
120
126
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
121
extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
122
127
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
123
128
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
124
129
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
125
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
130
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
132
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
126
133
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
127
134
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
128
135
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
181
192
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
182
193
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
183
194
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
195
extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
196
struct kvm_vcpu *vcpu);
197
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
198
struct kvmppc_book3s_shadow_vcpu *svcpu);
/*
 * NOTE(review): corrupted diff residue below -- the bare integers are
 * leftover diff line numbers, the braces of to_book3s() are missing,
 * and TWO alternative bodies are both present (an older container_of()
 * form and a newer vcpu->arch.book3s lookup).  Recover exactly one of
 * them from the upstream header before building; do not guess here.
 */
185
200
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
187
/* older body: assumes vcpu is embedded inside kvmppc_vcpu_book3s */
return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
202
/* newer body: assumes book3s state hangs off vcpu->arch -- TODO confirm */
return vcpu->arch.book3s;
190
extern void kvm_return_point(void);
192
205
/* Also add subarch specific defines */
194
207
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
198
211
/*
 * NOTE(review): the 32-bit-handler #ifdef above is immediately followed
 * by the 64-bit include -- the 32-bit include plus the intervening
 * #endif/#ifdef lines appear to have been dropped by the extraction.
 */
#include <asm/kvm_book3s_64.h>
201
#ifdef CONFIG_KVM_BOOK3S_PR
/*
 * NOTE(review): fragmentary diff residue -- function braces and the
 * leading conditional of the int_pending logic are missing (an `else
 * if` appears with no matching `if`).  Recover from upstream.
 */
203
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
205
/* PR mode: interrupt vectors are offset by the guest-visible HIOR */
return to_book3s(vcpu)->hior;
208
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
209
unsigned long pending_now, unsigned long old_pending)
212
vcpu->arch.shared->int_pending = 1;
213
else if (old_pending)
214
vcpu->arch.shared->int_pending = 0;
/*
 * NOTE(review): fragmentary diff residue -- the GPR accessors below are
 * missing their braces, the shadow-vs-arch register split conditional,
 * and any svcpu_put() pairing for svcpu_get().  Both an svcpu-based and
 * a to_book3s()->shadow_vcpu based variant are interleaved; recover one
 * coherent version from upstream.
 */
217
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
220
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
221
svcpu->gpr[num] = val;
223
to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
225
/* fall-back path: GPR lives in vcpu->arch (presumably for high regs) */
vcpu->arch.gpr[num] = val;
228
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
231
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
232
ulong r = svcpu->gpr[num];
236
return vcpu->arch.gpr[num];
/*
 * NOTE(review): fragmentary diff residue -- the CR/XER/CTR/LR/PC
 * accessors below have been hollowed out: braces, svcpu field
 * assignments/reads, svcpu_put() calls and return statements are
 * missing.  Only the signatures and scattered first statements
 * survive.  Recover the full bodies from upstream; do not build as-is.
 */
239
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
241
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
244
to_book3s(vcpu)->shadow_vcpu->cr = val;
247
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
249
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
256
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
258
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
260
to_book3s(vcpu)->shadow_vcpu->xer = val;
264
static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
266
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
273
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
275
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
280
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
282
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
289
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
291
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
296
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
298
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
305
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
307
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
312
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
314
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
/*
 * NOTE(review): fragmentary diff residue -- the last-instruction and
 * fault-DAR helpers below are missing braces, declarations of `r`,
 * svcpu_put() calls and return statements.  The block comment opened
 * mid-fragment ("Load the instruction manually...") is UNTERMINATED in
 * this residue; its closing line was dropped.  Recover from upstream.
 */
321
static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
323
ulong pc = kvmppc_get_pc(vcpu);
324
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
327
/* Load the instruction manually if it failed to do so in the
329
if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
330
kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
332
r = svcpu->last_inst;
338
* Like kvmppc_get_last_inst(), but for fetching a sc instruction.
339
* Because the sc instruction sets SRR0 to point to the following
340
* instruction, we have to fetch from pc - 4.
342
static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
344
ulong pc = kvmppc_get_pc(vcpu) - 4;
345
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
348
/* Load the instruction manually if it failed to do so in the
350
if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
351
kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
353
r = svcpu->last_inst;
358
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
360
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
362
r = svcpu->fault_dar;
/*
 * NOTE(review): fragmentary diff residue -- kvmppc_critical_section()
 * below is missing its braces, the declaration of `crit`, the closing
 * brace of the 32-bit truncation block, and the final return.  The
 * surviving logic compares the shared-page `critical` marker against
 * guest r1 and requires supervisor mode (MSR_PR clear).  Recover the
 * complete body from upstream.
 */
367
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
369
ulong crit_raw = vcpu->arch.shared->critical;
370
ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
373
/* Truncate crit indicators in 32 bit mode */
374
if (!(vcpu->arch.shared->msr & MSR_SF)) {
375
crit_raw &= 0xffffffff;
376
crit_r1 &= 0xffffffff;
379
/* Critical section when crit == r1 */
380
crit = (crit_raw == crit_r1);
381
/* ... and we're in supervisor mode */
382
crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
#else /* CONFIG_KVM_BOOK3S_PR */
388
/*
 * NOTE(review): fragmentary diff residue -- the non-PR variants below
 * are missing their bodies/braces entirely (only signatures and one
 * assignment survive).  In this branch set_gpr writes vcpu->arch.gpr
 * directly, with no shadow-vcpu indirection.  Recover from upstream.
 */
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
393
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
394
unsigned long pending_now, unsigned long old_pending)
398
214
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
400
216
vcpu->arch.gpr[num] = val;