@@ -91,7 +91,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
 	pt_element_t *table;
 	struct page *page;
 
+	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(kvm, table_gfn);
+	up_read(&current->mm->mmap_sem);
+
 	table = kmap_atomic(page, KM_USER0);
 
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
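Two things happen in this hunk: gfn_to_page() can fault memory in and therefore sleep, so mmap_sem is taken around just that lookup and dropped before kmap_atomic() enters atomic context; and the guest PTE is then updated with a compare-and-exchange so a racing guest write is detected rather than silently overwritten. A minimal userspace sketch of that cmpxchg pattern, with C11 atomics standing in for the kernel's CMPXCHG macro and the x86 PTE bit values (0x20 accessed, 0x40 dirty) assumed:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pt_element_t;

/* Set bits in a guest PTE only if it still holds the value read
 * during the walk; a concurrent update makes the exchange fail and
 * the caller must restart the walk. */
static int set_bits_if_unchanged(_Atomic pt_element_t *pte,
				 pt_element_t orig, pt_element_t bits)
{
	pt_element_t new = orig | bits;

	/* compare-and-swap: succeeds only if *pte == orig */
	return atomic_compare_exchange_strong(pte, &orig, new);
}

int main(void)
{
	_Atomic pt_element_t pte = 0x1000 | 0x01;	/* frame | present */

	if (set_bits_if_unchanged(&pte, 0x1000 | 0x01, 0x20 | 0x40))
		printf("accessed+dirty set: %#llx\n",
		       (unsigned long long)pte);
	return 0;
}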
@@ -263,2 +267,3 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
-		     gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
+		     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
+		     npage);
@@ -270,6 +275,6 @@
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *walker,
-			 int user_fault, int write_fault, int *ptwrite,
-			 struct page *page)
+			 int user_fault, int write_fault, int largepage,
+			 int *ptwrite, struct page *page)
 {
 	hpa_t shadow_addr;
@@ -299,11 +304,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 		if (level == PT_PAGE_TABLE_LEVEL)
 			break;
-		if (is_shadow_present_pte(*shadow_ent)) {
+
+		if (largepage && level == PT_DIRECTORY_LEVEL)
+			break;
+
+		if (is_shadow_present_pte(*shadow_ent)
+		    && !is_large_pte(*shadow_ent)) {
 			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
 			continue;
 		}
 
+		if (is_large_pte(*shadow_ent))
+			rmap_remove(vcpu->kvm, shadow_ent);
+
 		if (level - 1 == PT_PAGE_TABLE_LEVEL
 		    && walker->level == PT_DIRECTORY_LEVEL) {
 			metaphysical = 1;
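The two break conditions are the heart of the large-page walk: at PT_PAGE_TABLE_LEVEL the walk always stops, and when the fault is backed by a large page it now also stops one level earlier, at PT_DIRECTORY_LEVEL, so a single 2 MB spte is installed in the page directory instead of descending into a 4 KB page table; a stale large spte found mid-walk is unlinked with rmap_remove() first. A simplified, self-contained model of the early break (the leaf_level() helper is hypothetical, not kernel code; level numbers mirror KVM's PT_PAGE_TABLE_LEVEL = 1 and PT_DIRECTORY_LEVEL = 2):

#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL 1	/* 4 KB leaf level */
#define PT_DIRECTORY_LEVEL  2	/* 2 MB leaf level when largepage */

/* Return the level at which the shadow walk stops and a leaf spte
 * is written. */
static int leaf_level(int largepage)
{
	int level;

	for (level = 4; ; level--) {
		if (level == PT_PAGE_TABLE_LEVEL)
			break;		/* normal 4 KB mapping */
		if (largepage && level == PT_DIRECTORY_LEVEL)
			break;		/* install a 2 MB spte here */
		/* otherwise descend into (or allocate) the next table */
	}
	return level;
}

int main(void)
{
	printf("4K mapping leaf level: %d\n", leaf_level(0));
	printf("2M mapping leaf level: %d\n", leaf_level(1));
	return 0;
}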
@@ -337,6 +350,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
 		     user_fault, write_fault,
 		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-		     ptwrite, walker->gfn, page);
+		     ptwrite, largepage, walker->gfn, page);
 
 	return shadow_ent;
@@ -389,4 +403,4 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		pgprintk("%s: guest page fault\n", __FUNCTION__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
@@ -396 +410,19 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
+	down_read(&current->mm->mmap_sem);
+	if (walker.level == PT_DIRECTORY_LEVEL) {
+		gfn_t large_gfn;
+		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
+		if (is_largepage_backed(vcpu, large_gfn)) {
+			walker.gfn = large_gfn;
+			largepage = 1;
+		}
+	}
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
+	up_read(&current->mm->mmap_sem);
+
+	/* mmio */
+	if (is_error_page(page)) {
+		pgprintk("gfn %x is mmio\n", walker.gfn);
+		kvm_release_page_clean(page);
+		up_read(&vcpu->kvm->slots_lock);
+		return 1;
+	}
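The alignment arithmetic above rounds the faulting gfn down to the first gfn of its host large page: with 4 KB guest pages inside 2 MB host pages, KVM_PAGES_PER_HPAGE is 512, so the mask clears the low nine bits of the gfn. A small worked example (the constant assumes exactly that x86 geometry):

#include <stdio.h>
#include <stdint.h>

#define KVM_PAGES_PER_HPAGE 512	/* 2 MB / 4 KB on x86 */

typedef uint64_t gfn_t;

int main(void)
{
	gfn_t gfn = 0x12345;
	/* Clear the low log2(512) = 9 bits: first gfn of the 2 MB region. */
	gfn_t large_gfn = gfn & ~(gfn_t)(KVM_PAGES_PER_HPAGE - 1);

	printf("gfn %#llx -> large_gfn %#llx\n",
	       (unsigned long long)gfn, (unsigned long long)large_gfn);
	/* prints: gfn 0x12345 -> large_gfn 0x12200 */
	return 0;
}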
@@ -398,6 +430,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-				  &write_pt, page);
+				  largepage, &write_pt, page);
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
 		 shadow_pte, *shadow_pte, write_pt);
@@ -406,15 +439,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 
-	/*
-	 * mmio: emulate if accessible, otherwise its a guest fault.
-	 */
-	if (shadow_pte && is_io_pte(*shadow_pte)) {
-		spin_unlock(&vcpu->kvm->mmu_lock);
-		up_read(&current->mm->mmap_sem);
-		return 1;
-	}
-
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
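Taken together, the page_fault hunks encode a lock ordering: slots_lock is held across the whole fault and released on every exit path (replacing the old blanket mmap_sem), mmap_sem is taken only around the gfn_to_page() translation since that may sleep, and mmu_lock is the innermost spinlock held while sptes are written. A pthreads sketch of that shape only (the lock names are stand-ins, not kernel API):

#include <pthread.h>

static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER; /* outer  */
static pthread_rwlock_t mmap_sem   = PTHREAD_RWLOCK_INITIALIZER; /* middle */
static pthread_mutex_t  mmu_lock   = PTHREAD_MUTEX_INITIALIZER;  /* inner  */

/* Acquire outer to inner and release strictly inside-out; mmap_sem is
 * dropped before mmu_lock is taken, mirroring the fault path: translate
 * the gfn first, then mutate shadow page tables under the spinlock. */
static void page_fault_locking_shape(void)
{
	pthread_rwlock_rdlock(&slots_lock);

	pthread_rwlock_rdlock(&mmap_sem);
	/* ... gfn_to_page() equivalent: may sleep, so no spinlock held ... */
	pthread_rwlock_unlock(&mmap_sem);

	pthread_mutex_lock(&mmu_lock);
	/* ... install sptes ... */
	pthread_mutex_unlock(&mmu_lock);

	pthread_rwlock_unlock(&slots_lock);
}

int main(void)
{
	page_fault_locking_shape();
	return 0;
}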