~ubuntu-branches/ubuntu/hardy/kvm/hardy-backports


Viewing changes to kernel/paging_tmpl.h

  • Committer: Bazaar Package Importer
  • Author(s): Soren Hansen
  • Date: 2008-02-26 13:10:57 UTC
  • mfrom: (1.1.18 upstream)
  • Revision ID: james.westby@ubuntu.com-20080226131057-s67x6l89mtjw1x9b
Tags: 1:62+dfsg-0ubuntu1
New upstream release

Diff key: left column = old line number, right column = new line number; '+' marks added lines, '-' marks removed lines.
  91   91           pt_element_t *table;
  92   92           struct page *page;
  93   93
       94  +        down_read(&current->mm->mmap_sem);
  94   95           page = gfn_to_page(kvm, table_gfn);
       96  +        up_read(&current->mm->mmap_sem);
       97  +
  95   98           table = kmap_atomic(page, KM_USER0);
  96   99
  97  100           ret = CMPXCHG(&table[index], orig_pte, new_pte);
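For context on this hunk: gfn_to_page() resolves the guest frame to a host struct page via get_user_pages(), which expects the caller to hold current->mm->mmap_sem for read, hence the new down_read()/up_read() pair around the lookup. The CMPXCHG() that follows publishes the updated guest pte only if the entry still holds the value read earlier. A minimal user-space analogue of that compare-and-swap step (names invented for the example, assuming GCC's __sync_bool_compare_and_swap builtin):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustration only: install new_pte only if the slot still holds orig_pte,
     * mirroring CMPXCHG(&table[index], orig_pte, new_pte) in the hunk above. */
    static int try_update_pte(uint64_t *entry, uint64_t orig_pte, uint64_t new_pte)
    {
            return __sync_bool_compare_and_swap(entry, orig_pte, new_pte);
    }

    int main(void)
    {
            uint64_t pte  = 0x1000 | 0x01;          /* frame | present bit   */
            uint64_t want = pte | 0x20;             /* also set accessed bit */

            if (try_update_pte(&pte, 0x1000 | 0x01, want))
                    printf("pte updated to %#llx\n", (unsigned long long)pte);
            return 0;
    }

If the entry changed under us, the swap fails and the caller can re-read and retry.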
 140  143           }
 141  144   #endif
 142  145           ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
 143        -              (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
      146  +               (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
 144  147
 145  148           pt_access = ACC_ALL;
 146  149
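The vcpu->cr3 to vcpu->arch.cr3 rename follows the upstream split of architecture-specific vCPU state into a nested kvm_vcpu_arch structure. Roughly, with the field list abridged and assumed for illustration only:

    /* Sketch of the layout implied by the rename; not the full definitions. */
    struct kvm_vcpu_arch {
            unsigned long cr0;
            unsigned long cr3;
            unsigned long cr4;
            /* ... further x86-specific state ... */
    };

    struct kvm_vcpu {
            struct kvm *kvm;
            /* ... architecture-independent state ... */
            struct kvm_vcpu_arch arch;  /* so vcpu->cr3 becomes vcpu->arch.cr3 */
    };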
 245  248           pt_element_t gpte;
 246  249           unsigned pte_access;
 247  250           struct page *npage;
      251  +        int largepage = vcpu->arch.update_pte.largepage;
 248  252
 249  253           gpte = *(const pt_element_t *)pte;
 250  254           if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
 261  265                   return;
 262  266           get_page(npage);
 263  267           mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
 264        -                    gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
      268  +                     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
      269  +                     npage);
 265  270   }
 266  271
 267  272   /*
 269  274    */
 270  275   static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 271  276                            struct guest_walker *walker,
 272        -                        int user_fault, int write_fault, int *ptwrite,
 273        -                        struct page *page)
      277  +                         int user_fault, int write_fault, int largepage,
      278  +                         int *ptwrite, struct page *page)
 274  279   {
 275  280           hpa_t shadow_addr;
 276  281           int level;
 299  304                   shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 300  305                   if (level == PT_PAGE_TABLE_LEVEL)
 301  306                           break;
 302        -               if (is_shadow_present_pte(*shadow_ent)) {
      307  +
      308  +                if (largepage && level == PT_DIRECTORY_LEVEL)
      309  +                        break;
      310  +
      311  +                if (is_shadow_present_pte(*shadow_ent)
      312  +                    && !is_large_pte(*shadow_ent)) {
 303  313                           shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
 304  314                           continue;
 305  315                   }
 306  316
      317  +                if (is_large_pte(*shadow_ent))
      318  +                        rmap_remove(vcpu->kvm, shadow_ent);
      319  +
 307  320                   if (level - 1 == PT_PAGE_TABLE_LEVEL
 308  321                       && walker->level == PT_DIRECTORY_LEVEL) {
 309  322                           metaphysical = 1;
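In the shadow-walk loop, the largepage path now stops one level early, at PT_DIRECTORY_LEVEL, so the leaf spte maps a whole large page, and a pre-existing large spte that is about to be replaced is first detached from the reverse map with rmap_remove(). is_large_pte() itself just tests the x86 page-size (PS) bit of the entry, bit 7. A self-contained illustration of that test, with example-local names standing in for KVM's constants:

    #include <stdio.h>
    #include <stdint.h>

    /* Example-local stand-in for KVM's page-size mask (the x86 PS bit, bit 7). */
    #define EXAMPLE_PT_PAGE_SIZE_MASK (1ULL << 7)

    static int example_is_large_pte(uint64_t pte)
    {
            return (pte & EXAMPLE_PT_PAGE_SIZE_MASK) != 0;
    }

    int main(void)
    {
            printf("0x80 -> %d, 0x01 -> %d\n",
                   example_is_large_pte(0x80), example_is_large_pte(0x01));
            return 0;
    }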
 337  350           mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
 338  351                        user_fault, write_fault,
 339  352                        walker->ptes[walker->level-1] & PT_DIRTY_MASK,
 340        -                    ptwrite, walker->gfn, page);
      353  +                     ptwrite, largepage, walker->gfn, page);
 341  354
 342  355           return shadow_ent;
 343  356   }
 367  380           int write_pt = 0;
 368  381           int r;
 369  382           struct page *page;
      383  +        int largepage = 0;
 370  384
 371  385           pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
 372  386           kvm_mmu_audit(vcpu, "pre page fault");
 375  389           if (r)
 376  390                   return r;
 377  391
 378        -       down_read(&current->mm->mmap_sem);
      392  +        down_read(&vcpu->kvm->slots_lock);
 379  393           /*
 380  394            * Look up the shadow pte for the faulting address.
 381  395            */
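At the top of the fault handler, slots_lock now replaces mmap_sem: the guest memslot layout has to stay stable for the whole fault, whereas the host mmap_sem is only needed around the actual host-page lookup and is therefore taken later, just around gfn_to_page().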
 389  403                   pgprintk("%s: guest page fault\n", __FUNCTION__);
 390  404                   inject_page_fault(vcpu, addr, walker.error_code);
 391  405                   vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 392        -               up_read(&current->mm->mmap_sem);
      406  +                up_read(&vcpu->kvm->slots_lock);
 393  407                   return 0;
 394  408           }
 395  409
      410  +        down_read(&current->mm->mmap_sem);
      411  +        if (walker.level == PT_DIRECTORY_LEVEL) {
      412  +                gfn_t large_gfn;
      413  +                large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
      414  +                if (is_largepage_backed(vcpu, large_gfn)) {
      415  +                        walker.gfn = large_gfn;
      416  +                        largepage = 1;
      417  +                }
      418  +        }
 396  419           page = gfn_to_page(vcpu->kvm, walker.gfn);
      420  +        up_read(&current->mm->mmap_sem);
      421  +
      422  +        /* mmio */
      423  +        if (is_error_page(page)) {
      424  +                pgprintk("gfn %x is mmio\n", walker.gfn);
      425  +                kvm_release_page_clean(page);
      426  +                up_read(&vcpu->kvm->slots_lock);
      427  +                return 1;
      428  +        }
 397  429
 398  430           spin_lock(&vcpu->kvm->mmu_lock);
 399  431           kvm_mmu_free_some_pages(vcpu);
 400  432           shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
 401        -                                 &write_pt, page);
      433  +                                  largepage, &write_pt, page);
      434  +
 402  435           pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
 403  436                    shadow_pte, *shadow_pte, write_pt);
 404  437
 405  438           if (!write_pt)
 406  439                   vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 407  440
 408        -       /*
 409        -        * mmio: emulate if accessible, otherwise its a guest fault.
 410        -        */
 411        -       if (shadow_pte && is_io_pte(*shadow_pte)) {
 412        -               spin_unlock(&vcpu->kvm->mmu_lock);
 413        -               up_read(&current->mm->mmap_sem);
 414        -               return 1;
 415        -       }
 416        -
 417  441           ++vcpu->stat.pf_fixed;
 418  442           kvm_mmu_audit(vcpu, "post page fault (fixed)");
 419  443           spin_unlock(&vcpu->kvm->mmu_lock);
 420        -       up_read(&current->mm->mmap_sem);
      444  +        up_read(&vcpu->kvm->slots_lock);
 421  445
 422  446           return write_pt;
 423  447   }
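Taken together, the page-fault changes do three things: the handler now holds vcpu->kvm->slots_lock for the whole fault and takes current->mm->mmap_sem only around the host-page lookup; a directory-level fault whose gfn region is large-page backed (is_largepage_backed()) is promoted to a single large mapping; and a gfn with no memslot behind it, for which gfn_to_page() returns the error page, is bounced straight back to the emulator with return 1, replacing the old post-fetch is_io_pte() check. The alignment step rounds the faulting gfn down to the first frame of its large page. A small, self-contained illustration of that rounding, using an assumed KVM_PAGES_PER_HPAGE of 512 (4 KiB base pages, 2 MiB large pages) and example-local names:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed value for this example: 2 MiB / 4 KiB = 512 frames per large page. */
    #define EXAMPLE_PAGES_PER_HPAGE 512ULL

    static uint64_t example_large_gfn(uint64_t gfn)
    {
            /* Same expression as walker.gfn & ~(KVM_PAGES_PER_HPAGE-1) above. */
            return gfn & ~(EXAMPLE_PAGES_PER_HPAGE - 1);
    }

    int main(void)
    {
            /* 0x12345 lies in the large page that starts at frame 0x12200. */
            printf("gfn 0x12345 -> large gfn %#llx\n",
                   (unsigned long long)example_large_gfn(0x12345));
            return 0;
    }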