2
* Miscellaneous process/domain related routines
4
* Copyright (C) 2004 Hewlett-Packard Co.
5
* Dan Magenheimer (dan.magenheimer@hp.com)
9
#include <xen/config.h>
11
#include <xen/errno.h>
12
#include <xen/sched.h>
14
#include <asm/ptrace.h>
15
#include <xen/delay.h>
16
#include <xen/perfc.h>
19
#include <asm/system.h>
20
#include <asm/processor.h>
22
#include <xen/event.h>
23
#include <asm/privop.h>
25
#include <asm/ia64_int.h>
26
#include <asm/dom_fw.h>
28
#include <asm/debugger.h>
29
#include <asm/fpswa.h>
30
#include <asm/bundle.h>
31
#include <asm/asm-xsi-offsets.h>
32
#include <asm/shadow.h>
33
#include <asm/uaccess.h>
34
#include <asm/p2m_entry.h>
36
extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
37
/* FIXME: where these declarations shold be there ? */
38
extern int ia64_hyperprivop(unsigned long, REGS *);
39
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);
41
extern void do_ssc(unsigned long ssc, struct pt_regs *regs);
43
// should never panic domain... if it does, stack may have been overrun
44
/*
 * NOTE(review): this chunk is a mangled extraction -- the bare numeric lines
 * below are leftover line numbers from the original file, and gaps in that
 * numbering (48 -> 50 -> 52 ...) mark dropped source lines (remaining
 * parameters, braces, printk bodies).  Restore from the original file before
 * building; comments only are added here.
 */
/* Presumably: sanity-checks a fault being delivered while the guest has
   psr.ic off, panicking the domain on illegal nesting -- TODO confirm
   against the complete source. */
static void check_bad_nested_interruption(unsigned long isr,
48
struct vcpu *v = current;
50
if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
52
"psr.dt off, trying to deliver nested dtlb!\n");
55
if (vector != IA64_DATA_TLB_VECTOR &&
56
vector != IA64_ALT_DATA_TLB_VECTOR &&
57
vector != IA64_VHPT_TRANS_VECTOR) {
58
panic_domain(regs, "psr.ic off, delivering fault=%lx,"
59
"ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
60
vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
65
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; gaps (74 -> 77, 84 -> 86, ...) mean source lines were
 * dropped (e.g. the else-branch keyword before the IA64_PSR_BE clear).
 * Incomplete body: recover from the original file before editing.
 */
/* Visible intent: deliver vector to the guest by saving interruption state
   into PSCB and pointing cr.iip at the guest IVT (PSCBX(v, iva) + vector),
   masking event delivery and interruption collection. */
static void reflect_interruption(unsigned long isr, struct pt_regs *regs,
68
struct vcpu *v = current;
70
if (!PSCB(v, interrupt_collection_enabled))
71
check_bad_nested_interruption(isr, regs, vector);
72
PSCB(v, unat) = regs->ar_unat; // not sure if this is really needed?
73
PSCB(v, precover_ifs) = regs->cr_ifs;
74
PSCB(v, ipsr) = vcpu_get_psr(v);
77
PSCB(v, iip) = regs->cr_iip;
80
regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
81
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
82
regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
83
if (PSCB(v, dcr) & IA64_DCR_BE)
84
regs->cr_ipsr |= IA64_PSR_BE;
86
/* NOTE(review): the `else` line (original line 85) was dropped here. */
regs->cr_ipsr &= ~IA64_PSR_BE;
88
if (PSCB(v, hpsr_dfh))
89
regs->cr_ipsr |= IA64_PSR_DFH;
90
PSCB(v, vpsr_dfh) = 0;
91
v->vcpu_info->evtchn_upcall_mask = 1;
92
PSCB(v, interrupt_collection_enabled) = 0;
94
perfc_incra(slow_reflect, vector >> 8);
96
debugger_event(vector == IA64_EXTINT_VECTOR ?
97
XEN_IA64_DEBUG_ON_EXTINT : XEN_IA64_DEBUG_ON_EXCEPT);
100
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; numbering gaps (103 -> 106, 111 -> 117, ...) mean source
 * lines were dropped (declarations of isr, regs assignment, early returns).
 * Incomplete body: recover from the original file before editing.
 */
/* Visible intent: if an event is pending for the current vcpu, redirect the
   guest to v->arch.event_callback_ip with interruption state saved, much
   like reflect_interruption() above. */
void reflect_event(void)
102
struct vcpu *v = current;
103
struct pt_regs *regs;
106
if (!event_pending(v))
110
if (is_idle_vcpu(v)) {
111
//printk("WARN: invocation to reflect_event in nested xen\n");
117
isr = regs->cr_ipsr & IA64_PSR_RI;
119
if (!PSCB(v, interrupt_collection_enabled))
120
printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
121
"isr=%lx,viip=0x%lx\n",
122
regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
123
PSCB(v, unat) = regs->ar_unat; // not sure if this is really needed?
124
PSCB(v, precover_ifs) = regs->cr_ifs;
125
PSCB(v, ipsr) = vcpu_get_psr(v);
128
PSCB(v, iip) = regs->cr_iip;
131
regs->cr_iip = v->arch.event_callback_ip;
132
regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
133
regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
134
if (PSCB(v, dcr) & IA64_DCR_BE)
135
regs->cr_ipsr |= IA64_PSR_BE;
137
/* NOTE(review): the `else` line (original line 136) was dropped here. */
regs->cr_ipsr &= ~IA64_PSR_BE;
140
if (PSCB(v, hpsr_dfh))
141
regs->cr_ipsr |= IA64_PSR_DFH;
142
PSCB(v, vpsr_dfh) = 0;
143
v->vcpu_info->evtchn_upcall_mask = 1;
144
PSCB(v, interrupt_collection_enabled) = 0;
146
debugger_event(XEN_IA64_DEBUG_ON_EVENT);
149
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; gaps (152 -> 154, and the missing tail after 155) mean
 * source lines were dropped (cr.ifs clearing, closing braces, `return 0`).
 * Incomplete body: recover from the original file before editing.
 */
/* Visible intent: when interruption collection is off, stash cr.ifs in
   PSCB and return 1 so the faulting instruction is retried with cr.ifs
   cleared ("lazy cover"). */
static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
151
if (!PSCB(v, interrupt_collection_enabled)) {
152
PSCB(v, ifs) = regs->cr_ifs;
154
perfc_incr(lazy_cover);
155
return 1; // retry same instruction with cr.ifs off
160
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; the many numbering gaps (177 -> 180, 202 -> 210, 221 -> 227,
 * 249 -> 253 ...) mean whole statements, braces and `return`s were dropped.
 * Incomplete body: recover from the original file before editing.
 */
/* Visible intent: translate a guest page fault via vcpu_translate(); on
   success insert the translation with vcpu_itc_no_srlz() (undoing it if the
   p2m entry changed underneath); otherwise either handle a Xen-internal
   fault via the exception table, or reflect the fault to the guest. */
void ia64_do_page_fault(unsigned long address, unsigned long isr,
161
struct pt_regs *regs, unsigned long itir)
163
unsigned long iip = regs->cr_iip, iha;
164
// FIXME should validate address here
165
unsigned long pteval;
166
unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
168
int is_ptc_l_needed = 0;
169
ia64_itir_t _itir = {.itir = itir};
171
if ((isr & IA64_ISR_SP)
172
|| ((isr & IA64_ISR_NA)
173
&& (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
175
* This fault was due to a speculative load or lfetch.fault,
176
* set the "ed" bit in the psr to ensure forward progress.
177
* (Target register will get a NaT for ld.s, lfetch will be
180
ia64_psr(regs)->ed = 1;
185
fault = vcpu_translate(current, address, is_data, &pteval,
187
if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
188
struct p2m_entry entry;
189
unsigned long m_pteval;
190
m_pteval = translate_domain_pte(pteval, address, itir,
191
&(_itir.itir), &entry);
192
vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
193
m_pteval, pteval, _itir.itir, &entry);
194
if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
195
p2m_entry_retry(&entry)) {
196
/* dtlb has been purged in-between. This dtlb was
197
matching. Undo the work. */
198
vcpu_flush_tlb_vhpt_range(address, _itir.ps);
200
// the stale entry which we inserted above
201
// may remains in tlb cache.
202
// we don't purge it now hoping next itc purges it.
210
vcpu_ptc_l(current, address, _itir.ps);
211
if (!guest_mode(regs)) {
212
/* The fault occurs inside Xen. */
213
if (!ia64_done_with_exception(regs)) {
214
// should never happen. If it does, region 0 addr may
215
// indicate a bad xen pointer
216
printk("*** xen_handle_domain_access: exception table"
217
" lookup failed, iip=0x%lx, addr=0x%lx, "
218
"spinning...\n", iip, address);
219
panic_domain(regs, "*** xen_handle_domain_access: "
220
"exception table lookup failed, "
221
"iip=0x%lx, addr=0x%lx, spinning...\n",
227
if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
230
if (!PSCB(current, interrupt_collection_enabled)) {
231
check_bad_nested_interruption(isr, regs, fault);
232
//printk("Delivering NESTED DATA TLB fault\n");
233
fault = IA64_DATA_NESTED_TLB_VECTOR;
235
/* NOTE(review): the lvalue assignments for the next two expressions
   (regs->cr_iip = ..., regs->cr_ipsr = ...) were split across dropped
   lines -- only the right-hand sides survive below. */
((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
237
(regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
238
regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,
240
if (PSCB(current, dcr) & IA64_DCR_BE)
241
regs->cr_ipsr |= IA64_PSR_BE;
243
regs->cr_ipsr &= ~IA64_PSR_BE;
246
if (PSCB(current, hpsr_dfh))
247
regs->cr_ipsr |= IA64_PSR_DFH;
248
PSCB(current, vpsr_dfh) = 0;
249
perfc_incra(slow_reflect, fault >> 8);
253
PSCB(current, itir) = itir;
254
PSCB(current, iha) = iha;
255
PSCB(current, ifa) = address;
256
reflect_interruption(isr, regs, fault);
259
/* FPSWA (floating-point software assist) firmware interface pointer.
   Initialized in trap_init() from ia64_boot_param->fpswa; fp_emulate()
   checks it for NULL before use. */
fpswa_interface_t *fpswa_interface = 0;
261
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; gaps (265 -> 267 and the missing tail) mean source lines
 * were dropped (the `else` keyword and closing braces).  Incomplete body.
 */
/* Boot-time trap setup: if firmware provided an FPSWA entry point, convert
   it to a virtual address and store it in fpswa_interface. */
void __init trap_init(void)
263
if (ia64_boot_param->fpswa)
264
/* FPSWA fixup: make the interface pointer a virtual address */
265
fpswa_interface = __va(ia64_boot_param->fpswa);
267
printk("No FPSWA supported.\n");
271
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; numbering gaps mean dropped lines (return type, locals
 * `fp_state`/`ret`, braces, final `return ret;`).  Also note the mojibake
 * on the fp_state_low_volatile line: `®s` is a mis-encoded `&regs`.
 * Incomplete body: recover from the original file before editing.
 */
/* Visible intent: package FP register state (f6-f11 only) and invoke the
   FPSWA firmware handler, bracketed by XEN_EFI_RR_ENTER/LEAVE region
   register switches; returns {-1,0,0,0} if no FPSWA is present. */
fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
272
unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
273
unsigned long *ifs, struct pt_regs *regs)
277
XEN_EFI_RR_DECLARE(rr6, rr7);
279
if (!fpswa_interface)
280
return (fpswa_ret_t) {-1, 0, 0, 0};
282
memset(&fp_state, 0, sizeof(fp_state_t));
285
* compute fp_state. only FP registers f6 - f11 are used by the
286
* kernel, so set those bits in the mask and set the low volatile
287
* pointer to point to these registers.
289
fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
291
fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) ®s->f6;
293
* unsigned long (*EFI_FPSWA) (
294
* unsigned long trap_type,
296
* unsigned long *pipsr,
297
* unsigned long *pfsr,
298
* unsigned long *pisr,
299
* unsigned long *ppreds,
300
* unsigned long *pifs,
303
XEN_EFI_RR_ENTER(rr6, rr7);
304
ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
305
ipsr, fpsr, isr, pr, ifs, &fp_state);
306
XEN_EFI_RR_LEAVE(rr6, rr7);
312
* Handle floating-point assist faults and traps for domain.
315
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; gaps mean dropped lines (return type, `bundle`/`rc`/`ret`
 * declarations, branch bodies, return statements).  The `®s` tokens on
 * the fp_emulate() call are mis-encoded `&regs`.  Incomplete body:
 * recover from the original file before editing.
 */
/* Visible intent: fetch the faulting instruction bundle (VMX or PV path),
   back up cr.iip by one bundle for FP traps with ipsr.ri == 0, then hand
   the bundle to fp_emulate() and record the result in PSCBX fpswa_ret. */
handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
318
unsigned long fault_ip;
322
fault_ip = regs->cr_iip;
324
* When the FP trap occurs, the trapping instruction is completed.
325
* If ipsr.ri == 0, there is the trapping instruction in previous
328
if (!fp_fault && (ia64_psr(regs)->ri == 0))
331
if (VMX_DOMAIN(current)) {
332
rc = __vmx_get_domain_bundle(fault_ip, &bundle);
335
if (vcpu_get_domain_bundle(current, regs, fault_ip,
339
if (rc == IA64_RETRY) {
340
PSCBX(current, fpswa_ret) = (fpswa_ret_t){IA64_RETRY, 0, 0, 0};
341
gdprintk(XENLOG_DEBUG,
342
"%s(%s): floating-point bundle at 0x%lx not mapped\n",
343
__FUNCTION__, fp_fault ? "fault" : "trap", fault_ip);
347
ret = fp_emulate(fp_fault, &bundle, ®s->cr_ipsr, ®s->ar_fpsr,
348
&isr, ®s->pr, ®s->cr_ifs, regs);
351
PSCBX(current, fpswa_ret) = ret;
352
gdprintk(XENLOG_ERR, "%s(%s): fp_emulate() returned %ld\n",
353
__FUNCTION__, fp_fault ? "fault" : "trap",
361
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers.  This function is visibly a big switch on `vector`, but
 * nearly all `case N:` labels, `break`s and braces fall in the dropped
 * lines (gaps 391 -> 397, 397 -> 401, ...), leaving only the printk bodies.
 * Incomplete body: recover from the original file before editing.
 */
/* Visible intent: last-resort handler for faults taken inside Xen itself;
   prints the fault type, handles the lfetch.fault special case by setting
   psr.ed, then dumps registers and panics. */
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
362
unsigned long iim, unsigned long itir, unsigned long arg5,
363
unsigned long arg6, unsigned long arg7, unsigned long stack)
365
struct pt_regs *regs = (struct pt_regs *)&stack;
367
static const char *const reason[] = {
368
"IA-64 Illegal Operation fault",
369
"IA-64 Privileged Operation fault",
370
"IA-64 Privileged Register fault",
371
"IA-64 Reserved Register/Field fault",
372
"Disabled Instruction Set Transition fault",
373
"Unknown fault 5", "Unknown fault 6",
374
"Unknown fault 7", "Illegal Hazard fault",
375
"Unknown fault 9", "Unknown fault 10",
376
"Unknown fault 11", "Unknown fault 12",
377
"Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
380
printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
381
"ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
382
regs->cr_iip, regs->cr_ipsr, isr);
384
if ((isr & IA64_ISR_NA) &&
385
((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
387
* This fault was due to lfetch.fault, set "ed" bit in the
388
* psr to cancel the lfetch.
390
ia64_psr(regs)->ed = 1;
391
printk("ia64_fault: handled lfetch.fault\n");
397
printk("VHPT Translation.\n");
401
printk("Alt DTLB.\n");
405
printk("Instruction Key Miss.\n");
409
printk("Data Key Miss.\n");
413
printk("Dirty-bit.\n");
417
/* __domain_get_bundle() may cause fault. */
418
if (ia64_done_with_exception(regs))
420
printk("Data Access-bit.\n");
424
printk("Page Not Found.\n");
428
printk("Key Permission.\n");
432
printk("Instruction Access Rights.\n");
435
case 24: /* General Exception */
436
code = (isr >> 4) & 0xf;
437
printk("General Exception: %s%s.\n", reason[code],
438
(code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
439
" (data access)") : "");
441
#ifdef CONFIG_IA64_PRINT_HAZARDS
442
printk("%s[%d]: possible hazard @ ip=%016lx "
443
"(pr = %016lx)\n", current->comm, current->pid,
444
regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
446
printk("ia64_fault: returning on hazard\n");
452
printk("Disabled FP-Register.\n");
456
printk("NaT consumption.\n");
464
printk("Unaligned Reference.\n");
468
printk("Unsupported data reference.\n");
472
printk("Floating-Point Fault.\n");
476
printk("Floating-Point Trap.\n");
480
printk("Lower Privilege Transfer Trap.\n");
484
printk("Taken Branch Trap.\n");
488
printk("Single Step Trap.\n");
492
printk("IA-32 Exception.\n");
496
printk("IA-32 Intercept.\n");
500
printk("IA-32 Interrupt.\n");
504
printk("Fault %lu\n", vector);
508
show_registers(regs);
509
panic("Fault in Xen.\n");
512
/* Also read in hyperprivop.S */
516
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; gaps (526 -> 529, 533 -> 536, 549 -> 552 ...) mean dropped
 * lines (the `iim` parameter continuation, return type, braces, iip
 * increments).  Incomplete body: recover from the original file.
 */
/* Visible intent: dispatch `break` instructions by immediate value:
   SSC emulation (0x80001/0x80002), debugger break (0 / CDB_BREAK_NUM in
   Xen context), hypercalls (d->arch.breakimm), hyperprivops
   (HYPERPRIVOP_START..+MAX); anything else is reflected to the guest as
   IA64_BREAK_VECTOR. */
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
519
struct domain *d = current->domain;
520
struct vcpu *v = current;
523
/* FIXME: don't hardcode constant */
524
if ((iim == 0x80001 || iim == 0x80002)
525
&& ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
526
do_ssc(vcpu_get_gr(current, 36), regs);
529
else if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs)) {
531
show_registers(regs);
532
debugger_trap_fatal(0 /* don't care */ , regs);
533
regs_increment_iip(regs);
536
else if (iim == d->arch.breakimm &&
537
ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
538
/* by default, do not continue */
539
v->arch.hypercall_continuation = 0;
541
if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
542
if (!PSCBX(v, hypercall_continuation))
543
vcpu_increment_iip(current);
545
reflect_interruption(isr, regs, vector);
546
} else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
547
&& ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
548
if (ia64_hyperprivop(iim, regs))
549
vcpu_increment_iip(current);
552
die_if_kernel("bug check", regs, iim);
554
reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
559
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; gaps mean dropped lines (return type, remaining parameter,
 * `vector` declaration, braces).  Incomplete body: recover from the
 * original file before editing.
 */
/* Visible intent: emulate a privileged operation via priv_emulate(); if it
   still faults, unpack an ISR.code possibly packed into the low byte of
   IA64_GENEX_VECTOR results and reflect the fault to the guest. */
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
564
vector = priv_emulate(current, regs, isr);
565
if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
566
// Note: if a path results in a vector to reflect that requires
567
// iha/itir (e.g. vcpu_force_data_miss), they must be set there
569
* IA64_GENEX_VECTOR may contain in the lowest byte an ISR.code
570
* see IA64_ILLOP_FAULT, ...
572
if ((vector & ~0xffUL) == IA64_GENEX_VECTOR) {
573
isr = vector & 0xffUL;
574
vector = IA64_GENEX_VECTOR;
576
reflect_interruption(isr, regs, vector);
581
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; the opening/closing braces and anything after the
 * __ia64_load_fpu() call fall in dropped lines.  Incomplete body.
 */
/* Visible intent: first-use FP fault handling -- clear the host psr.dfh
   shadow, mark FP state as "modified by host", and load the vcpu's fph
   registers unless this CPU already owns them. */
ia64_lazy_load_fpu(struct vcpu *v)
583
if (PSCB(v, hpsr_dfh)) {
584
PSCB(v, hpsr_dfh) = 0;
585
PSCB(v, hpsr_mfh) = 1;
586
if (__ia64_per_cpu_var(fp_owner) != v)
587
__ia64_load_fpu(v->arch._thread.fph);
592
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers.  This is visibly a switch on `vector`, but nearly every
 * `case N:` label, `break`, and several whole branches fall in the dropped
 * lines (gaps 602 -> 606, 606 -> 609, ...), leaving mostly the
 * `vector = IA64_*_VECTOR;` assignments.  Incomplete body: recover from
 * the original file before editing.
 */
/* Visible intent: map a hardware fault class to the guest IVT vector to
   deliver, handling special cases inline (lazy FP load, FPSWA emulation,
   NaT-as-privop under CONFIG_PRIVIFY, debugger hooks), then record
   ifa/itir in PSCB and reflect_interruption(). */
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
593
unsigned long isr, unsigned long iim,
594
unsigned long vector)
596
struct vcpu *v = current;
597
unsigned long check_lazy_cover = 0;
598
unsigned long psr = regs->cr_ipsr;
599
unsigned long status;
601
/* Following faults shouldn't be seen from Xen itself */
602
BUG_ON(!(psr & IA64_PSR_CPL));
606
vector = IA64_INST_KEY_MISS_VECTOR;
609
vector = IA64_DATA_KEY_MISS_VECTOR;
612
vector = IA64_DIRTY_BIT_VECTOR;
615
vector = IA64_INST_ACCESS_BIT_VECTOR;
618
check_lazy_cover = 1;
619
vector = IA64_DATA_ACCESS_BIT_VECTOR;
622
check_lazy_cover = 1;
623
vector = IA64_PAGE_NOT_PRESENT_VECTOR;
626
vector = IA64_KEY_PERMISSION_VECTOR;
629
vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
632
check_lazy_cover = 1;
633
vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
636
vector = IA64_GENEX_VECTOR;
639
ia64_lazy_load_fpu(v);
640
if (!PSCB(v, vpsr_dfh)) {
641
regs->cr_ipsr &= ~IA64_PSR_DFH;
644
vector = IA64_DISABLED_FPREG_VECTOR;
647
if (((isr >> 4L) & 0xfL) == 1) {
648
/* Fault is due to a register NaT consumption fault. */
649
//regs->eml_unat = 0; FIXME: DO WE NEED THIS??
650
vector = IA64_NAT_CONSUMPTION_VECTOR;
654
// pass null pointer dereferences through with no error
655
// but retain debug output for non-zero ifa
657
vector = IA64_NAT_CONSUMPTION_VECTOR;
661
#ifdef CONFIG_PRIVIFY
662
/* Some privified operations are coded using reg+64 instead
664
printk("*** NaT fault... attempting to handle as privop\n");
665
printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
666
isr, ifa, regs->cr_iip, psr);
667
//regs->eml_unat = 0; FIXME: DO WE NEED THIS???
668
// certain NaT faults are higher priority than privop faults
669
vector = priv_emulate(v, regs, isr);
670
if (vector == IA64_NO_FAULT) {
671
printk("*** Handled privop masquerading as NaT "
676
vector = IA64_NAT_CONSUMPTION_VECTOR;
679
//printk("*** Handled speculation vector, itc=%lx!\n",
681
PSCB(current, iim) = iim;
682
vector = IA64_SPECULATION_VECTOR;
685
vector = IA64_DEBUG_VECTOR;
686
if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_DEBUG))
690
// FIXME: Should we handle unaligned refs in Xen??
691
vector = IA64_UNALIGNED_REF_VECTOR;
694
status = handle_fpu_swa(1, regs, isr);
696
vcpu_increment_iip(v);
699
vector = IA64_FP_FAULT_VECTOR;
702
status = handle_fpu_swa(0, regs, isr);
705
vector = IA64_FP_TRAP_VECTOR;
708
if (isr & (1UL << 4))
709
printk("ia64_handle_reflection: handling "
710
"unimplemented instruction address %s\n",
711
(isr & (1UL<<32)) ? "fault" : "trap");
712
vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
715
vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
716
if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_TBRANCH))
720
vector = IA64_SINGLE_STEP_TRAP_VECTOR;
721
if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_SSTEP))
726
panic_domain(regs, "ia64_handle_reflection: "
727
"unhandled vector=0x%lx\n", vector);
730
if (check_lazy_cover && (isr & IA64_ISR_IR) &&
731
handle_lazy_cover(v, regs))
733
PSCB(current, ifa) = ifa;
734
PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
735
reflect_interruption(isr, regs, vector);
739
/*
 * NOTE(review): mangled extraction -- bare numeric lines are stray original
 * line numbers; gaps (746 -> 749, 773 -> 775, 785 -> 788, 793 -> 798 ...)
 * mean dropped lines (return type, `gpfn`/`fault` declarations, `else`
 * branches, shifts, returns, closing braces).  Incomplete body: recover
 * from the original file before editing.
 */
/* Visible intent: shadow/dirty-logging fault handler -- find the gpfn via
   the VHPT (or vcpu_translate() fallback), mark it dirty in the shadow
   bitmap, then either set _PAGE_D and purge the TC (guest dirty bit
   already set) or reflect a dirty-bit fault (vector 8) to the guest. */
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
740
unsigned long isr, struct pt_regs *regs)
742
struct vcpu *v = current;
743
struct domain *d = current->domain;
745
unsigned long pte = 0;
746
struct vhpt_lf_entry *vlfe;
749
* v->arch.vhpt_pg_shift shouldn't be used here.
750
* Currently dirty page logging bitmap is allocated based
751
* on PAGE_SIZE. This is part of xen_domctl_shadow_op ABI.
752
* If we want to log dirty pages in finer grained when
753
* v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to
754
* revise the ABI and update this function and the related
755
* tool stack (live relocation).
757
unsigned long vhpt_pg_shift = PAGE_SHIFT;
759
/* There are 2 jobs to do:
760
- marking the page as dirty (the metaphysical address must be
761
extracted to do that).
762
- reflecting or not the fault (the virtual Dirty bit must be
763
extracted to decide).
764
Unfortunatly these informations are not immediatly available!
767
/* Extract the metaphysical address.
768
Try to get it from VHPT and M2P as we need the flags. */
769
vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
770
pte = vlfe->page_flags;
771
if (vlfe->ti_tag == ia64_ttag(ifa)) {
772
/* The VHPT entry is valid. */
773
gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
775
BUG_ON(gpfn == INVALID_M2P_ENTRY);
777
unsigned long itir, iha;
780
/* The VHPT entry is not valid. */
783
/* FIXME: gives a chance to tpa, as the TC was valid. */
785
fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);
788
if (fault != IA64_NO_FAULT) {
789
/* This will trigger a dtlb miss. */
790
ia64_ptcl(ifa, vhpt_pg_shift << 2);
793
gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift);
798
/* Set the dirty bit in the bitmap. */
799
shadow_mark_page_dirty(d, gpfn);
801
/* Update the local TC/VHPT and decides wether or not the fault should
803
SMP note: we almost ignore the other processors. The shadow_bitmap
804
has been atomically updated. If the dirty fault happen on another
805
processor, it will do its job.
809
/* We will know how to handle the fault. */
811
if (pte & _PAGE_VIRT_D) {
812
/* Rewrite VHPT entry.
813
There is no race here because only the
814
cpu VHPT owner can write page_flags. */
816
vlfe->page_flags = pte | _PAGE_D;
818
/* Purge the TC locally.
819
It will be reloaded from the VHPT iff the
820
VHPT entry is still valid. */
821
ia64_ptcl(ifa, vhpt_pg_shift << 2);
823
atomic64_inc(&d->arch.shadow_fault_count);
826
In this case there is no need to purge. */
827
ia64_handle_reflection(ifa, regs, isr, 0, 8);
830
/* We don't know wether or not the fault must be
831
reflected. The VHPT entry is not valid. */
832
/* FIXME: in metaphysical mode, we could do an ITC now. */
833
ia64_ptcl(ifa, vhpt_pg_shift << 2);