~ubuntu-branches/ubuntu/utopic/xen/utopic

Viewing changes to xen/arch/ia64/xen/faults.c

  • Committer: Bazaar Package Importer
  • Author(s): Bastian Blank
  • Date: 2010-05-06 15:47:38 UTC
  • mto: (1.3.1) (15.1.1 sid) (4.1.1 experimental)
  • mto: This revision was merged to the branch mainline in revision 3.
  • Revision ID: james.westby@ubuntu.com-20100506154738-agoz0rlafrh1fnq7
Tags: upstream-4.0.0
Import upstream version 4.0.0

/*
 * Miscellaneous process/domain related routines
 *
 * Copyright (C) 2004 Hewlett-Packard Co.
 *      Dan Magenheimer (dan.magenheimer@hp.com)
 *
 */

#include <xen/config.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <asm/ptrace.h>
#include <xen/delay.h>
#include <xen/perfc.h>
#include <xen/mm.h>

#include <asm/system.h>
#include <asm/processor.h>
#include <xen/irq.h>
#include <xen/event.h>
#include <asm/privop.h>
#include <asm/vcpu.h>
#include <asm/ia64_int.h>
#include <asm/dom_fw.h>
#include <asm/vhpt.h>
#include <asm/debugger.h>
#include <asm/fpswa.h>
#include <asm/bundle.h>
#include <asm/asm-xsi-offsets.h>
#include <asm/shadow.h>
#include <asm/uaccess.h>
#include <asm/p2m_entry.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
/* FIXME: where should these declarations live? */
extern int ia64_hyperprivop(unsigned long, REGS *);
extern IA64FAULT ia64_hypercall(struct pt_regs *regs);

extern void do_ssc(unsigned long ssc, struct pt_regs *regs);

// should never panic domain... if it does, stack may have been overrun
static void check_bad_nested_interruption(unsigned long isr,
                                          struct pt_regs *regs,
                                          unsigned long vector)
{
        struct vcpu *v = current;

        if (!(PSCB(v, ipsr) & IA64_PSR_DT)) {
                panic_domain(regs,
                             "psr.dt off, trying to deliver nested dtlb!\n");
        }
        vector &= ~0xf;
        if (vector != IA64_DATA_TLB_VECTOR &&
            vector != IA64_ALT_DATA_TLB_VECTOR &&
            vector != IA64_VHPT_TRANS_VECTOR) {
                panic_domain(regs, "psr.ic off, delivering fault=%lx,"
                             "ipsr=%lx,iip=%lx,ifa=%lx,isr=%lx,PSCB.iip=%lx\n",
                             vector, regs->cr_ipsr, regs->cr_iip, PSCB(v, ifa),
                             isr, PSCB(v, iip));
        }
}

static void reflect_interruption(unsigned long isr, struct pt_regs *regs,
                                 unsigned long vector)
{
        struct vcpu *v = current;

        if (!PSCB(v, interrupt_collection_enabled))
                check_bad_nested_interruption(isr, regs, vector);
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        PSCB(v, ipsr) = vcpu_get_psr(v);
        vcpu_bsw0(v);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;

        regs->cr_iip = ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
        if (PSCB(v, dcr) & IA64_DCR_BE)
                regs->cr_ipsr |= IA64_PSR_BE;
        else
                regs->cr_ipsr &= ~IA64_PSR_BE;

        if (PSCB(v, hpsr_dfh))
                regs->cr_ipsr |= IA64_PSR_DFH;
        PSCB(v, vpsr_dfh) = 0;
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;

        perfc_incra(slow_reflect, vector >> 8);

        debugger_event(vector == IA64_EXTINT_VECTOR ?
                       XEN_IA64_DEBUG_ON_EXTINT : XEN_IA64_DEBUG_ON_EXCEPT);
}
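
The hand-off above is the heart of fault reflection: the guest's handler entry point is the virtualized IVA plus the vector's IVT offset, aligned down to a 256-byte boundary. A minimal, self-contained sketch of just that address arithmetic (the IVA and offset values below are made-up stand-ins, not values taken from this file):

/* Illustrative sketch only -- not part of faults.c; values are stand-ins. */
#include <stdio.h>

int main(void)
{
        unsigned long iva = 0xa000000000000000UL;  /* stand-in guest cr.iva */
        unsigned long vector = 0x5000UL;           /* stand-in IVT offset */
        /* Mirrors: regs->cr_iip =
           ((unsigned long)PSCBX(v, iva) + vector) & ~0xffUL; */
        unsigned long iip = (iva + vector) & ~0xffUL;
        printf("guest handler iip = 0x%lx\n", iip);
        return 0;
}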
 

void reflect_event(void)
{
        struct vcpu *v = current;
        struct pt_regs *regs;
        unsigned long isr;

        if (!event_pending(v))
                return;

        /* Sanity check */
        if (is_idle_vcpu(v)) {
                //printk("WARN: invocation to reflect_event in nested xen\n");
                return;
        }

        regs = vcpu_regs(v);

        isr = regs->cr_ipsr & IA64_PSR_RI;

        if (!PSCB(v, interrupt_collection_enabled))
                printk("psr.ic off, delivering event, ipsr=%lx,iip=%lx,"
                       "isr=%lx,viip=0x%lx\n",
                       regs->cr_ipsr, regs->cr_iip, isr, PSCB(v, iip));
        PSCB(v, unat) = regs->ar_unat;  // not sure if this is really needed?
        PSCB(v, precover_ifs) = regs->cr_ifs;
        PSCB(v, ipsr) = vcpu_get_psr(v);
        vcpu_bsw0(v);
        PSCB(v, isr) = isr;
        PSCB(v, iip) = regs->cr_iip;
        PSCB(v, ifs) = 0;

        regs->cr_iip = v->arch.event_callback_ip;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
        regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr, IA64_PSR_CPL0_BIT);
        if (PSCB(v, dcr) & IA64_DCR_BE)
                regs->cr_ipsr |= IA64_PSR_BE;
        else
                regs->cr_ipsr &= ~IA64_PSR_BE;

        if (PSCB(v, hpsr_dfh))
                regs->cr_ipsr |= IA64_PSR_DFH;
        PSCB(v, vpsr_dfh) = 0;
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v, interrupt_collection_enabled) = 0;

        debugger_event(XEN_IA64_DEBUG_ON_EVENT);
}

static int handle_lazy_cover(struct vcpu *v, struct pt_regs *regs)
{
        if (!PSCB(v, interrupt_collection_enabled)) {
                PSCB(v, ifs) = regs->cr_ifs;
                regs->cr_ifs = 0;
                perfc_incr(lazy_cover);
                return 1;       // retry same instruction with cr.ifs off
        }
        return 0;
}

void ia64_do_page_fault(unsigned long address, unsigned long isr,
                        struct pt_regs *regs, unsigned long itir)
{
        unsigned long iip = regs->cr_iip, iha;
        // FIXME should validate address here
        unsigned long pteval;
        unsigned long is_data = !((isr >> IA64_ISR_X_BIT) & 1UL);
        IA64FAULT fault;
        int is_ptc_l_needed = 0;
        ia64_itir_t _itir = {.itir = itir};

        if ((isr & IA64_ISR_SP)
            || ((isr & IA64_ISR_NA)
                && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to a speculative load or lfetch.fault,
                 * set the "ed" bit in the psr to ensure forward progress.
                 * (Target register will get a NaT for ld.s, lfetch will be
                 * canceled.)
                 */
                ia64_psr(regs)->ed = 1;
                return;
        }

 again:
        fault = vcpu_translate(current, address, is_data, &pteval,
                               &itir, &iha);
        if (fault == IA64_NO_FAULT || fault == IA64_USE_TLB) {
                struct p2m_entry entry;
                unsigned long m_pteval;
                m_pteval = translate_domain_pte(pteval, address, itir,
                                                &(_itir.itir), &entry);
                vcpu_itc_no_srlz(current, is_data ? 2 : 1, address,
                                 m_pteval, pteval, _itir.itir, &entry);
                if ((fault == IA64_USE_TLB && !current->arch.dtlb.pte.p) ||
                    p2m_entry_retry(&entry)) {
                        /* dtlb has been purged in-between.  This dtlb was
                           matching.  Undo the work.  */
                        vcpu_flush_tlb_vhpt_range(address, _itir.ps);

                        // The stale entry which we inserted above may remain
                        // in the TLB cache.  We don't purge it now, hoping
                        // that the next itc purges it.
                        is_ptc_l_needed = 1;
                        goto again;
                }
                return;
        }

        if (is_ptc_l_needed)
                vcpu_ptc_l(current, address, _itir.ps);
        if (!guest_mode(regs)) {
                /* The fault occurs inside Xen.  */
                if (!ia64_done_with_exception(regs)) {
                        // should never happen.  If it does, region 0 addr may
                        // indicate a bad xen pointer
                        printk("*** xen_handle_domain_access: exception table"
                               " lookup failed, iip=0x%lx, addr=0x%lx, "
                               "spinning...\n", iip, address);
                        panic_domain(regs, "*** xen_handle_domain_access: "
                                     "exception table lookup failed, "
                                     "iip=0x%lx, addr=0x%lx, spinning...\n",
                                     iip, address);
                }
                return;
        }

        if ((isr & IA64_ISR_IR) && handle_lazy_cover(current, regs))
                return;

        if (!PSCB(current, interrupt_collection_enabled)) {
                check_bad_nested_interruption(isr, regs, fault);
                //printk("Delivering NESTED DATA TLB fault\n");
                fault = IA64_DATA_NESTED_TLB_VECTOR;
                regs->cr_iip =
                    ((unsigned long)PSCBX(current, iva) + fault) & ~0xffUL;
                regs->cr_ipsr =
                    (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
                regs->cr_ipsr = vcpu_pl_adjust(regs->cr_ipsr,
                                               IA64_PSR_CPL0_BIT);
                if (PSCB(current, dcr) & IA64_DCR_BE)
                        regs->cr_ipsr |= IA64_PSR_BE;
                else
                        regs->cr_ipsr &= ~IA64_PSR_BE;

                if (PSCB(current, hpsr_dfh))
                        regs->cr_ipsr |= IA64_PSR_DFH;
                PSCB(current, vpsr_dfh) = 0;
                perfc_incra(slow_reflect, fault >> 8);
                return;
        }

        PSCB(current, itir) = itir;
        PSCB(current, iha) = iha;
        PSCB(current, ifa) = address;
        reflect_interruption(isr, regs, fault);
}
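
The early-out at the top of ia64_do_page_fault is worth isolating: for a speculative load (ISR.sp) or an lfetch.fault (ISR.na with the lfetch code), no translation is attempted at all; setting psr.ed makes the instruction complete benignly on retry. A standalone sketch of that predicate (the bit positions and code values are stand-ins, not the real IA64_ISR_* definitions):

/* Illustrative sketch only -- not part of faults.c. */
#include <stdio.h>

#define ISR_SP          (1UL << 36)     /* stand-in for IA64_ISR_SP */
#define ISR_NA          (1UL << 35)     /* stand-in for IA64_ISR_NA */
#define ISR_CODE_MASK   0xfUL           /* stand-in for IA64_ISR_CODE_MASK */
#define ISR_CODE_LFETCH 0x4UL           /* stand-in for IA64_ISR_CODE_LFETCH */

/* Mirrors the if() guarding the psr.ed early-out in ia64_do_page_fault. */
static int is_speculation_fault(unsigned long isr)
{
        return (isr & ISR_SP) ||
               ((isr & ISR_NA) &&
                (isr & ISR_CODE_MASK) == ISR_CODE_LFETCH);
}

int main(void)
{
        printf("%d %d\n", is_speculation_fault(ISR_SP),
               is_speculation_fault(0));
        return 0;
}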
 

fpswa_interface_t *fpswa_interface = 0;

void __init trap_init(void)
{
        if (ia64_boot_param->fpswa)
                /* FPSWA fixup: make the interface pointer a virtual address */
                fpswa_interface = __va(ia64_boot_param->fpswa);
        else
                printk("No FPSWA supported.\n");
}

static fpswa_ret_t
fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
           unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
           unsigned long *ifs, struct pt_regs *regs)
{
        fp_state_t fp_state;
        fpswa_ret_t ret;
        XEN_EFI_RR_DECLARE(rr6, rr7);

        if (!fpswa_interface)
                return (fpswa_ret_t) {-1, 0, 0, 0};

        memset(&fp_state, 0, sizeof(fp_state_t));

        /*
         * compute fp_state.  only FP registers f6 - f11 are used by the
         * kernel, so set those bits in the mask and set the low volatile
         * pointer to point to these registers.
         */
        fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */

        fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
        /*
         * unsigned long (*EFI_FPSWA) (
         *      unsigned long    trap_type,
         *      void             *Bundle,
         *      unsigned long    *pipsr,
         *      unsigned long    *pfsr,
         *      unsigned long    *pisr,
         *      unsigned long    *ppreds,
         *      unsigned long    *pifs,
         *      void             *fp_state);
         */
        XEN_EFI_RR_ENTER(rr6, rr7);
        ret = (*fpswa_interface->fpswa) (fp_fault, bundle,
                                         ipsr, fpsr, isr, pr, ifs, &fp_state);
        XEN_EFI_RR_LEAVE(rr6, rr7);

        return ret;
}
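
The commented prototype above documents the calling convention fp_emulate relies on. A self-contained sketch of calling through an EFI_FPSWA-shaped function pointer (the stub body is a placeholder for illustration, not the firmware implementation):

/* Illustrative sketch only -- not part of faults.c. */
#include <stdio.h>
#include <stddef.h>

typedef unsigned long (*efi_fpswa_t)(unsigned long trap_type, void *bundle,
                                     unsigned long *pipsr, unsigned long *pfsr,
                                     unsigned long *pisr, unsigned long *ppreds,
                                     unsigned long *pifs, void *fp_state);

/* Placeholder standing in for the firmware entry point. */
static unsigned long fpswa_stub(unsigned long trap_type, void *bundle,
                                unsigned long *pipsr, unsigned long *pfsr,
                                unsigned long *pisr, unsigned long *ppreds,
                                unsigned long *pifs, void *fp_state)
{
        (void)bundle; (void)pipsr; (void)pfsr; (void)pisr;
        (void)ppreds; (void)pifs; (void)fp_state;
        return trap_type ? 0 : 1;   /* pretend faults succeed, traps fail */
}

int main(void)
{
        efi_fpswa_t fpswa = fpswa_stub;
        unsigned long ipsr = 0, fpsr = 0, isr = 0, preds = 0, ifs = 0;
        unsigned long status = fpswa(1, NULL, &ipsr, &fpsr, &isr, &preds,
                                     &ifs, NULL);
        printf("fpswa status = %lu\n", status);
        return 0;
}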
 

/*
 * Handle floating-point assist faults and traps for domain.
 */
unsigned long
handle_fpu_swa(int fp_fault, struct pt_regs *regs, unsigned long isr)
{
        IA64_BUNDLE bundle;
        unsigned long fault_ip;
        fpswa_ret_t ret;
        unsigned long rc;

        fault_ip = regs->cr_iip;
        /*
         * When an FP trap occurs, the trapping instruction has already
         * completed.  If ipsr.ri == 0, the trapping instruction is in the
         * previous bundle.
         */
        if (!fp_fault && (ia64_psr(regs)->ri == 0))
                fault_ip -= 16;

        if (VMX_DOMAIN(current)) {
                rc = __vmx_get_domain_bundle(fault_ip, &bundle);
        } else {
                rc = 0;
                if (vcpu_get_domain_bundle(current, regs, fault_ip,
                                           &bundle) == 0)
                        rc = IA64_RETRY;
        }
        if (rc == IA64_RETRY) {
                PSCBX(current, fpswa_ret) = (fpswa_ret_t){IA64_RETRY, 0, 0, 0};
                gdprintk(XENLOG_DEBUG,
                         "%s(%s): floating-point bundle at 0x%lx not mapped\n",
                         __FUNCTION__, fp_fault ? "fault" : "trap", fault_ip);
                return IA64_RETRY;
        }

        ret = fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
                         &isr, &regs->pr, &regs->cr_ifs, regs);

        if (ret.status) {
                PSCBX(current, fpswa_ret) = ret;
                gdprintk(XENLOG_ERR, "%s(%s): fp_emulate() returned %ld\n",
                         __FUNCTION__, fp_fault ? "fault" : "trap",
                         ret.status);
        }

        return ret.status;
}

void
ia64_fault(unsigned long vector, unsigned long isr, unsigned long ifa,
           unsigned long iim, unsigned long itir, unsigned long arg5,
           unsigned long arg6, unsigned long arg7, unsigned long stack)
{
        struct pt_regs *regs = (struct pt_regs *)&stack;
        unsigned long code;
        static const char *const reason[] = {
                "IA-64 Illegal Operation fault",
                "IA-64 Privileged Operation fault",
                "IA-64 Privileged Register fault",
                "IA-64 Reserved Register/Field fault",
                "Disabled Instruction Set Transition fault",
                "Unknown fault 5", "Unknown fault 6",
                "Unknown fault 7", "Illegal Hazard fault",
                "Unknown fault 9", "Unknown fault 10",
                "Unknown fault 11", "Unknown fault 12",
                "Unknown fault 13", "Unknown fault 14", "Unknown fault 15"
        };

        printk("ia64_fault, vector=0x%lx, ifa=0x%016lx, iip=0x%016lx, "
               "ipsr=0x%016lx, isr=0x%016lx\n", vector, ifa,
               regs->cr_iip, regs->cr_ipsr, isr);

        if ((isr & IA64_ISR_NA) &&
            ((isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) {
                /*
                 * This fault was due to lfetch.fault, set "ed" bit in the
                 * psr to cancel the lfetch.
                 */
                ia64_psr(regs)->ed = 1;
                printk("ia64_fault: handled lfetch.fault\n");
                return;
        }

        switch (vector) {
        case 0:
                printk("VHPT Translation.\n");
                break;

        case 4:
                printk("Alt DTLB.\n");
                break;

        case 6:
                printk("Instruction Key Miss.\n");
                break;

        case 7:
                printk("Data Key Miss.\n");
                break;

        case 8:
                printk("Dirty-bit.\n");
                break;

        case 10:
                /* __domain_get_bundle() may cause fault. */
                if (ia64_done_with_exception(regs))
                        return;
                printk("Data Access-bit.\n");
                break;

        case 20:
                printk("Page Not Found.\n");
                break;

        case 21:
                printk("Key Permission.\n");
                break;

        case 22:
                printk("Instruction Access Rights.\n");
                break;

        case 24:        /* General Exception */
                code = (isr >> 4) & 0xf;
                printk("General Exception: %s%s.\n", reason[code],
                       (code == 3) ? ((isr & (1UL << 37)) ? " (RSE access)" :
                                       " (data access)") : "");
                if (code == 8) {
#ifdef CONFIG_IA64_PRINT_HAZARDS
                        printk("%s[%d]: possible hazard @ ip=%016lx "
                               "(pr = %016lx)\n", current->comm, current->pid,
                               regs->cr_iip + ia64_psr(regs)->ri, regs->pr);
#endif
                        printk("ia64_fault: returning on hazard\n");
                        return;
                }
                break;

        case 25:
                printk("Disabled FP-Register.\n");
                break;

        case 26:
                printk("NaT consumption.\n");
                break;

        case 29:
                printk("Debug.\n");
                break;

        case 30:
                printk("Unaligned Reference.\n");
                break;

        case 31:
                printk("Unsupported data reference.\n");
                break;

        case 32:
                printk("Floating-Point Fault.\n");
                break;

        case 33:
                printk("Floating-Point Trap.\n");
                break;

        case 34:
                printk("Lower Privilege Transfer Trap.\n");
                break;

        case 35:
                printk("Taken Branch Trap.\n");
                break;

        case 36:
                printk("Single Step Trap.\n");
                break;

        case 45:
                printk("IA-32 Exception.\n");
                break;

        case 46:
                printk("IA-32 Intercept.\n");
                break;

        case 47:
                printk("IA-32 Interrupt.\n");
                break;

        default:
                printk("Fault %lu\n", vector);
                break;
        }

        show_registers(regs);
        panic("Fault in Xen.\n");
}
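
The General Exception arm (case 24) indexes the reason[] table with ISR bits 7:4. A standalone sketch of that decode (the table here is a trimmed stand-in mirroring the first few entries above; the ISR value is made up):

/* Illustrative sketch only -- not part of faults.c. */
#include <stdio.h>

int main(void)
{
        static const char *const reason[] = {
                "Illegal Operation", "Privileged Operation",
                "Privileged Register", "Reserved Register/Field",
        };
        unsigned long isr = 0x30UL;              /* stand-in ISR value */
        unsigned long code = (isr >> 4) & 0xf;   /* ISR.code bits 7:4 */

        if (code < sizeof(reason) / sizeof(reason[0]))
                printf("General Exception: %s fault\n", reason[code]);
        return 0;
}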
 

/* Also read in hyperprivop.S  */
int first_break = 0;

void
ia64_handle_break(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                  unsigned long iim)
{
        struct domain *d = current->domain;
        struct vcpu *v = current;
        IA64FAULT vector;

        /* FIXME: don't hardcode constant */
        if ((iim == 0x80001 || iim == 0x80002)
            && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
                do_ssc(vcpu_get_gr(current, 36), regs);
        }
#ifdef CRASH_DEBUG
        else if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs)) {
                if (iim == 0)
                        show_registers(regs);
                debugger_trap_fatal(0 /* don't care */ , regs);
                regs_increment_iip(regs);
        }
#endif
        else if (iim == d->arch.breakimm &&
                 ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
                /* by default, do not continue */
                v->arch.hypercall_continuation = 0;

                if ((vector = ia64_hypercall(regs)) == IA64_NO_FAULT) {
                        if (!PSCBX(v, hypercall_continuation))
                                vcpu_increment_iip(current);
                } else
                        reflect_interruption(isr, regs, vector);
        } else if ((iim - HYPERPRIVOP_START) < HYPERPRIVOP_MAX
                   && ia64_get_cpl(regs->cr_ipsr) == CONFIG_CPL0_EMUL) {
                if (ia64_hyperprivop(iim, regs))
                        vcpu_increment_iip(current);
        } else {
                if (iim == 0)
                        die_if_kernel("bug check", regs, iim);
                PSCB(v, iim) = iim;
                reflect_interruption(isr, regs, IA64_BREAK_VECTOR);
        }
}
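
The if/else chain above is a dispatcher keyed on the break immediate (iim): simulated system calls, hypercalls, hyperprivops, or reflection back to the guest. A condensed classifier sketch of the same logic (all constants are stand-ins for the real breakimm and HYPERPRIVOP_* definitions, and the CPL checks are omitted):

/* Illustrative sketch only -- not part of faults.c; constants are stand-ins. */
#include <stdio.h>

#define SSC_BREAK_1       0x80001UL
#define SSC_BREAK_2       0x80002UL
#define BREAKIMM          0x1000UL   /* stand-in for d->arch.breakimm */
#define HYPERPRIVOP_START 0x0UL      /* stand-in */
#define HYPERPRIVOP_MAX   0x20UL     /* stand-in */

static const char *classify_break(unsigned long iim)
{
        if (iim == SSC_BREAK_1 || iim == SSC_BREAK_2)
                return "simulated system call (do_ssc)";
        if (iim == BREAKIMM)
                return "hypercall (ia64_hypercall)";
        if (iim - HYPERPRIVOP_START < HYPERPRIVOP_MAX)
                return "hyperprivop (ia64_hyperprivop)";
        return "reflected to guest as IA64_BREAK_VECTOR";
}

int main(void)
{
        printf("%s\n", classify_break(BREAKIMM));
        return 0;
}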
 

void
ia64_handle_privop(unsigned long ifa, struct pt_regs *regs, unsigned long isr,
                   unsigned long itir)
{
        IA64FAULT vector;

        vector = priv_emulate(current, regs, isr);
        if (vector != IA64_NO_FAULT && vector != IA64_RFI_IN_PROGRESS) {
                // Note: if a path results in a vector that must be reflected
                // and requires iha/itir (e.g. vcpu_force_data_miss), they
                // must be set there
                /*
                 * IA64_GENEX_VECTOR may carry an ISR.code value in its lowest
                 * byte; see IA64_ILLOP_FAULT, ...
                 */
                if ((vector & ~0xffUL) == IA64_GENEX_VECTOR) {
                        isr = vector & 0xffUL;
                        vector = IA64_GENEX_VECTOR;
                }
                reflect_interruption(isr, regs, vector);
        }
}

void
ia64_lazy_load_fpu(struct vcpu *v)
{
        if (PSCB(v, hpsr_dfh)) {
                PSCB(v, hpsr_dfh) = 0;
                PSCB(v, hpsr_mfh) = 1;
                if (__ia64_per_cpu_var(fp_owner) != v)
                        __ia64_load_fpu(v->arch._thread.fph);
        }
}

void
ia64_handle_reflection(unsigned long ifa, struct pt_regs *regs,
                       unsigned long isr, unsigned long iim,
                       unsigned long vector)
{
        struct vcpu *v = current;
        unsigned long check_lazy_cover = 0;
        unsigned long psr = regs->cr_ipsr;
        unsigned long status;

        /* Following faults shouldn't be seen from Xen itself */
        BUG_ON(!(psr & IA64_PSR_CPL));

        switch (vector) {
        case 6:
                vector = IA64_INST_KEY_MISS_VECTOR;
                break;
        case 7:
                vector = IA64_DATA_KEY_MISS_VECTOR;
                break;
        case 8:
                vector = IA64_DIRTY_BIT_VECTOR;
                break;
        case 9:
                vector = IA64_INST_ACCESS_BIT_VECTOR;
                break;
        case 10:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_BIT_VECTOR;
                break;
        case 20:
                check_lazy_cover = 1;
                vector = IA64_PAGE_NOT_PRESENT_VECTOR;
                break;
        case 21:
                vector = IA64_KEY_PERMISSION_VECTOR;
                break;
        case 22:
                vector = IA64_INST_ACCESS_RIGHTS_VECTOR;
                break;
        case 23:
                check_lazy_cover = 1;
                vector = IA64_DATA_ACCESS_RIGHTS_VECTOR;
                break;
        case 24:
                vector = IA64_GENEX_VECTOR;
                break;
        case 25:
                ia64_lazy_load_fpu(v);
                if (!PSCB(v, vpsr_dfh)) {
                        regs->cr_ipsr &= ~IA64_PSR_DFH;
                        return;
                }
                vector = IA64_DISABLED_FPREG_VECTOR;
                break;
        case 26:
                if (((isr >> 4L) & 0xfL) == 1) {
                        /* Fault is due to a register NaT consumption fault. */
                        //regs->eml_unat = 0;  FIXME: DO WE NEED THIS??
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#if 1
                // pass null pointer dereferences through with no error
                // but retain debug output for non-zero ifa
                if (!ifa) {
                        vector = IA64_NAT_CONSUMPTION_VECTOR;
                        break;
                }
#endif
#ifdef CONFIG_PRIVIFY
                /* Some privified operations are coded using reg+64 instead
                   of reg.  */
                printk("*** NaT fault... attempting to handle as privop\n");
                printk("isr=%016lx, ifa=%016lx, iip=%016lx, ipsr=%016lx\n",
                       isr, ifa, regs->cr_iip, psr);
                //regs->eml_unat = 0;  FIXME: DO WE NEED THIS???
                // certain NaT faults are higher priority than privop faults
                vector = priv_emulate(v, regs, isr);
                if (vector == IA64_NO_FAULT) {
                        printk("*** Handled privop masquerading as NaT "
                               "fault\n");
                        return;
                }
#endif
                vector = IA64_NAT_CONSUMPTION_VECTOR;
                break;
        case 27:
                //printk("*** Handled speculation vector, itc=%lx!\n",
                //       ia64_get_itc());
                PSCB(current, iim) = iim;
                vector = IA64_SPECULATION_VECTOR;
                break;
        case 29:
                vector = IA64_DEBUG_VECTOR;
                if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_DEBUG))
                        return;
                break;
        case 30:
                // FIXME: Should we handle unaligned refs in Xen??
                vector = IA64_UNALIGNED_REF_VECTOR;
                break;
        case 32:
                status = handle_fpu_swa(1, regs, isr);
                if (!status) {
                        vcpu_increment_iip(v);
                        return;
                }
                vector = IA64_FP_FAULT_VECTOR;
                break;
        case 33:
                status = handle_fpu_swa(0, regs, isr);
                if (!status)
                        return;
                vector = IA64_FP_TRAP_VECTOR;
                break;
        case 34:
                if (isr & (1UL << 4))
                        printk("ia64_handle_reflection: handling "
                               "unimplemented instruction address %s\n",
                               (isr & (1UL << 32)) ? "fault" : "trap");
                vector = IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR;
                break;
        case 35:
                vector = IA64_TAKEN_BRANCH_TRAP_VECTOR;
                if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_TBRANCH))
                        return;
                break;
        case 36:
                vector = IA64_SINGLE_STEP_TRAP_VECTOR;
                if (debugger_kernel_event(regs, XEN_IA64_DEBUG_ON_KERN_SSTEP))
                        return;
                break;

        default:
                panic_domain(regs, "ia64_handle_reflection: "
                             "unhandled vector=0x%lx\n", vector);
                return;
        }
        if (check_lazy_cover && (isr & IA64_ISR_IR) &&
            handle_lazy_cover(v, regs))
                return;
        PSCB(current, ifa) = ifa;
        PSCB(current, itir) = vcpu_get_itir_on_fault(v, ifa);
        reflect_interruption(isr, regs, vector);
}
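
The switch above remaps hardware trap numbers onto guest IVT offsets before reflecting. A condensed, table-driven sketch of the same remapping for a few cases (the offsets below are stand-ins for the real IA64_*_VECTOR constants defined in the ia64 headers, not values verified against them):

/* Illustrative sketch only -- not part of faults.c; offsets are stand-ins. */
#include <stdio.h>
#include <stddef.h>

struct vec_map {
        unsigned long trap;     /* hardware trap number */
        unsigned long offset;   /* stand-in guest IVT offset */
        const char *name;
};

static const struct vec_map map[] = {
        {  8, 0x2000, "dirty bit" },
        { 20, 0x5000, "page not present" },
        { 24, 0x5400, "general exception" },
        { 30, 0x5a00, "unaligned reference" },
};

int main(void)
{
        unsigned long trap = 24;
        size_t i;

        for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
                if (map[i].trap == trap)
                        printf("trap %lu -> IVT offset 0x%lx (%s)\n",
                               trap, map[i].offset, map[i].name);
        return 0;
}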
 

void
ia64_shadow_fault(unsigned long ifa, unsigned long itir,
                  unsigned long isr, struct pt_regs *regs)
{
        struct vcpu *v = current;
        struct domain *d = current->domain;
        unsigned long gpfn;
        unsigned long pte = 0;
        struct vhpt_lf_entry *vlfe;

        /*
         * v->arch.vhpt_pg_shift shouldn't be used here.
         * Currently the dirty page logging bitmap is allocated based
         * on PAGE_SIZE. This is part of the xen_domctl_shadow_op ABI.
         * If we want to log dirty pages at a finer granularity when
         * v->arch.vhpt_pg_shift < PAGE_SHIFT, we have to
         * revise the ABI and update this function and the related
         * tool stack (live migration).
         */
        unsigned long vhpt_pg_shift = PAGE_SHIFT;

        /* There are 2 jobs to do:
           -  marking the page as dirty (the metaphysical address must be
              extracted to do that).
           -  reflecting or not the fault (the virtual Dirty bit must be
              extracted to decide).
           Unfortunately this information is not immediately available!
         */

        /* Extract the metaphysical address.
           Try to get it from the VHPT and the M2P as we need the flags.  */
        vlfe = (struct vhpt_lf_entry *)ia64_thash(ifa);
        pte = vlfe->page_flags;
        if (vlfe->ti_tag == ia64_ttag(ifa)) {
                /* The VHPT entry is valid.  */
                gpfn = get_gpfn_from_mfn((pte & _PAGE_PPN_MASK) >>
                                         vhpt_pg_shift);
                BUG_ON(gpfn == INVALID_M2P_ENTRY);
        } else {
                unsigned long itir, iha;
                IA64FAULT fault;

                /* The VHPT entry is not valid.  */
                vlfe = NULL;

                /* FIXME: give tpa a chance, as the TC was valid.  */

                fault = vcpu_translate(v, ifa, 1, &pte, &itir, &iha);

                /* Try again!  */
                if (fault != IA64_NO_FAULT) {
                        /* This will trigger a dtlb miss.  */
                        ia64_ptcl(ifa, vhpt_pg_shift << 2);
                        return;
                }
                gpfn = ((pte & _PAGE_PPN_MASK) >> vhpt_pg_shift);
                if (pte & _PAGE_D)
                        pte |= _PAGE_VIRT_D;
        }

        /* Set the dirty bit in the bitmap.  */
        shadow_mark_page_dirty(d, gpfn);

        /* Update the local TC/VHPT and decide whether or not the fault
           should be reflected.
           SMP note: we almost ignore the other processors.  The shadow_bitmap
           has been atomically updated.  If the dirty fault happens on another
           processor, it will do its job.
         */

        if (pte != 0) {
                /* We know how to handle the fault.  */

                if (pte & _PAGE_VIRT_D) {
                        /* Rewrite the VHPT entry.
                           There is no race here because only the
                           cpu VHPT owner can write page_flags.  */
                        if (vlfe)
                                vlfe->page_flags = pte | _PAGE_D;

                        /* Purge the TC locally.
                           It will be reloaded from the VHPT iff the
                           VHPT entry is still valid.  */
                        ia64_ptcl(ifa, vhpt_pg_shift << 2);

                        atomic64_inc(&d->arch.shadow_fault_count);
                } else {
                        /* Reflect.
                           In this case there is no need to purge.  */
                        ia64_handle_reflection(ifa, regs, isr, 0, 8);
                }
        } else {
                /* We don't know whether or not the fault must be
                   reflected.  The VHPT entry is not valid.  */
                /* FIXME: in metaphysical mode, we could do an ITC now.  */
                ia64_ptcl(ifa, vhpt_pg_shift << 2);
        }
}
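
The first of the two jobs above, marking the page dirty, boils down to setting one bit per guest page frame in a log bitmap. A toy sketch of that effect (the bitmap here is a local array, not Xen's shadow_bitmap, and unlike shadow_mark_page_dirty() this version is not atomic):

/* Illustrative sketch only -- not part of faults.c. */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Non-atomic stand-in for the effect of shadow_mark_page_dirty(). */
static void mark_dirty(unsigned long *bitmap, unsigned long gpfn)
{
        bitmap[gpfn / BITS_PER_LONG] |= 1UL << (gpfn % BITS_PER_LONG);
}

int main(void)
{
        unsigned long bitmap[4] = { 0 };
        unsigned long gpfn = 77;   /* stand-in guest page frame number */

        mark_dirty(bitmap, gpfn);
        printf("bit set in word %lu: 0x%lx\n",
               (unsigned long)(gpfn / BITS_PER_LONG),
               bitmap[gpfn / BITS_PER_LONG]);
        return 0;
}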