// assembly portion of the IA64 MCA handling
// Mods by cfleck to integrate into kernel build
// 00/03/15 davidm Added various stop bits to get a clean compile
// 00/03/29 cfleck Added code to save INIT handoff state in pt_regs format, switch to temp
// kstack, switch modes, jump to C INIT handler
// 02/01/04 J.Hall <jenna.s.hall@intel.com>
// Before entering virtual mode code:
// 1. Check for TLB CPU error
// 2. Restore current thread pointer to kr6
// 3. Move stack ptr 16 bytes to conform to C calling convention
// 04/11/12 Russ Anderson <rja@sgi.com>
// Added per cpu MCA/INIT stack save areas.
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/asmmacro.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mca_asm.h>
#include <public/arch-ia64.h>
* When we get a machine check, the kernel stack pointer is no longer
* valid, so we need to set a new stack pointer.
#define MINSTATE_PHYS /* Make sure stack access is physical for MINSTATE */
* Needed for return context to SAL
#define IA64_MCA_SAME_CONTEXT 0
#define IA64_MCA_COLD_BOOT -2
* SAL_TO_OS_MCA_HANDOFF_STATE (SAL 3.0 spec)
* 2. GR8 = PAL_PROC physical address
* 3. GR9 = SAL_PROC physical address
* 4. GR10 = SAL GP (physical)
* 5. GR11 = Rendez state
* 6. GR12 = Return address to location within SAL_CHECK
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
GET_THIS_PADDR(_tmp, ia64_sal_to_os_handoff_state_addr);; \
st8 [_tmp]=r1,0x08;; \
st8 [_tmp]=r8,0x08;; \
st8 [_tmp]=r9,0x08;; \
st8 [_tmp]=r10,0x08;; \
st8 [_tmp]=r11,0x08;; \
st8 [_tmp]=r12,0x08;; \
st8 [_tmp]=r17,0x08;; \
#define SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(_tmp) \
LOAD_PHYSICAL(p0, _tmp, ia64_sal_to_os_handoff_state);; \
st8 [_tmp]=r1,0x08;; \
st8 [_tmp]=r8,0x08;; \
st8 [_tmp]=r9,0x08;; \
st8 [_tmp]=r10,0x08;; \
st8 [_tmp]=r11,0x08;; \
st8 [_tmp]=r12,0x08;; \
st8 [_tmp]=r17,0x08;; \
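// For reference: the st8 sequence above fills consecutive 8-byte slots in the
// same order as the SAL handoff registers listed in the comment above. A
// hedged C sketch of that layout follows -- the authoritative definition is
// ia64_mca_sal_to_os_state_t in include/asm/mca.h, and the field names here
// are illustrative rather than copied from that header:
//
//     struct sal_to_os_handoff_sketch {
//         u64 os_gp;            /* GR1  - OS GP                          */
//         u64 pal_proc;         /* GR8  - PAL_PROC physical address      */
//         u64 sal_proc;         /* GR9  - SAL_PROC physical address      */
//         u64 sal_gp;           /* GR10 - SAL GP (physical)              */
//         u64 rendez_state;     /* GR11 - Rendez state                   */
//         u64 sal_check_ra;     /* GR12 - return address into SAL_CHECK  */
//         u64 pal_min_state;    /* GR17 - min-state save area pointer    */
//         u64 proc_state_param; /* slot at offset +56, read back by      */
//                               /* ia64_os_mca_done_dump below           */
//     };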
* OS_MCA_TO_SAL_HANDOFF_STATE (SAL 3.0 spec)
* (p6) is executed if we never entered virtual mode (TLB error)
* (p7) is executed if we entered virtual mode as expected (normal case)
* 1. GR8 = OS_MCA return status
* 2. GR9 = SAL GP (physical)
* 3. GR10 = 0/1 returning same/new context
* 4. GR22 = New min state save area pointer
* returns ptr to SAL rtn save loc in _tmp
#define OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(_tmp) \
movl _tmp=ia64_os_to_sal_handoff_state;; \
DATA_VA_TO_PA(_tmp);; \
ld8 r8=[_tmp],0x08;; \
ld8 r9=[_tmp],0x08;; \
ld8 r10=[_tmp],0x08;; \
// now _tmp is pointing to SAL rtn save location
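// Likewise, a hedged C sketch of the OS-to-SAL handoff record that the loads
// above walk through (the real definition is ia64_mca_os_to_sal_state_t in
// include/asm/mca.h; the imots_* names are those listed in the
// COLD_BOOT_HANDOFF_STATE() comment below, and the layout here is illustrative):
//
//     struct os_to_sal_handoff_sketch {
//         u64 imots_os_status;      /* -> GR8:  OS_MCA return status        */
//         u64 imots_sal_gp;         /* -> GR9:  SAL GP (physical)           */
//         u64 imots_context;        /* -> GR10: 0/1 same/new context        */
//         u64 imots_new_min_state;  /* -> GR22: new min-state save area ptr */
//         u64 imots_sal_check_ra;   /* _tmp ends up pointing at this slot   */
//     };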
* COLD_BOOT_HANDOFF_STATE() sets ia64_mca_os_to_sal_state
* imots_os_status=IA64_MCA_COLD_BOOT
* imots_sal_gp=SAL GP
* imots_context=IA64_MCA_SAME_CONTEXT
* imots_new_min_state=Min state save area pointer
* imots_sal_check_ra=Return address to location within SAL_CHECK
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
movl tmp=IA64_MCA_COLD_BOOT; \
GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);; \
ld8 sal_to_os_handoff=[r2];; \
movl os_to_sal_handoff=ia64_os_to_sal_handoff_state;; \
dep os_to_sal_handoff = 0, os_to_sal_handoff, 60, 4;; \
/*DATA_VA_TO_PA(os_to_sal_handoff);;*/ \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff],48;; \
st8 [os_to_sal_handoff]=tmp,8;; \
movl tmp=IA64_MCA_SAME_CONTEXT;; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff],-8;; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff];; \
st8 [os_to_sal_handoff]=tmp;;
#define COLD_BOOT_HANDOFF_STATE(sal_to_os_handoff,os_to_sal_handoff,tmp)\
movl tmp=IA64_MCA_COLD_BOOT; \
movl sal_to_os_handoff=__pa(ia64_sal_to_os_handoff_state); \
movl os_to_sal_handoff=__pa(ia64_os_to_sal_handoff_state);; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff],48;; \
st8 [os_to_sal_handoff]=tmp,8;; \
movl tmp=IA64_MCA_SAME_CONTEXT;; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff],-8;; \
st8 [os_to_sal_handoff]=tmp,8;; \
ld8 tmp=[sal_to_os_handoff];; \
st8 [os_to_sal_handoff]=tmp;;
#define GET_IA64_MCA_DATA(reg) \
GET_THIS_PADDR(reg, ia64_mca_data) \
.global ia64_os_mca_dispatch
.global ia64_os_mca_dispatch_end
.global ia64_sal_to_os_handoff_state
.global ia64_os_to_sal_handoff_state
.global ia64_do_tlb_purge
* Just the TLB purge part is moved to a separate function
* so we can re-use the code for cpu hotplug as well.
* Caller should set up b1, so we can branch once the
* tlb flush is complete.
#define O(member) IA64_CPUINFO_##member##_OFFSET
GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));; // r18=ptce_base
ld4 r19=[r2],4 // r19=ptce_count[0]
ld4 r21=[r17],4 // r21=ptce_stride[0]
ld4 r20=[r2] // r20=ptce_count[1]
ld4 r22=[r17] // r22=ptce_stride[1]
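// The purge loop below walks the PTCE parameters just loaded. In C, this is
// roughly the standard local_flush_tlb_all() pattern (a hedged sketch only;
// the counts and strides correspond to the r19-r22 loads above):
//
//     addr = ptce_base;
//     for (i = 0; i < ptce_count[0]; i++) {       /* outer loop bound, r19 */
//         for (j = 0; j < ptce_count[1]; j++) {   /* inner loop bound, r20 */
//             ptc_e(addr);                        /* purge TC entries at addr */
//             addr += ptce_stride[1];             /* inner stride, r22 */
//         }
//         addr += ptce_stride[0];                 /* outer stride, r21 */
//     }
//     srlz_i();                                   /* serialize, as below */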
cmp.ltu p6,p7=r24,r19
(p7) br.cond.dpnt.few 4f
srlz.i // srlz.i implies srlz.d
// Now purge addresses formerly mapped by TR registers
// 1. Purge ITR&DTR for kernel.
movl r16=KERNEL_START
mov r18=KERNEL_TR_PAGE_SHIFT<<2
// 2. Purge DTR for PERCPU data.
mov r18=PERCPU_PAGE_SHIFT<<2
// 3. Purge ITR for PAL code.
GET_THIS_PADDR(r2, ia64_mca_pal_base)
mov r18=IA64_GRANULE_SHIFT<<2
// 4. Purge DTR for stack.
// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
// to allow the kernel registers themselves to be used by domains.
GET_THIS_PADDR(r2, cpu_kr);;
add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
mov r16=IA64_KR(CURRENT_STACK)
shl r16=r16,IA64_GRANULE_SHIFT
mov r18=IA64_GRANULE_SHIFT<<2
GET_THIS_PADDR(r2, inserted_shared_info);;
GET_THIS_PADDR(r2, inserted_mapped_regs);;
mov r18=XMAPPEDREGS_SHIFT<<2
// The VPD will not be mapped in the case where
// a VMX domain hasn't been started since boot
GET_THIS_PADDR(r2, inserted_vpd);;
mov r18=IA64_GRANULE_SHIFT<<2
(p7) br.cond.sptk .vpd_not_mapped
// GET_VA_VCPU_VHPT_MADDR() may not give the
// value of the VHPT currently pinned into the TLB
GET_THIS_PADDR(r2, inserted_vhpt);;
(p7) br.cond.sptk .vhpt_not_mapped
dep r16=0,r2,0,IA64_GRANULE_SHIFT
mov r18=IA64_GRANULE_SHIFT<<2
// Now branch away to caller.
ia64_os_mca_dispatch:
// Serialize all MCA processing
LOAD_PHYSICAL(p0,r2,ia64_mca_serialize);;
(p6) br ia64_os_mca_spin
// Save the SAL to OS MCA handoff state as defined
// NOTE : The order in which the state gets saved
// is dependent on the way the C-structure
// for ia64_mca_sal_to_os_state_t has been
// defined in include/asm/mca.h
SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
// LOG PROCESSOR STATE INFO FROM HERE ON..
br ia64_os_mca_proc_state_dump;;
ia64_os_mca_done_dump:
// Set current to ar.k6
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_OFFSET,r2;;
GET_THIS_PADDR(r2,ia64_sal_to_os_handoff_state_addr);;
LOAD_PHYSICAL(p0,r16,ia64_sal_to_os_handoff_state+56)
ld8 r18=[r16] // Get processor state parameter on existing PALE_CHECK.
(p7) br.spnt done_tlb_purge_and_reload
// The following code purges TC and TR entries. Then reload all TR entries.
// Purge percpu data TC entries.
begin_tlb_purge_and_reload:
movl r18=ia64_reload_tr;;
LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
br.sptk.many ia64_do_tlb_purge;;
// Finally reload the TR registers.
// 1. Reload DTR/ITR registers for kernel.
mov r18=KERNEL_TR_PAGE_SHIFT<<2
movl r17=KERNEL_START
mov r16=IA64_TR_KERNEL
dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT
// 2. Reload DTR register for PERCPU data.
GET_THIS_PADDR(r2, ia64_mca_per_cpu_pte)
movl r16=PERCPU_ADDR // vaddr
movl r18=PERCPU_PAGE_SHIFT<<2
ld8 r18=[r2] // load per-CPU PTE
mov r16=IA64_TR_PERCPU_DATA;
// 3. Reload ITR for PAL code.
GET_THIS_PADDR(r2, ia64_mca_pal_pte)
ld8 r18=[r2] // load PAL PTE
GET_THIS_PADDR(r2, ia64_mca_pal_base)
ld8 r16=[r2] // load PAL vaddr
mov r19=IA64_GRANULE_SHIFT<<2
mov r20=IA64_TR_PALCODE
// 4. Reload DTR for stack.
// Kernel registers are saved in a per_cpu cpu_kr_ia64_t
// to allow the kernel registers themselves to be used by domains.
GET_THIS_PADDR(r2, cpu_kr);;
add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
mov r16=IA64_KR(CURRENT_STACK)
shl r16=r16,IA64_GRANULE_SHIFT
mov r19=IA64_GRANULE_SHIFT<<2
mov r20=IA64_TR_CURRENT_STACK
// if !VMX_DOMAIN(current)
// pin down shared_info and mapped_regs
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_OFFSET,r2
add r2=IA64_VCPU_FLAGS_OFFSET,r2
(p7) br.cond.sptk .vmx_domain
GET_THIS_PADDR(r2, inserted_shared_info);;
movl r20=__pgprot(__DIRTY_BITS | _PAGE_PL_PRIV | _PAGE_AR_RW)
GET_THIS_PADDR(r2, domain_shared_info);;
or r17=r17,r20 // construct PA | page properties
mov r16=IA64_TR_SHARED_INFO
itr.d dtr[r16]=r17 // wire in new mapping...
GET_THIS_PADDR(r2, inserted_mapped_regs);;
mov r18=XMAPPEDREGS_SHIFT<<2
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_OFFSET,r2
add r2=IA64_VPD_BASE_OFFSET,r2
or r17=r17,r20 // construct PA | page properties
mov r16=IA64_TR_MAPPED_REGS
itr.d dtr[r16]=r17 // wire in new mapping...
br.sptk.many .reload_vpd_not_mapped;;
GET_THIS_PADDR(r2, inserted_vpd);;
mov r18=IA64_GRANULE_SHIFT<<2
(p7) br.cond.sptk .reload_vpd_not_mapped
dep r17=0,r17,0,IA64_GRANULE_SHIFT
// avoid overlapping with stack
GET_THIS_PADDR(r2, cpu_kr);;
add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
shl r19=r19,IA64_GRANULE_SHIFT
or r17=r20,r17 // construct PA | page properties
mov r18=IA64_TR_MAPPED_REGS
(p7) itr.d dtr[r18]=r17
.reload_vpd_not_mapped:
GET_THIS_PADDR(r2, inserted_vhpt);;
(p7) br.cond.sptk .overlap_vhpt // vhpt isn't mapped.
dep r16=0,r2,0,IA64_GRANULE_SHIFT
dep r17=0,r16,60,4 // physical address of
// va_vhpt & ~(IA64_GRANULE_SIZE - 1)
// avoid overlapping with stack TR
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_STACK_OFFSET,r2
shl r18=r2,IA64_GRANULE_SHIFT
(p7) br.cond.sptk .overlap_vhpt
// avoid overlapping with VPD
GET_THIS_PADDR(r2, inserted_vpd);;
dep r18=0,r18,0,IA64_GRANULE_SHIFT
(p7) br.cond.sptk .overlap_vhpt
mov r19=IA64_GRANULE_SHIFT<<2
or r17=r17,r20 // construct PA | page properties
itr.d dtr[r18]=r17 // wire in new mapping...
br.sptk.many done_tlb_purge_and_reload
COLD_BOOT_HANDOFF_STATE(r20,r21,r22)
br.sptk.many ia64_os_mca_done_restore
done_tlb_purge_and_reload:
// Setup new stack frame for OS_MCA handling
GET_IA64_MCA_DATA(r2)
add r3 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
add r2 = IA64_MCA_CPU_RBSTORE_OFFSET, r2
rse_switch_context(r6,r3,r2);; // RSC management in this new context
GET_IA64_MCA_DATA(r2)
add r2 = IA64_MCA_CPU_STACK_OFFSET+IA64_MCA_STACK_SIZE-16, r2
mov r12=r2 // establish new stack-pointer
// Enter virtual mode from physical mode
VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)
ia64_os_mca_virtual_begin:
// Call virtual mode handler
movl r2=ia64_mca_ucmc_handler;;
br.call.sptk.many b0=b6;;
// Revert back to physical mode before going back to SAL
PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
ia64_os_mca_virtual_end:
// restore the original stack frame here
GET_IA64_MCA_DATA(r2)
add r2 = IA64_MCA_CPU_STACKFRAME_OFFSET, r2
rse_return_context(r4,r3,r2) // switch from interrupt context for RSE
// let us restore all the registers from our PSI structure
begin_os_mca_restore:
br ia64_os_mca_proc_state_restore;;
ia64_os_mca_done_restore:
OS_MCA_TO_SAL_HANDOFF_STATE_RESTORE(r2);;
// branch back to SAL_CHECK
mov b0=r3;; // SAL_CHECK return address
movl r3=ia64_mca_serialize;;
ia64_os_mca_dispatch_end:
//EndMain//////////////////////////////////////////////////////////////////////
// ia64_os_mca_proc_state_dump()
// This stub dumps the processor state during MCHK to a data area
ia64_os_mca_proc_state_dump:
// Save bank 1 GRs 16-31 which will be used by c-language code when we switch
// to virtual addressing mode.
GET_IA64_MCA_DATA(r2)
add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
mov r5=ar.unat // ar.unat
// save banked GRs 16-31 along with NaT bits
st8.spill [r2]=r16,8;;
st8.spill [r2]=r17,8;;
st8.spill [r2]=r18,8;;
st8.spill [r2]=r19,8;;
st8.spill [r2]=r20,8;;
st8.spill [r2]=r21,8;;
st8.spill [r2]=r22,8;;
st8.spill [r2]=r23,8;;
st8.spill [r2]=r24,8;;
st8.spill [r2]=r25,8;;
st8.spill [r2]=r26,8;;
st8.spill [r2]=r27,8;;
st8.spill [r2]=r28,8;;
st8.spill [r2]=r29,8;;
st8.spill [r2]=r30,8;;
st8.spill [r2]=r31,8;;
st8 [r2]=r4,8 // save User NaT bits for r16-r31
mov ar.unat=r5 // restore original unat
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6
st8 [r6]=r7,3*8;; // 48 byte increments
st8 [r2]=r3,8*8;; // 64 byte increments
// if PSR.ic=0, reading interruption registers causes an illegal operation fault
tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
(p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes
begin_skip_intr_regs:
(p6) br SkipIntrRegs;;
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6
mov r3=cr25;; // cr.iha
st8 [r2]=r3,160;; // 160 byte increment
st8 [r2]=r0,152;; // another 152 byte increment
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6
// mov r5=cr.ivr // cr.ivr, don't read it
mov r3=r0 // cr.eoi => cr67
mov r5=r0 // cr.irr0 => cr68
mov r7=r0;; // cr.irr1 => cr69
mov r3=r0 // cr.irr2 => cr70
mov r5=r0 // cr.irr3 => cr71
mov r3=r0 // cr.lrr0 => cr80
mov r5=r0;; // cr.lrr1 => cr81
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2 // duplicate r2 in r6
mov r7=r0;; // ar.kr8
st8 [r6]=r7,10*8;; // increment by 10*8 bytes
mov ar.rsc=r0 // put RSE in enforced lazy mode
st8 [r2]=r3,8*13 // increment by 13x8 bytes
st8 [r2]=r3,160 // 160
add r2=8*62,r2 // padding
br.cloop.sptk.few cStRR
br ia64_os_mca_done_dump;;
//EndStub//////////////////////////////////////////////////////////////////////
// ia64_os_mca_proc_state_restore()
// This is a stub to restore the saved processor state during MCHK
ia64_os_mca_proc_state_restore:
// Restore bank1 GR16-31
GET_IA64_MCA_DATA(r2)
add r2 = IA64_MCA_CPU_PROC_STATE_DUMP_OFFSET, r2
restore_GRs: // restore bank-1 GRs 16-31
add r3=16*8,r2;; // to get to NaT of GR 16-31
mov ar.unat=r3;; // first restore NaT
ld8.fill r16=[r2],8;;
ld8.fill r17=[r2],8;;
ld8.fill r18=[r2],8;;
ld8.fill r19=[r2],8;;
ld8.fill r20=[r2],8;;
ld8.fill r21=[r2],8;;
ld8.fill r22=[r2],8;;
ld8.fill r23=[r2],8;;
ld8.fill r24=[r2],8;;
ld8.fill r25=[r2],8;;
ld8.fill r26=[r2],8;;
ld8.fill r27=[r2],8;;
ld8.fill r28=[r2],8;;
ld8.fill r29=[r2],8;;
ld8.fill r30=[r2],8;;
ld8.fill r31=[r2],8;;
ld8 r3=[r2],8;; // increment to skip NaT
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6
ld8 r7=[r6],3*8;; // 48 byte increments
ld8 r3=[r2],8*8;; // 64 byte increments
// if PSR.ic=1, reading interruption registers causes an illegal operation fault
tbit.nz.unc p6,p0=r3,PSR_IC;; // PSI Valid Log bit pos. test
(p6) st8 [r2]=r0,9*8+160 // increment by 232 bytes
begin_rskip_intr_regs:
(p6) br rSkipIntrRegs;;
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6
// mov cr.isr=r5 // cr.isr is read only
ld8 r3=[r2],160;; // 160 byte increment
ld8 r3=[r2],152;; // another 152 byte inc.
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6
// mov cr.ivr=r5 // cr.ivr is read only
// mov cr.irr0=r5 // cr.irr0 is read only
// mov cr.irr1=r7;; // cr.irr1 is read only
// mov cr.irr2=r3 // cr.irr2 is read only
// mov cr.irr3=r5 // cr.irr3 is read only
add r4=8,r2 // duplicate r2 in r4
add r6=2*8,r2;; // duplicate r2 in r6
// mov ar.bsp=r5 // ar.bsp is read only
mov ar.rsc=r0 // make sure that RSE is in enforced lazy mode
mov ar.bspstore=r7;;
ld8 r3=[r2],160;; // 160
add r2=8*62,r2;; // padding
mov rr[r7]=r3 // what are its access privileges?
br.cloop.sptk.few cStRRr
br ia64_os_mca_done_restore;;
//EndStub//////////////////////////////////////////////////////////////////////
// ok, the issue here is that we need to save state information so
// it can be usable by the kernel debugger and the show_regs routines.
// In order to do this, our best bet is to save the current state (plus
// the state information obtained from the MIN_STATE_AREA) into a pt_regs
// format. This way we can pass it on in a usable format.
// SAL to OS entry point for INIT on the monarch processor
// This has been defined for registration purposes with SAL
// as a part of ia64_mca_init.
// When we get here, the following registers have been
// set by the SAL for our use
// 1. GR1 = OS INIT GP
// 2. GR8 = PAL_PROC physical address
// 3. GR9 = SAL_PROC physical address
// 4. GR10 = SAL GP (physical)
// 5. GR11 = Init Reason
// 0 = Received INIT for event other than crash dump switch
// 1 = Received wakeup at the end of an OS_MCA corrected machine check
// 2 = Received INIT due to CrashDump switch assertion
// 6. GR12 = Return address to location within SAL_INIT procedure
GLOBAL_ENTRY(ia64_monarch_init_handler)
#ifdef XEN /* Need in ia64_monarch_init_handler? */
// Set current to ar.k6
GET_THIS_PADDR(r2,cpu_kr);;
add r2=IA64_KR_CURRENT_OFFSET,r2;;
// stash the information the SAL passed to the OS
SAL_TO_OS_MCA_HANDOFF_STATE_SAVE(r2)
adds r3=8,r2 // set up second base pointer
// ok, enough should be saved at this point to be dangerous, and to supply
// information for a dump
// We need to switch to Virtual mode before hitting the C functions.
movl r2=IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN
mov r3=psr // get the current psr, minimum enabled at this point
movl r3=IVirtual_Switch
mov cr.iip=r3 // short return to set the appropriate bits
mov cr.ipsr=r2 // need to do an rfi to set appropriate bits
// We should now be running virtual
// Let's call the C handler to get the rest of the state info
alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
adds out0=16,sp // out0 = pointer to pt_regs
DO_SAVE_SWITCH_STACK
adds out1=16,sp // out1 = pointer to switch_stack
br.call.sptk.many rp=ia64_init_handler
br.sptk return_from_init
END(ia64_monarch_init_handler)
// SAL to OS entry point for INIT on the slave processor
// This has been defined for registration purposes with SAL
// as a part of ia64_mca_init.
GLOBAL_ENTRY(ia64_slave_init_handler)
END(ia64_slave_init_handler)