/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Linux interrupt vectors.
 */
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/asm-offsets.h>
#include <asm/types.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>
#ifdef CONFIG_PREEMPT
# error "No support for kernel preemption currently"
#endif

#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
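/*
 * (Reader's note, not original source:) PTREGS_PTR assumes the frame
 * layout described in int_hand below, where sp is left
 * C_ABI_SAVE_AREA_SIZE below the base of the saved struct pt_regs, so
 * "sp + C_ABI_SAVE_AREA_SIZE + PTREGS_OFFSET_xxx" addresses one field
 * of the saved register state.
 */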
	.macro	push_reg reg, ptr=sp, delta=-8
	addli	\ptr, \ptr, \delta

	.macro	pop_reg reg, ptr=sp, delta=8
	addli	\ptr, \ptr, \delta

	.macro	pop_reg_zero reg, zreg, ptr=sp, delta=8
	addi	\ptr, \ptr, \delta

	.macro	push_extra_callee_saves reg
	PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
	push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)

	.pushsection .rodata, "a"
	moveli	r0, hw2_last(1b)
	shl16insli r0, r0, hw1(1b)
	shl16insli r0, r0, hw0(1b)

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"

/*
 * Default interrupt handler.
 *
 * vecnum is where we'll put this code.
 * c_routine is the C routine we'll call.
 *
 * The C routine is passed two arguments:
 * - A pointer to the pt_regs state.
 * - The interrupt vector number.
 *
 * The "processing" argument specifies the code for processing
 * the interrupt.  Defaults to "handle_interrupt".
 */
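/*
 * (Reader's note, not original source:) int_hand is instantiated once
 * per vector in the ".intrpt1" table at the bottom of this file, e.g.:
 *
 *	int_hand     INT_ITLB_MISS, ITLB_MISS, do_page_fault
 *	int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
 */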
	.macro	int_hand vecnum, vecname, c_routine, processing=handle_interrupt

	/* Temporarily save a register so we have somewhere to work. */
	mtspr	SPR_SYSTEM_SAVE_K_1, r0
	mfspr	r0, SPR_EX_CONTEXT_K_1
	andi	r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */

	.ifc	\vecnum, INT_DOUBLE_FAULT
	/*
	 * For double-faults from user-space, fall through to the normal
	 * register save and stack setup path.  Otherwise, it's the
	 * hypervisor giving us one last chance to dump diagnostics, and we
	 * branch to the kernel_double_fault routine to do so.
	 */
	j	_kernel_double_fault

	/*
	 * If we're coming from user-space, then set sp to the top of
	 * the kernel stack.  Otherwise, assume sp is already valid.
	 */
	.ifc	\c_routine, do_page_fault
	/*
	 * The page_fault handler may be downcalled directly by the
	 * hypervisor even when Linux is running and has ICS set.
	 *
	 * In this case the contents of EX_CONTEXT_K_1 reflect the
	 * previous fault and can't be relied on to choose whether or
	 * not to reinitialize the stack pointer.  So we add a test
	 * to see whether SYSTEM_SAVE_K_2 has the high bit set,
	 * and if so we don't reinitialize sp, since we must be coming
	 * from Linux.  (In fact the precise case is !(val & ~1),
	 * but any Linux PC has to have the high bit set.)
	 *
	 * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
	 * any path that turns into a downcall to one of our TLB handlers.
	 *
	 * FIXME: if we end up never using this path, perhaps we should
	 * prevent the hypervisor from generating downcalls in this case.
	 * The advantage of getting a downcall is we can panic in Linux.
	 */
	mfspr	r0, SPR_SYSTEM_SAVE_K_2
	bltz	r0, 0f	/* high bit in S_S_1_2 is for a PC to use */

	/*
	 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
	 * the current stack top in the higher bits.  So we recover
	 * our stack top by just masking off the low bits, then
	 * point sp at the top aligned address on the actual stack page.
	 */
	mfspr	r0, SPR_SYSTEM_SAVE_K_0
	mm	r0, zero, LOG2_THREAD_SIZE, 63
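/*
 * (Reader's note, not original source:) per the comment above, the "mm"
 * clears the low LOG2_THREAD_SIZE bits, so in C terms this is roughly
 *
 *	r0 = SPR_SYSTEM_SAVE_K_0 & ~(THREAD_SIZE - 1);
 *
 * with THREAD_SIZE == (1 << LOG2_THREAD_SIZE); sp is then pointed at the
 * top aligned address within that kernel stack page.
 */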
	/*
	 * Align the stack mod 64 so we can properly predict what
	 * cache lines we need to write-hint to reduce memory fetch
	 * latency as we enter the kernel.  The layout of memory is
	 * as follows, with cache line 0 at the lowest VA, and cache
	 * line 8 just below the r0 value this "andi" computes.
	 * Note that we never write to cache line 8, and we skip
	 * cache lines 1-3 for syscalls.
	 *
	 *    cache line 8: ptregs padding (two words)
	 *    cache line 7: sp, lr, pc, ex1, faultnum, orig_r0, flags, cmpexch
	 *    cache line 6: r46...r53 (tp)
	 *    cache line 5: r38...r45
	 *    cache line 4: r30...r37
	 *    cache line 3: r22...r29
	 *    cache line 2: r14...r21
	 *    cache line 1: r6...r13
	 *    cache line 0: 2 x frame, r0..r5
	 */

	/*
	 * Push the first four registers on the stack, so that we can set
	 * them to vector-unique values before we jump to the common code.
	 *
	 * Registers are pushed on the stack as a struct pt_regs,
	 * with the sp initially just above the struct, and when we're
	 * done, sp points to the base of the struct, minus
	 * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
	 *
	 * This routine saves just the first four registers, plus the
	 * stack context so we can do proper backtracing right away,
	 * and defers to handle_interrupt to save the rest.
	 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
	 */
	addli	r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
	wh64	r0		/* cache line 7 */
	addli	r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
	addli	sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
	wh64	sp		/* cache line 6 */
	addli	sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
	wh64	sp		/* cache line 0 */
	addli	sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
	addli	sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
	addli	sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
	mfspr	r0, SPR_EX_CONTEXT_K_0

	.ifc	\processing,handle_syscall
	/*
	 * Bump the saved PC by one bundle so that when we return, we won't
	 * execute the same swint instruction again.  We need to do this while
	 * we're in the critical section.
	 */
	addli	sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
	mfspr	r0, SPR_EX_CONTEXT_K_1
	addi	sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1

	/*
	 * Use r0 for syscalls so it's a temporary; use r1 for interrupts
	 * so that it gets passed through unchanged to the handler routine.
	 * Note that the .if conditional confusingly spans bundles.
	 */
	.ifc	\processing,handle_syscall
	addli	sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
	mfspr	r0, SPR_SYSTEM_SAVE_K_1	/* Original r0 */
	addi	sp, sp, -PTREGS_OFFSET_REG(0) - 8
	st	sp, zero	/* write zero into "Next SP" frame pointer */
	addi	sp, sp, -8	/* leave SP pointing at bottom of frame */

	.ifc	\processing,handle_syscall

	/* Capture per-interrupt SPR context to registers. */
	.ifc	\c_routine, do_page_fault
	mfspr	r2, SPR_SYSTEM_SAVE_K_3	/* address of page fault */
	mfspr	r3, SPR_SYSTEM_SAVE_K_2	/* info about page fault */
	.ifc	\vecnum, INT_ILL_TRANS
	mfspr	r2, ILL_TRANS_REASON
	.ifc	\vecnum, INT_DOUBLE_FAULT
	mfspr	r2, SPR_SYSTEM_SAVE_K_2	/* double fault info from HV */
	.ifc	\c_routine, do_trap
	.ifc	\c_routine, op_handle_perf_interrupt
	mfspr	r2, PERF_COUNT_STS
#if CHIP_HAS_AUX_PERF_COUNTERS()
	.ifc	\c_routine, op_handle_aux_perf_interrupt
	mfspr	r2, AUX_PERF_COUNT_STS

	/* Put function pointer in r0 */
	moveli	r0, hw2_last(\c_routine)
	shl16insli r0, r0, hw1(\c_routine)
	shl16insli r0, r0, hw0(\c_routine)
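/*
 * (Reader's note, not original source:) the moveli/shl16insli pair is
 * the usual TILE-Gx idiom for materializing a 64-bit constant 16 bits
 * at a time; hw2_last()/hw1()/hw0() select successive 16-bit pieces of
 * the symbol's value, so the sequence above computes roughly
 *
 *	r0 = hw2_last(sym);
 *	r0 = (r0 << 16) | hw1(sym);
 *	r0 = (r0 << 16) | hw0(sym);
 */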
	ENDPROC(intvec_\vecname)

#ifdef __COLLECT_LINKER_FEEDBACK__
	.pushsection .text.intvec_feedback,"ax"
	FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8)

/*
 * Save the rest of the registers that we didn't save in the actual
 * vector itself.  We can't use r0-r10 inclusive here.
 */
	.macro	finish_interrupt_save, function

	/* If it's a syscall, save a proper orig_r0, otherwise just zero. */
	PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
	.ifc	\function,handle_syscall

	PTREGS_PTR(r52, PTREGS_OFFSET_TP)
	mfspr	tp, CMPEXCH_VALUE
	PTREGS_PTR(r52, PTREGS_OFFSET_CMPEXCH)

	/*
	 * For ordinary syscalls, we save neither caller- nor callee-
	 * save registers, since the syscall invoker doesn't expect the
	 * caller-saves to be saved, and the called kernel functions will
	 * take care of saving the callee-saves for us.
	 *
	 * For interrupts we save just the caller-save registers.  Saving
	 * them is required (since the "caller" can't save them).  Again,
	 * the called kernel functions will restore the callee-save
	 * registers for us appropriately.
	 *
	 * On return, we normally restore nothing special for syscalls,
	 * and just the caller-save registers for interrupts.
	 *
	 * However, there are some important caveats to all this:
	 *
	 * - We always save a few callee-save registers to give us
	 *   some scratchpad registers to carry across function calls.
	 *
	 * - fork/vfork/etc require us to save all the callee-save
	 *   registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
	 *
	 * - We always save r0..r5 and r10 for syscalls, since we need
	 *   to reload them a bit later for the actual kernel call, and
	 *   since we might need them for -ERESTARTNOINTR, etc.
	 *
	 * - Before invoking a signal handler, we save the unsaved
	 *   callee-save registers so they are visible to the
	 *   signal handler or any ptracer.
	 *
	 * - If the unsaved callee-save registers are modified, we set
	 *   a bit in pt_regs so we know to reload them from pt_regs
	 *   and not just rely on the kernel function unwinding.
	 *   (Done for ptrace register writes and SA_SIGINFO handler.)
	 */
	PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
	wh64	r52	/* cache line 4 */
	.ifc	\function,handle_syscall
	push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
	push_reg TREG_SYSCALL_NR_NAME, r52, \
		 PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
	push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
	wh64	r52	/* cache line 3 */
	wh64	r52	/* cache line 2 */
	wh64	r52	/* cache line 1 */

	/* Load tp with our per-cpu offset. */
	mfspr	r20, SPR_SYSTEM_SAVE_K_0
	moveli	r21, hw2_last(__per_cpu_offset)
	shl16insli r21, r21, hw1(__per_cpu_offset)
	bfextu	r20, r20, 0, LOG2_THREAD_SIZE-1
	shl16insli r21, r21, hw0(__per_cpu_offset)
	shl3add	r20, r20, r21
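/*
 * (Reader's note, not original source:) in C terms, bfextu pulls the
 * cpu number out of the low bits of SYSTEM_SAVE_K_0, and shl3add forms
 * &__per_cpu_offset[cpu] (an array of 8-byte entries), which is then
 * loaded into tp per the comment above, i.e. roughly
 *
 *	tp = __per_cpu_offset[cpu];
 */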
	/*
	 * If we will be returning to the kernel, we will need to
	 * reset the interrupt masks to the state they had before.
	 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
	 */
	mfspr	r32, SPR_EX_CONTEXT_K_1
	andi	r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
	beqzt	r32, 1f		/* zero if from user space */
	IRQS_DISABLED(r32)	/* zero if irqs enabled */
#if PT_FLAGS_DISABLE_IRQ != 1
# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
#endif

	.ifnc	\function,handle_syscall
	/* Record the fact that we saved the caller-save registers above. */
	ori	r32, r32, PT_FLAGS_CALLER_SAVES

#ifdef __COLLECT_LINKER_FEEDBACK__
	/*
	 * Notify the feedback routines that we were in the
	 * appropriate fixed interrupt vector area.  Note that we
	 * still have ICS set at this point, so we can't invoke any
	 * atomic operations or we will panic.  The feedback
	 * routines internally preserve r0..r10 and r30 up.
	 */
	.ifnc	\function,handle_syscall
	moveli	r20, INT_SWINT_1 << 5
	moveli	r21, hw2_last(intvec_feedback)
	shl16insli r21, r21, hw1(intvec_feedback)
	shl16insli r21, r21, hw0(intvec_feedback)

	/* And now notify the feedback routines that we are here. */
	FEEDBACK_ENTER(\function)

	/*
	 * We've captured enough state to the stack (including in
	 * particular our EX_CONTEXT state) that we can now release
	 * the interrupt critical section and replace it with our
	 * standard "interrupts disabled" mask value.  This allows
	 * synchronous interrupts (and profile interrupts) to punch
	 * through from this point onwards.
	 */
	.ifc	\function,handle_nmi
	IRQ_DISABLE(r20, r21)
	mtspr	INTERRUPT_CRITICAL_SECTION, zero

	/*
	 * Prepare the first 256 stack bytes to be rapidly accessible
	 * without having to fetch the background data.
	 */

#ifdef CONFIG_TRACE_IRQFLAGS
	.ifnc	\function,handle_nmi
	/*
	 * We finally have enough state set up to notify the irq
	 * tracing code that irqs were disabled on entry to the handler.
	 * The TRACE_IRQS_OFF call clobbers registers r0-r29.
	 *
	 * For syscalls, we already have the register state saved away
	 * on the stack, so we don't bother to do any register saves here,
	 * and later we pop the registers back off the kernel stack.
	 * For interrupt handlers, save r0-r3 in callee-saved registers.
	 */
	.ifnc	\function,handle_syscall
	{ move r30, r0; move r31, r1 }
	{ move r32, r2; move r33, r3 }
	.ifnc	\function,handle_syscall
	{ move r0, r30; move r1, r31 }
	{ move r2, r32; move r3, r33 }

	/*
	 * Redispatch a downcall.
	 */
	.macro	dc_dispatch vecnum, vecname
	j	hv_downcall_dispatch
	ENDPROC(intvec_\vecname)

	/*
	 * Common code for most interrupts.  The C function we're eventually
	 * going to is in r0, and the faultnum is in r1; the original
	 * values for those registers are on the stack.
	 */
	.pushsection .text.handle_interrupt,"ax"
	finish_interrupt_save handle_interrupt

	/* Jump to the C routine; it should enable irqs as soon as possible. */
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_interrupt)
	movei	r30, 0	/* not an NMI */
	STD_ENDPROC(handle_interrupt)

	/*
	 * This routine takes a boolean in r30 indicating if this is an NMI.
	 * If so, we also expect a boolean in r31 indicating whether to
	 * re-enable the oprofile interrupts.
	 */
STD_ENTRY(interrupt_return)
	/* If we're resuming to kernel space, don't check thread flags. */
	bnez	r30, .Lrestore_all	/* NMIs don't special-case user-space */
	PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
	andi	r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	beqzt	r29, .Lresume_userspace
	PTREGS_PTR(r29, PTREGS_OFFSET_PC)

	/* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
	moveli	r27, hw2_last(_cpu_idle_nap)
	shl16insli r27, r27, hw1(_cpu_idle_nap)
	shl16insli r27, r27, hw0(_cpu_idle_nap)
	blbc	r27, .Lrestore_all
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Disable interrupts so as to make sure we don't
	 * miss an interrupt that sets any of the thread flags (like
	 * need_resched or sigpending) between sampling and the iret.
	 * Routines like schedule() or do_signal() may re-enable
	 * interrupts before returning.
	 */
	IRQ_DISABLE(r20, r21)
	TRACE_IRQS_OFF	/* Note: clobbers registers r0-r29 */

	/* Get base of stack in r32; note r30/31 are used as arguments here. */

	/* Check to see if there is any work to do before returning to user. */
	addi	r29, r32, THREAD_INFO_FLAGS_OFFSET
	moveli	r1, hw1_last(_TIF_ALLWORK_MASK)
	shl16insli r1, r1, hw0(_TIF_ALLWORK_MASK)
	beqzt	r1, .Lrestore_all

	/*
	 * Make sure we have all the registers saved for signal
	 * handling or single-step.  Call out to C code to figure out
	 * exactly what we need to do for each flag bit, then if
	 * necessary, reload the flags and recheck.
	 */
	push_extra_callee_saves r0
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	bnez	r0, .Lresume_userspace

	/*
	 * omit the call to single_process_check_nohz, which normally checks
	 * to see if we should start or stop the scheduler tick, because
	 * we can't call arbitrary Linux code from an NMI context.
	 * We always call the homecache TLB deferral code to re-trigger
	 * the deferral mechanism.
	 *
	 * The other chunk of responsibility this code has is to reset the
	 * interrupt masks appropriately to reset irqs and NMIs.  We have
	 * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
	 * lockdep-type stuff, but we can't set ICS until afterwards, since
	 * ICS can only be used in very tight chunks of code to avoid
	 * tripping over various assertions that it is off.
	 */
	PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
	PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
	andi	r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
#if PT_FLAGS_DISABLE_IRQ != 1
# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use blbct below
#endif
	mtspr	INTERRUPT_CRITICAL_SECTION, r0
	beqzt	r30, .Lrestore_regs
	mtspr	INTERRUPT_CRITICAL_SECTION, r0
	beqzt	r30, .Lrestore_regs

	/*
	 * We now commit to returning from this interrupt, since we will be
	 * doing things like setting EX_CONTEXT SPRs and unwinding the stack
	 * frame.  No calls should be made to any other code after this point.
	 * This code should only be entered with ICS set.
	 * r32 must still be set to ptregs.flags.
	 *
	 * We launch loads to each cache line separately first, so we can
	 * get some parallelism out of the memory subsystem.
	 * We start zeroing caller-saved registers throughout, since
	 * that will save some cycles if this turns out to be a syscall.
	 */
	FEEDBACK_REENTER(interrupt_return)	/* called from elsewhere */

	/*
	 * Rotate so we have one high bit and one low bit to test.
	 * - low bit says whether to restore all the callee-saved registers,
	 *   or just r30-r33, and r52 up.
	 * - high bit (i.e. sign bit) says whether to restore all the
	 *   caller-saved registers, or just r0.
	 */
#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
# error Rotate trick does not work :-)
#endif
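/*
 * (Reader's note, not original source:) the "rotate trick" rotates
 * ptregs.flags (the rotate instruction itself is not shown in this
 * excerpt) so that PT_FLAGS_RESTORE_REGS (value 4) lands in the low
 * bit and PT_FLAGS_CALLER_SAVES (value 2) wraps around into the sign
 * bit; the blbs/bltzt tests below then pick each flag off with a
 * single branch.  The #error above guards the specific flag values
 * this depends on.
 */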
	PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))

	/*
	 * Load cache lines 0, 4, 6 and 7, in that order, then use
	 * the last loaded value, which makes it likely that the other
	 * cache lines have also loaded, at which point we should be
	 * able to safely read all the remaining words on those cache
	 * lines without waiting for the memory subsystem.
	 */
	pop_reg r0, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
	pop_reg r30, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_REG(30)
	pop_reg_zero r52, r3, sp, PTREGS_OFFSET_CMPEXCH - PTREGS_OFFSET_REG(52)
	pop_reg_zero r21, r27, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_CMPEXCH
	pop_reg_zero lr, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_EX1
	mtspr	CMPEXCH_VALUE, r21
	pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
	mtspr	SPR_EX_CONTEXT_K_1, lr
	andi	lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
	mtspr	SPR_EX_CONTEXT_K_0, r21

	/* Restore callee-saveds that we actually use. */
	pop_reg_zero r33, r8, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)

	/*
	 * If we modified other callee-saveds, restore them now.
	 * This is rare, but could be via ptrace or signal handler.
	 */
	blbs	r20, .Lrestore_callees
.Lcontinue_restore_regs:

	/* Check if we're returning from a syscall. */
	bltzt	r20, 1f	/* no, so go restore caller-save registers */
	/*
	 * Check if we're returning to userspace.
	 * Note that if we're not, we don't worry about zeroing everything.
	 */
	addli	sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
	bnez	lr, .Lkernel_return

	/*
	 * On return from syscall, we've restored r0 from pt_regs, but we
	 * clear the remainder of the caller-saved registers.  We could
	 * restore the syscall arguments, but there's not much point,
	 * and it ensures user programs aren't trying to use the
	 * caller-saves if we clear them, as well as avoiding leaking
	 * kernel pointers into userspace.
	 */
	pop_reg_zero lr, r11, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg_zero tp, r12, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
	{ move r15, zero; move r16, zero }
	{ move r17, zero; move r18, zero }
	{ move r19, zero; move r20, zero }
	{ move r21, zero; move r22, zero }
	{ move r23, zero; move r24, zero }
	{ move r25, zero; move r26, zero }

	/* Set r1 to errno if we are returning an error, otherwise zero. */

	/*
	 * Not a syscall, so restore caller-saved registers.
	 * First kick off loads for cache lines 1-3, which we're touching
	 * for the first time here.
	 */
1:	pop_reg r29, sp, PTREGS_OFFSET_REG(21) - PTREGS_OFFSET_REG(29)
	pop_reg r21, sp, PTREGS_OFFSET_REG(13) - PTREGS_OFFSET_REG(21)
	pop_reg r13, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(13)
	/* r13 already restored above */
	/* r21 already restored above */
	pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
	/* r29 already restored above */
	bnez	lr, .Lkernel_return
	pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
	pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP

	/*
	 * We can't restore tp when in kernel mode, since a thread might
	 * have migrated from another cpu and brought a stale tp value.
	 */
	pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR

	/* Restore callee-saved registers from r34 to r51. */
	addli	sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
	j	.Lcontinue_restore_regs
	STD_ENDPROC(interrupt_return)

	/*
	 * "NMI" interrupts mask ALL interrupts before calling the
	 * handler, and don't check thread flags, etc., on the way
	 * back out.  In general, the only things we do here for NMIs
	 * are register save/restore and dataplane kernel-TLB management.
	 * We don't (for example) deal with start/stop of the sched tick.
	 */
	.pushsection .text.handle_nmi,"ax"
	finish_interrupt_save handle_nmi
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_nmi)
	STD_ENDPROC(handle_nmi)

	/*
	 * Parallel code for syscalls to handle_interrupt.
	 */
	.pushsection .text.handle_syscall,"ax"
	finish_interrupt_save handle_syscall

	/* Bump the counter for syscalls made on this tile. */
	moveli	r20, hw2_last(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	shl16insli r20, r20, hw1(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
	shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)

	/* Trace syscalls, if requested. */
	addi	r31, r31, THREAD_INFO_FLAGS_OFFSET
	andi	r30, r30, _TIF_SYSCALL_TRACE
	addi	r30, r31, THREAD_INFO_STATUS_OFFSET - THREAD_INFO_FLAGS_OFFSET
	beqzt	r30, .Lrestore_syscall_regs
	FEEDBACK_REENTER(handle_syscall)

	/*
	 * We always reload our registers from the stack at this
	 * point.  They might be valid, if we didn't build with
	 * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
	 * doing syscall tracing, but there are enough cases now that it
	 * seems simplest just to do the reload unconditionally.
	 */
.Lrestore_syscall_regs:
	PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
	pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
	ld	TREG_SYSCALL_NR_NAME, r11
	moveli	r21, __NR_syscalls

	/* Ensure that the syscall number is within the legal range. */
	moveli	r20, hw2(sys_call_table)
	blbs	r30, .Lcompat_syscall
	cmpltu	r21, TREG_SYSCALL_NR_NAME, r21
	shl16insli r20, r20, hw1(sys_call_table)
	blbc	r21, .Linvalid_syscall
	shl16insli r20, r20, hw0(sys_call_table)

.Lload_syscall_pointer:
	shl3add	r20, TREG_SYSCALL_NR_NAME, r20
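/*
 * (Reader's note, not original source:) in C terms the dispatch above is
 * roughly
 *
 *	if (nr >= __NR_syscalls)
 *		goto invalid_syscall;
 *	handler = sys_call_table[nr];
 *
 * with shl3add scaling the syscall number by 8 for the table of 64-bit
 * pointers; the indirect call itself ("jalr r20", per the label comment
 * below) returns to .Lhandle_syscall_link.
 */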
	/* Jump to syscall handler. */
.Lhandle_syscall_link:	/* value of "lr" after "jalr r20" above */

	/*
	 * Write our r0 onto the stack so it gets restored instead
	 * of whatever the user had there before.
	 * In compat mode, sign-extend r0 before storing it.
	 */
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
.Lsyscall_sigreturn_skip:
	FEEDBACK_REENTER(handle_syscall)

	/* Do syscall trace again, if requested. */
	andi	r30, r30, _TIF_SYSCALL_TRACE
	jal	do_syscall_trace
	FEEDBACK_REENTER(handle_syscall)
1:	j	.Lresume_userspace	/* jump into middle of interrupt_return */

	/*
	 * Load the base of the compat syscall table in r20, and
	 * range-check the syscall number (duplicated from 64-bit path).
	 * Sign-extend all the user's passed arguments to make them consistent.
	 * Also save the original "r(n)" values away in "r(11+n)" in
	 * case the syscall table entry wants to validate them.
	 */
	moveli	r20, hw2(compat_sys_call_table)
	cmpltu	r21, TREG_SYSCALL_NR_NAME, r21
	shl16insli r20, r20, hw1(compat_sys_call_table)
	blbc	r21, .Linvalid_syscall
	shl16insli r20, r20, hw0(compat_sys_call_table)
	{ move r11, r0; addxi r0, r0, 0 }
	{ move r12, r1; addxi r1, r1, 0 }
	{ move r13, r2; addxi r2, r2, 0 }
	{ move r14, r3; addxi r3, r3, 0 }
	{ move r15, r4; addxi r4, r4, 0 }
	{ move r16, r5; addxi r5, r5, 0 }
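/*
 * (Reader's note, not original source:) each bundle above stashes the
 * original 64-bit argument in r11..r16, then uses the 32-bit add form
 * ("addxi rN, rN, 0", adding zero) to sign-extend the low 32 bits of
 * the argument in place, matching the comment above about making the
 * compat arguments consistent.
 */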
	j	.Lload_syscall_pointer

	/* Report an invalid syscall back to the user program */
	PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
	j	.Lresume_userspace	/* jump into middle of interrupt_return */
	STD_ENDPROC(handle_syscall)

	/* Return the address for oprofile to suppress in backtraces. */
STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
	addli	r0, r0, .Lhandle_syscall_link - .
	STD_ENDPROC(handle_syscall_link_address)

STD_ENTRY(ret_from_fork)
	FEEDBACK_REENTER(ret_from_fork)
	j	.Lresume_userspace
	STD_ENDPROC(ret_from_fork)

	/* Various stub interrupt handlers and syscall handlers */

STD_ENTRY_LOCAL(_kernel_double_fault)
	mfspr	r1, SPR_EX_CONTEXT_K_0
	addi	sp, sp, -C_ABI_SAVE_AREA_SIZE
	j	kernel_double_fault
	STD_ENDPROC(_kernel_double_fault)

STD_ENTRY_LOCAL(bad_intr)
	mfspr	r2, SPR_EX_CONTEXT_K_0
	panic	"Unhandled interrupt %#x: PC %#lx"
	STD_ENDPROC(bad_intr)

	/* Put address of pt_regs in reg and jump. */
#define PTREGS_SYSCALL(x, reg)	\
	PTREGS_PTR(reg, PTREGS_OFFSET_BASE);	\

	/*
	 * Special-case sigreturn to not write r0 to the stack on return.
	 * This is technically more efficient, but it also avoids difficulties
	 * in the 64-bit OS when handling 32-bit compat code, since we must not
	 * sign-extend r0 for the sigreturn return-value case.
	 */
#define PTREGS_SYSCALL_SIGRETURN(x, reg)	\
	addli	lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
	PTREGS_PTR(reg, PTREGS_OFFSET_BASE);	\

	PTREGS_SYSCALL(sys_execve, r3)
	PTREGS_SYSCALL(sys_sigaltstack, r2)
	PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
#ifdef CONFIG_COMPAT
	PTREGS_SYSCALL(compat_sys_execve, r3)
	PTREGS_SYSCALL(compat_sys_sigaltstack, r2)
	PTREGS_SYSCALL_SIGRETURN(compat_sys_rt_sigreturn, r0)
#endif

	/* Save additional callee-saves to pt_regs, put address in r4 and jump. */
STD_ENTRY(_sys_clone)
	push_extra_callee_saves r4
	STD_ENDPROC(_sys_clone)

	/* The single-step support may need to read all the registers. */
	push_extra_callee_saves r0

	/* Include .intrpt1 array of interrupt vectors */
	.section ".intrpt1", "ax"

#define op_handle_perf_interrupt bad_intr
#define op_handle_aux_perf_interrupt bad_intr

#ifndef CONFIG_HARDWALL
#define do_hardwall_trap bad_intr
#endif
	int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
	int_hand     INT_SINGLE_STEP_3, SINGLE_STEP_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, gx_singlestep_handle
	int_hand     INT_SINGLE_STEP_1, SINGLE_STEP_1, bad_intr
#else
	int_hand     INT_SINGLE_STEP_2, SINGLE_STEP_2, bad_intr
	int_hand     INT_SINGLE_STEP_1, SINGLE_STEP_1, gx_singlestep_handle
#endif
	int_hand     INT_SINGLE_STEP_0, SINGLE_STEP_0, bad_intr
	int_hand     INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
	int_hand     INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
	int_hand     INT_ITLB_MISS, ITLB_MISS, do_page_fault
	int_hand     INT_ILL, ILL, do_trap
	int_hand     INT_GPV, GPV, do_trap
	int_hand     INT_IDN_ACCESS, IDN_ACCESS, do_trap
	int_hand     INT_UDN_ACCESS, UDN_ACCESS, do_trap
	int_hand     INT_SWINT_3, SWINT_3, do_trap
	int_hand     INT_SWINT_2, SWINT_2, do_trap
	int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
	int_hand     INT_SWINT_0, SWINT_0, do_trap
	int_hand     INT_ILL_TRANS, ILL_TRANS, do_trap
	int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
	int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
	int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
	int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
	int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
	int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
	int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
	int_hand     INT_UDN_TIMER, UDN_TIMER, bad_intr
	int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr
	int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr
	int_hand     INT_IPI_3, IPI_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	int_hand     INT_IPI_2, IPI_2, tile_dev_intr
	int_hand     INT_IPI_1, IPI_1, bad_intr
#else
	int_hand     INT_IPI_2, IPI_2, bad_intr
	int_hand     INT_IPI_1, IPI_1, tile_dev_intr
#endif
	int_hand     INT_IPI_0, IPI_0, bad_intr
	int_hand     INT_PERF_COUNT, PERF_COUNT, \
		     op_handle_perf_interrupt, handle_nmi
	int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
		     op_handle_aux_perf_interrupt, handle_nmi
	int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
#if CONFIG_KERNEL_PL == 2
	dc_dispatch  INT_INTCTRL_2, INTCTRL_2
	int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
#else
	int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
	dc_dispatch  INT_INTCTRL_1, INTCTRL_1
#endif
	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
	int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, bad_intr
	int_hand     INT_I_ASID, I_ASID, bad_intr
	int_hand     INT_D_ASID, D_ASID, bad_intr
	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap

	/* Synthetic interrupt delivered only by the simulator */
	int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint