	/* Check to see if there is any work to do before returning to user. */
	{
	 addi	r29, r32, THREAD_INFO_FLAGS_OFFSET
	 moveli	r28, lo16(_TIF_ALLWORK_MASK)
	}
	{
	 lw	r29, r29
	 auli	r28, r28, ha16(_TIF_ALLWORK_MASK)
	}
	and	r28, r29, r28
	bnz	r28, .Lwork_pending
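/*
 * For orientation, the test above amounts to roughly this C logic (a
 * sketch only; apart from current_thread_info() and _TIF_ALLWORK_MASK,
 * the names are illustrative, not the kernel's actual identifiers):
 *
 *	unsigned long flags = current_thread_info()->flags;
 *	if (flags & _TIF_ALLWORK_MASK)
 *		goto work_pending;	// some flag needs servicing first
 *	goto restore_all;		// nothing to do: restore and iret
 *
 * The 32-bit mask is materialized in two halves because moveli carries
 * only a 16-bit immediate: moveli loads lo16() and auli adds ha16(),
 * the high half adjusted for the sign-extension of lo16().
 */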
	/* In the NMI case we ... */

	pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
	j	.Lcontinue_restore_regs
.Lwork_pending:
	/* Mask the reschedule flag */
	andi	r28, r29, _TIF_NEED_RESCHED

	{
	 /*
	  * If the NEED_RESCHED flag is set, we call schedule(), which
	  * may drop this context right here and go do something else.
	  * On return, jump back to .Lresume_userspace and recheck.
	  */
	 bz	r28, .Lasync_tlb

	 /* Mask the async-tlb flag */
	 andi	r28, r29, _TIF_ASYNC_TLB
	}

	jal	schedule
	FEEDBACK_REENTER(interrupt_return)

	/* Reload the flags and check again */
	j	.Lresume_userspace
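/*
 * The overall structure is the usual "act, then re-test everything"
 * loop; a rough C sketch, with illustrative names for anything not
 * shown in the assembly:
 *
 *	for (;;) {
 *		unsigned long flags = current_thread_info()->flags;
 *		if (!(flags & _TIF_ALLWORK_MASK))
 *			break;			// .Lrestore_all
 *		if (flags & _TIF_NEED_RESCHED)
 *			schedule();		// may block for a long time
 *		else
 *			handle_other_flags(flags);	// cases below
 *	}
 *
 * Because schedule() can run arbitrary other code, nothing sampled
 * before it can be trusted afterward; the flags word is reloaded from
 * thread_info on every pass (the jump back to .Lresume_userspace).
 */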
.Lasync_tlb:
	{
	 bz	r28, .Lneed_sigpending

	 /* Mask the sigpending flag */
	 andi	r28, r29, _TIF_SIGPENDING
	}

	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal	do_async_page_fault
	FEEDBACK_REENTER(interrupt_return)

	/*
	 * Go restart the "resume userspace" process.  We may have
	 * fired a signal, and we need to disable interrupts again.
	 */
	j	.Lresume_userspace
.Lneed_sigpending:
	/*
	 * At this point we are either doing signal handling or single-step,
	 * so either way make sure we have all the registers saved.
	 */
	push_extra_callee_saves r0

	{
	 /* If no signal pending, skip to singlestep check */
	 bz	r28, .Lneed_singlestep

	 /* Mask the singlestep flag */
	 andi	r28, r29, _TIF_SINGLESTEP
	}

	jal	do_signal
	FEEDBACK_REENTER(interrupt_return)

	/* Reload the flags and check again */
	j	.Lresume_userspace
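/*
 * Signal delivery and single-step both expose the full register set
 * (through the signal frame or ptrace), so the callee-saved registers
 * that the normal interrupt path leaves in place must be spilled into
 * pt_regs first.  In rough C (the helper name is illustrative):
 *
 *	save_extra_callee_saves(regs);	// what push_extra_callee_saves does
 *	if (flags & _TIF_SIGPENDING)
 *		do_signal(regs);
 *	// then loop back and re-test the flags
 */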
.Lneed_singlestep:
	{
	 /* Get a pointer to the EX1 field */
	 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)

	 /* If we get here, our bit must be set. */
	 bz	r28, .Lwork_confusion
	}

	/* If we are in priv mode, don't single step */
	lw	r28, r29
	andi	r28, r28, SPR_EX_CONTEXT_1_1__PL_MASK	/* mask off ICS */
	bnz	r28, .Lrestore_all
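/*
 * The saved EX1 value holds the protection level (PL) and the ICS bit
 * of the interrupted context; single-stepping only makes sense when we
 * are returning to PL 0 (user space).  Roughly, in C (a sketch; assume
 * "ex1" is the saved SPR value in pt_regs):
 *
 *	if (regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK)	// PL != 0
 *		goto restore_all;	// privileged: skip single-step
 */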
	/* Allow interrupts within the single step code */
	TRACE_IRQS_ON	/* Note: clobbers registers r0-r29 */
	IRQ_ENABLE(r20, r21)

	/* try to single-step the current instruction */
	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	jal	single_step_once
	FEEDBACK_REENTER(interrupt_return)

	/* Re-disable interrupts.  TRACE_IRQS_OFF in .Lrestore_all. */
	IRQ_DISABLE(r20, r21)

	j	.Lrestore_all
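/*
 * The enable/disable pair around the call corresponds to the usual
 * kernel helpers (single_step_once() is the tile routine that arranges
 * for one user instruction to execute); a sketch:
 *
 *	local_irq_enable();	// the single-step code may take interrupts
 *	single_step_once(regs);
 *	local_irq_disable();	// .Lrestore_all expects IRQs off
 */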
.Lwork_confusion:
	move	r0, r28
	panic	"thread_info allwork flags unhandled on userspace resume: %#x"

	STD_ENDPROC(interrupt_return)
/*
 * This interrupt variant clears the INT_INTCTRL_K interrupt mask bit
 * before returning, so we can properly get more downcalls.
 */
	.pushsection .text.handle_interrupt_downcall,"ax"
handle_interrupt_downcall:
	finish_interrupt_save handle_interrupt_downcall
	check_single_stepping normal, .Ldispatch_downcall
.Ldispatch_downcall:

	/* Clear INTCTRL_K from the set of interrupts we ever enable. */
	GET_INTERRUPTS_ENABLED_MASK_PTR(r30)
	movei	r31, INT_MASK(INT_INTCTRL_K)

	PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
	FEEDBACK_REENTER(handle_interrupt_downcall)

	/* Allow INTCTRL_K to be enabled next time we enable interrupts. */

	movei	r30, 0	/* not an NMI */
	j	interrupt_return
	STD_ENDPROC(handle_interrupt_downcall)
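/*
 * The mask bookkeeping around the dispatch is, in rough C (a sketch;
 * "enabled_mask" stands for the word GET_INTERRUPTS_ENABLED_MASK_PTR
 * points at, and the dispatch call is illustrative):
 *
 *	enabled_mask &= ~INT_MASK(INT_INTCTRL_K);  // block nested downcalls
 *	handle_downcall(regs);
 *	enabled_mask |= INT_MASK(INT_INTCTRL_K);   // permit the next one
 *
 * Keeping INTCTRL_K out of the enabled set while the handler runs means
 * a new downcall cannot interrupt the one being processed; restoring
 * the bit afterward is what lets us "properly get more downcalls".
 */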
/* Some interrupts don't check for single stepping */
	.pushsection .text.handle_interrupt_no_single_step,"ax"
	shri	r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
	slt_u	r23, r0, r23

	/*
	 * Ensure that the TLB is loaded before we take out the lock.
	 * On TILEPro, this will start fetching the value all the way
	 * into our L1 as well (and if it gets modified before we
	 * grab the lock, it will be invalidated from our cache
	 * before we reload it).  On tile64, we'll start fetching it
	 * into our L1 if we're the home, and if we're not, we'll
	 * still at least start fetching it into the home's L2.
	 */
	lw	r26, r0

	s2a	r21, r20, r21

	{
	 bbs	r23, .Lcmpxchg64
	 andi	r23, r0, 7	/* Precompute alignment for cmpxchg64. */
	}

	/*
	 * We very carefully align the code that actually runs with
	 * the lock held (nine bundles) so that we know it is all in
	 * the icache when we start.  This instruction (the jump) is
	 * at the start of the first cache line, address zero mod 64;
	 * we jump to somewhere in the second cache line to issue the
	 * tns, then jump back to finish up.
	 */
	{
	 s2a	ATOMIC_LOCK_REG_NAME, r25, r21
	 j	.Lcmpxchg32_tns
	}
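/*
 * The hashing above picks a spinlock from a two-level table keyed by
 * the user address; in rough C (a sketch: the index expressions follow
 * the spirit of the code, not its exact bit layout):
 *
 *	u32 h = crc32_32(0, addr_bits);		// addr bits 3..PAGE_SHIFT-1
 *	int *l1 = atomic_lock_ptr[h >> (32 - ATOMIC_HASH_L1_SHIFT)];
 *	int *lock = &l1[h & (ATOMIC_HASH_L2_SIZE - 1)];
 *	(void)*(volatile int *)addr;	// the "lw r26, r0": touch the user
 *					// word now, so the TLB entry (and,
 *					// on TILEPro, the cache line) is
 *					// loaded before the lock is held
 */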
#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/*
	 * We very carefully align the code that actually runs with
	 * the lock held (twelve bundles) so that we know it is all in
	 * the icache when we start.  This instruction (the jump) is
	 * at the start of the first cache line, address zero mod 64;
	 * we jump to the very end of the second cache line to get that
	 * line loaded in the icache, then fall through to issue the tns
	 * in the third cache line, at which point it's all cached.
	 * Note that this is for performance, not correctness.
	 */
	j	.Lcmpxchg32_tns

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
	/* Symbol for do_page_fault_ics() to use to compare against the PC. */
	ENTRY(__sys_cmpxchg_grab_lock)
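/*
 * The lock itself is a single word claimed with "tns" (test-and-set:
 * atomically store 1 and return the previous value).  In rough C, with
 * tns() standing in for the instruction:
 *
 *	while (tns(lock) != 0)
 *		;		// contended: spin and retry
 *	// ... cmpxchg/atomic_update body runs here ...
 *	*lock = 0;		// release
 *
 * Exporting __sys_cmpxchg_grab_lock lets do_page_fault_ics() compare
 * the faulting PC against this address to decide whether a fault was
 * taken before or after the lock was acquired.
 */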
	/*
	 * Perform the actual cmpxchg or atomic_update.
	 * Note that __futex_mark_unlocked() in uClibc relies on
	 * atomic_update() to always perform an "mf", so don't make
	 * it optional or conditional without modifying that code.
	 */
.Ldo_cmpxchg32:
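/*
 * Under the lock, the two operations come down to the following C
 * (a sketch; the exact argument order is illustrative):
 *
 *	int cmpxchg32(int *p, int oldval, int newval) {
 *		int val = *p;
 *		if (val == oldval)
 *			*p = newval;
 *		return val;
 *	}
 *
 *	int atomic_update(int *p, int mask, int addend) {
 *		int val = *p;
 *		*p = (val & mask) + addend;
 *		mf();	// unconditional fence: uClibc's
 *			// __futex_mark_unlocked() depends on it
 *		return val;
 *	}
 */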
	int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
	int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
		     hv_message_intr, handle_interrupt_downcall
	int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
		     tile_dev_intr, handle_interrupt_downcall
	int_hand     INT_I_ASID, I_ASID, bad_intr
	int_hand     INT_D_ASID, D_ASID, bad_intr
	int_hand     INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
		     do_page_fault, handle_interrupt_downcall
	int_hand     INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
		     do_page_fault, handle_interrupt_downcall
	int_hand     INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
		     do_page_fault, handle_interrupt_downcall
	int_hand     INT_SN_CPL, SN_CPL, bad_intr
	int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
#if CHIP_HAS_AUX_PERF_COUNTERS()