/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

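/* All of these handlers run at trap level 1 on a TLB miss against
 * a kernel virtual address.  The fast path services the miss from
 * the kernel TSB; vmalloc/module and OBP firmware addresses fall
 * back to a kernel page table walk or the OBP translation table,
 * and anything unresolvable exits through the longpath into the
 * generic fault handling code.
 */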
kvmap_itlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

kvmap_itlb_nonlinear:
/* Catch kernel NULL pointer calls. */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
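	/* The lookup macro branches straight to kvmap_itlb_load on a
	 * TSB hit, so falling through here means the kernel TSB does
	 * not cover this address.  The checks below classify it:
	 * below LOW_OBP_ADDRESS it is a vmalloc/module mapping, from
	 * LOW_OBP_ADDRESS up to 4GB it is an OBP firmware mapping.
	 */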

kvmap_itlb_tsb_miss:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

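	/* KERN_PGTABLE_WALK descends the kernel page tables for this
	 * vmalloc/module address and leaves the physical address of
	 * the PTE in %g5 for the ASI_PHYS_USE_EC load below.
	 */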
TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)

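	/* A PTE is valid only when bit 63 is set, so brgez catches
	 * the invalid case.  The annulled (,a) delay slot executes
	 * only when the branch is taken: TSB_STORE rewrites the tag
	 * word as invalid, dropping the lock TSB_LOCK_TAG took, on
	 * the way out to the longpath.
	 */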
TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

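	/* The longpath hands the miss to the generic fault code.  On
	 * sun4u the wrpr below xors PSTATE_AG | PSTATE_MG to switch
	 * from the MMU globals back to the alternate globals; the
	 * sun4v patch selects global register level 1 instead.
	 */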
661: rdpr %pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
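	/* OBP_TRANS_LOOKUP scans the translation entries we recorded
	 * from the firmware at boot; on a match it leaves the PTE in
	 * %g5, otherwise it branches to the given longpath label.
	 */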
OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
brgez,pn %g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
	sllx		%g4, 21, %g5

/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
srlx %g5, 21 + 41, %g2
	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop

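	/* The sllx above left the physical address in the upper bits
	 * of %g5, so shifting right by 21 + 41 isolates PA bits above
	 * bit 41.  A nonzero result means the address lies outside
	 * the supported 41-bit physical range.
	 */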
/* This unconditional branch and delay-slot nop gets patched
	 * by the sethi sequence once the bitmap is properly setup.
	 */
.globl valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt		%xcc, 2f
	 nop
	.subsection	2
.globl valid_addr_bitmap_patch
valid_addr_bitmap_patch:
sethi %hi(sparc64_valid_addr_bitmap), %g7
or %g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous

srlx %g5, 21 + 22, %g2
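	/* %g2 now holds the 4MB-chunk index; split it below into a
	 * word offset (index / 64, scaled by 8 bytes) and a bit
	 * number (index % 64).  A clear bit means no valid memory in
	 * that 4MB chunk.
	 */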
	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
	ldx		[%g7 + %g5], %g5
	mov		1, %g7
	sllx		%g7, %g2, %g7
	andcc		%g5, %g7, %g0
	be,pn		%xcc, kvmap_dtlb_longpath

2: sethi %hi(kpte_linear_bitmap), %g2
or %g2, %lo(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
	sllx		%g4, 21, %g5
	mov		1, %g7
srlx %g5, 21 + 28, %g5

	/* Don't try this at home kids... this depends upon srlx
	 * only taking the low 6 bits of the shift count in %g5.
	 */
	sllx		%g7, %g5, %g7

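	/* %g5 is the 256MB index and may be >= 64, but the shift
	 * masks its count to the low 6 bits, which is exactly
	 * index % 64: the bit position inside one bitmap word.
	 */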
/* Divide by 64 to get the offset into the bitmask. */
	srlx		%g5, 6, %g5
	sllx		%g5, 3, %g5

/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
	ldx		[%g2 + %g5], %g2
	andcc		%g2, %g7, %g0
sethi %hi(kern_linear_pte_xor), %g5
or %g5, %lo(kern_linear_pte_xor), %g5
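	/* Select which of the two kern_linear_pte_xor[] entries to
	 * use: if this 256MB chunk's bit is set, the annulled delay
	 * slot advances %g5 by 8 bytes to element [1].  The two
	 * elements hold the PTE bits for the two linear mapping page
	 * sizes.
	 */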
	bne,a,pt	%xcc, 1f
	 add		%g5, 8, %g5

1:	ldx		[%g5], %g2

.globl kvmap_linear_patch
kvmap_linear_patch:
ba,pt %xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

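	/* The linear mapping PTE is formed as vaddr ^ pte_xor; the
	 * xor value is constructed so that this yields the physical
	 * page number plus protection bits for the chosen page size.
	 */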
kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
	retry
.section .sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
ba,pt %xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	sub		%g4, %g5, %g5
	srlx		%g5, 22, %g5
sethi %hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
or %g1, %lo(vmemmap_table), %g1
ba,pt %xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

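	/* vmemmap_table[] holds one pre-built mapping per 4MB chunk
	 * of the virtual memmap; the index computed above is
	 * (vaddr - VMEMMAP_BASE) >> 22, scaled by 8 for the ldx.
	 */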
kvmap_dtlb_nonlinear:
/* Catch kernel NULL pointer derefs. */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Do not use the TSB for vmemmap. */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
mov (VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
sethi %hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
blu,pn %xcc, kvmap_dtlb_obp
	 nop
ba,pt %xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

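	/* As on the itlb side, switch back to the alternate globals
	 * (global level 1 on sun4v) before entering the C fault
	 * handler.  The trap level check below routes misses taken
	 * at TL > 1 through winfix_trampoline instead.
	 */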
661: rdpr %pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
ldxa [%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661: mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

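	/* sun4u reads the faulting address from TLB_TAG_ACCESS; the
	 * sun4v patch pulls it from the hypervisor fault status area
	 * pointed to by the scratchpad register.
	 */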
be,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop