/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
	.text
	.align	32

kvmap_itlb:
	/* %g6: TAG TARGET */
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_IMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

	/* Catch kernel NULL pointer calls. */
	sethi	%hi(PAGE_SIZE), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_itlb_longpath
	 nop

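	/* Probe the kernel TSB first; on a hit the lookup macro (see
	 * asm/tsb.h) branches straight to kvmap_itlb_load with the
	 * matching PTE left in %g5.
	 */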
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
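	/* Classify the miss address: below LOW_OBP_ADDRESS it is a
	 * vmalloc/module mapping resolved through the kernel page
	 * tables; within [LOW_OBP_ADDRESS, 4GB) it belongs to the
	 * OpenBoot firmware; at or above 4GB it falls through to the
	 * page table walk as well.
	 */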
	sethi	%hi(LOW_OBP_ADDRESS), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_itlb_vmalloc_addr
	 mov	0x1, %g5
	sllx	%g5, 32, %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

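	/* Install the PTE into the kernel TSB: TSB_LOCK_TAG parks a
	 * locked tag in the entry so concurrent probes never see a
	 * half-written PTE/tag pair, then TSB_WRITE stores the PTE and
	 * the final tag (both macros live in asm/tsb.h).
	 */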
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code. The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_itlb_load
	 mov	%g5, %g3

kvmap_itlb_longpath:

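	/* Slow path: flip from the MMU globals to the alternate globals
	 * on sun4u (the patched sun4v variant bumps the global register
	 * level instead), then hand off to the common fault handler
	 * with the faulting PC in %g5 and the fault code in %g4.
	 */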
661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
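	/* Firmware mappings are not in the kernel page tables; look the
	 * address up in the translation table OBP handed over at boot,
	 * then cache the result in the TSB like any other entry.
	 */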
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt	%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt	%xcc, kvmap_dtlb_load
	 nop

kvmap_linear_early:
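	/* Early in boot the patched fast path below is not active yet,
	 * so derive the linear-mapping PTE directly: XOR'ing the vaddr
	 * with the precomputed kern_linear_pte_xor value strips the
	 * PAGE_OFFSET bits and merges in the PTE attribute bits.
	 */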
	sethi	%hi(kern_linear_pte_xor), %g7
	ldx	[%g7 + %lo(kern_linear_pte_xor)], %g2
	ba,pt	%xcc, kvmap_dtlb_tsb4m_load
	 xor	%g2, %g4, %g5

	.align	32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt	%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
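	/* The linear mapping occupies the top of the address space, so
	 * bit 63 is set for every linear address. Treating %g4 as a
	 * signed value, anything non-negative cannot be linear and
	 * takes the non-linear path.
	 */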
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* Linear mapping TSB lookup failed. Fall through to kernel
	 * page table based lookup.
	 */
	.globl	kvmap_linear_patch
kvmap_linear_patch:
	ba,a,pt	%xcc, kvmap_linear_early
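	/* The branch above is exported (.globl) so that boot code can
	 * patch it out once the kernel page tables fully describe the
	 * linear mapping; linear misses then fall through to the page
	 * table walk below instead of the early XOR shortcut.
	 */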

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code. The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt	%xcc, sun4v_dtlb_load
	 mov	%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
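	/* vmemmap addresses are mapped by the kernel page tables too,
	 * but deliberately bypass the TSB (see the range check under
	 * kvmap_dtlb_nonlinear), so the PTE from the walk goes straight
	 * into the TLB.
	 */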
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
	ba,a,pt	%xcc, kvmap_dtlb_load
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs. */
	sethi	%hi(PAGE_SIZE), %g5
	cmp	%g4, %g5
	bleu,pn	%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap. */
	sethi	%hi(VMEMMAP_BASE), %g5
	ldx	[%g5 + %lo(VMEMMAP_BASE)], %g5
	cmp	%g4, %g5
	bgeu,pn	%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
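	/* Only [MODULES_VADDR, VMALLOC_END) is a legitimate non-linear
	 * kernel data address at this point; anything outside that
	 * window is a genuine fault.
	 */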
	sethi	%hi(MODULES_VADDR), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_dtlb_longpath
	 sethi	%hi(VMALLOC_END), %g5
	ldx	[%g5 + %lo(VMALLOC_END)], %g5
	cmp	%g4, %g5
	bgeu,pn	%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
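	/* Same split as on the ITLB side: the low OBP window is served
	 * from the firmware translation table, everything else from the
	 * kernel page tables.
	 */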
	sethi	%hi(LOW_OBP_ADDRESS), %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_dtlb_vmalloc_addr
	 mov	0x1, %g5
	sllx	%g5, 32, %g5
	cmp	%g4, %g5
	blu,pn	%xcc, kvmap_dtlb_obp
	 nop
	ba,pt	%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

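	/* Slow path. The patched sun4v variant loads the fault status
	 * area pointer from the scratchpad register; the sun4u variant
	 * reads the fault address from TLB_TAG_ACCESS further down.
	 */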
661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa	[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1
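	/* TL == 1 means the miss came from a normal kernel access and
	 * goes to sparc64_realfault_common below; a deeper trap level
	 * means it hit inside a register window handler and must be
	 * unwound via winfix_trampoline.
	 */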

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g5 here.
	 */

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop