v3.5.6
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

kvmap_itlb_nonlinear:
	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop
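	/* Note: this ITLB NULL-pointer check branches to the DTLB
	 * longpath (kvmap_dtlb_longpath); the v3.15 listing below
	 * branches to kvmap_itlb_longpath here instead.
	 */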

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop
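	/* Dispatch on the faulting address: below LOW_OBP_ADDRESS it
	 * is vmalloc/module space, in [LOW_OBP_ADDRESS, 4GB) it may
	 * be an OBP firmware translation, and anything higher falls
	 * through into the vmalloc path below.
	 */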

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
	sllx		%g4, 21, %g5

	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
	srlx		%g5, 21 + 41, %g2
	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop
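	/* Worked example: on these kernels PAGE_OFFSET occupies the
	 * top 21 virtual bits, so "sllx %g4, 21" leaves the physical
	 * address in the upper bits of %g5.  Shifting back down by
	 * 21 + 41 is non-zero only if the physical address needs
	 * more than 41 bits, i.e. the PFN is out of range.
	 */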

	/* This unconditional branch and delay-slot nop gets patched
	 * by the sethi sequence once the bitmap is properly setup.
	 */
	.globl		valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt		%xcc, 2f
	 nop
	.subsection	2
	.globl		valid_addr_bitmap_patch
valid_addr_bitmap_patch:
	sethi		%hi(sparc64_valid_addr_bitmap), %g7
	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous
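	/* One bitmap bit covers a 4MB chunk: bit index = paddr >> 22,
	 * 64-bit word = index / 64, bit within word = index % 64.
	 * E.g. paddr 0x40000000 (1GB) gives index 256, i.e. word 4,
	 * bit 0 of the bitmap.
	 */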
	srlx		%g5, 21 + 22, %g2
	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
	ldx		[%g7 + %g5], %g5
	mov		1, %g7
	sllx		%g7, %g2, %g7
	andcc		%g5, %g7, %g0
	be,pn		%xcc, kvmap_dtlb_longpath

2:	 sethi		%hi(kpte_linear_bitmap), %g2
	or		%g2, %lo(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
	sllx		%g4, 21, %g5
	mov		1, %g7
	srlx		%g5, 21 + 28, %g5

	/* Don't try this at home kids... this depends upon srlx
	 * only taking the low 6 bits of the shift count in %g5.
	 */
	sllx		%g7, %g5, %g7

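	/* Example of the shift-count trick: a 256MB index of 70
	 * yields "sllx %g7, 70, %g7", but the hardware uses only
	 * 70 & 63 = 6, which is exactly the bit position within the
	 * 64-bit word selected below by index / 64.
	 */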
	/* Divide by 64 to get the offset into the bitmask.  */
	srlx		%g5, 6, %g5
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
	ldx		[%g2 + %g5], %g2
	andcc		%g2, %g7, %g0
	sethi		%hi(kern_linear_pte_xor), %g5
	or		%g5, %lo(kern_linear_pte_xor), %g5
	bne,a,pt	%xcc, 1f
	 add		%g5, 8, %g5

1:	ldx		[%g5], %g2

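	/* The ",a" (annul) bit cancels the delay-slot add when the
	 * branch falls through, so %g5 points at kern_linear_pte_xor[0]
	 * when the bit is clear and kern_linear_pte_xor[1] when it is
	 * set (on these kernels, 4MB and 256MB linear mappings
	 * respectively).
	 */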
	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

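	/* The xor works because the PAGE_OFFSET bits of the vaddr
	 * and the PTE attribute bits occupy disjoint bit positions:
	 * a single xor both strips PAGE_OFFSET and merges in the
	 * valid, size, cacheability and protection bits precomputed
	 * at boot in kern_linear_pte_xor[].
	 */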
kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

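	/* vmemmap faults are resolved from a pre-built table: %g5
	 * holds VMEMMAP_BASE on entry, so (vaddr - base) >> 22
	 * indexes vmemmap_table, whose 8-byte entries hold the
	 * corresponding 4MB TTEs.
	 */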
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	sub		%g4, %g5, %g5
	srlx		%g5, 22, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4,%g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

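	/* The mov/sllx pairs here rebuild 64-bit constants such as
	 * VMALLOC_END whose low 40 bits are zero; sethi can only
	 * materialize a 22-bit immediate.  The miss must fall inside
	 * [MODULES_VADDR, VMALLOC_END), anything else is a real fault.
	 */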
kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

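	/* A miss at TL == 1 is an ordinary kernel fault; at higher
	 * trap levels we were in the middle of register window
	 * spill/fill handling, so route through winfix_trampoline.
	 */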
	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop
v3.15
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:
	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
661:	sllx		%g4, 0, %g5
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

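	/* In v3.15 PAGE_OFFSET is chosen at boot, so the shift counts
	 * here are runtime-patched: each 661: instruction recorded in
	 * .page_offset_shift_patch gets its immediate shift amount
	 * rewritten to match the PAGE_OFFSET actually in use ("sllx
	 * %g4, 0" above is just the unpatched placeholder).
	 */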
	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
661:	srlx		%g5, MAX_PHYS_ADDRESS_BITS, %g2
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop

	/* This unconditional branch and delay-slot nop gets patched
	 * by the sethi sequence once the bitmap is properly setup.
	 */
	.globl		valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt		%xcc, 2f
	 nop
	.subsection	2
	.globl		valid_addr_bitmap_patch
valid_addr_bitmap_patch:
	sethi		%hi(sparc64_valid_addr_bitmap), %g7
	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous

661:	srlx		%g5, ILOG2_4MB, %g2
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
	ldx		[%g7 + %g5], %g5
	mov		1, %g7
	sllx		%g7, %g2, %g7
	andcc		%g5, %g7, %g0
	be,pn		%xcc, kvmap_dtlb_longpath

2:	 sethi		%hi(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
661:	sllx		%g4, 0, %g5
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	or		%g2, %lo(kpte_linear_bitmap), %g2

661:	srlx		%g5, ILOG2_256MB, %g5
	.section	.page_offset_shift_patch, "ax"
	.word		661b
	.previous

	and		%g5, (32 - 1), %g7

	/* Divide by 32 to get the offset into the bitmask.  */
	srlx		%g5, 5, %g5
	add		%g7, %g7, %g7
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[((mask >> shift) & 3)] */
	ldx		[%g2 + %g5], %g2
	srlx		%g2, %g7, %g7
	sethi		%hi(kern_linear_pte_xor), %g5
	and		%g7, 3, %g7
	or		%g5, %lo(kern_linear_pte_xor), %g5
	sllx		%g7, 3, %g7
	ldx		[%g5 + %g7], %g2

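	/* Unlike v3.5.6's single bit per 256MB chunk, v3.15 keeps a
	 * 2-bit field per chunk: word = index / 32, shift =
	 * (index % 32) * 2 (the add doubles %g7).  The field value
	 * 0-3 then indexes the four kern_linear_pte_xor[] entries,
	 * one per supported linear page size (4MB, 256MB, 2GB and
	 * 16GB on these cpus).
	 */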
	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	sub		%g4, %g5, %g5
	srlx		%g5, ILOG2_4MB, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4,%g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop