v4.17
/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_longpath
	 nop
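	/* The blu above treats any VA inside page zero as a NULL
	 * pointer call (e.g. a jump through a NULL function pointer);
	 * that page is never mapped, so take the long path and fault.
	 */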

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop
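	/* Dispatch on the missing VA: below LOW_OBP_ADDRESS it is a
	 * vmalloc/module address, [LOW_OBP_ADDRESS, 4GB) is a firmware
	 * (OBP) translation, and anything at 4GB or above falls
	 * through to the page table walk as well.
	 */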

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3
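	/* Each .sun4v_2insn_patch record above is effectively
	 * { addr, insns[2] }: the 661b address followed by the two
	 * replacement instruction words (here two nops).  A sketch of
	 * the boot-time patch step, assuming that entry layout:
	 *
	 *	*(u32 *)(p->addr + 0) = p->insns[0];
	 *	*(u32 *)(p->addr + 4) = p->insns[1];
	 *	(then flush the I-cache for those two words)
	 */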

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_linear_early:
	sethi		%hi(kern_linear_pte_xor), %g7
	ldx		[%g7 + %lo(kern_linear_pte_xor)], %g2
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5
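	/* kern_linear_pte_xor is precomputed so that, for a linear
	 * mapping VA, vaddr ^ mask is a complete TTE: the PAGE_OFFSET
	 * bits cancel out to leave the physical address, and the mask
	 * supplies the valid, size and protection bits.  One xor in
	 * the delay slot thus synthesizes the PTE with no memory
	 * access beyond loading the mask itself.
	 */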

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* Linear mapping TSB lookup failed.  Fallthrough to kernel
	 * page table based lookup.
	 */
	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,a,pt		%xcc, kvmap_linear_early
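	/* Early in boot this branch sends linear misses to
	 * kvmap_linear_early above.  Once the kernel page tables fully
	 * describe the linear mapping, boot code overwrites the
	 * instruction at kvmap_linear_patch (hence the global label)
	 * with a nop, so execution falls through to the page table
	 * walk below instead.
	 */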

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
	ba,a,pt		%xcc, kvmap_dtlb_load
#endif
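	/* In this version vmemmap misses are resolved like any other
	 * kernel VA: walk the kernel page tables and load the PTE
	 * directly, bypassing the TSB (see the range check under
	 * kvmap_dtlb_nonlinear below).
	 */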

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	sethi		%hi(VMEMMAP_BASE), %g5
	ldx		[%g5 + %lo(VMEMMAP_BASE)], %g5
	cmp		%g4,%g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 sethi		%hi(VMALLOC_END), %g5
	ldx		[%g5 + %lo(VMALLOC_END)], %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop
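	/* Net effect of the checks above: VAs outside
	 * [MODULES_VADDR, VMALLOC_END) fault; inside that window,
	 * [LOW_OBP_ADDRESS, 4GB) goes to the OBP translation lookup
	 * and everything else goes through the kernel page tables.
	 */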

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1
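	/* This compare of %tl against 1 is consumed by the be,pt at
	 * the bottom: a miss taken at trap level 1 goes straight to
	 * sparc64_realfault_common, while a nested miss (e.g. during
	 * a register window spill or fill) must detour through
	 * winfix_trampoline first.
	 */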

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g5 here.
	 */

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop
v3.1
 
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

	.text
	.align		32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

kvmap_itlb_nonlinear:
	/* Catch kernel NULL pointer calls.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_itlb_longpath
	 TSB_STORE(%g1, %g7)
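	/* A TTE's valid bit is bit 63, so a non-negative PTE in %g5 is
	 * invalid; the annulled delay slot runs only on the taken
	 * branch, storing the TSB-invalid tag so the entry locked by
	 * TSB_LOCK_TAG is released before we bail to the long path.
	 */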

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	nop
	.previous

	rdpr	%tpc, %g5
	ba,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

	.align		32
kvmap_dtlb_tsb4m_load:
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB.  */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* TSB entry address left in %g1, lookup linear PTE.
	 * Must preserve %g1 and %g6 (TAG).
	 */
kvmap_dtlb_tsb4m_miss:
	/* Clear the PAGE_OFFSET top virtual bits, shift
	 * down to get PFN, and make sure PFN is in range.
	 */
	sllx		%g4, 21, %g5

	/* Check to see if we know about valid memory at the 4MB
	 * chunk this physical address will reside within.
	 */
	srlx		%g5, 21 + 41, %g2
	brnz,pn		%g2, kvmap_dtlb_longpath
	 nop

	/* This unconditional branch and delay-slot nop gets patched
	 * by the sethi sequence once the bitmap is properly setup.
	 */
	.globl		valid_addr_bitmap_insn
valid_addr_bitmap_insn:
	ba,pt		%xcc, 2f
	 nop
	.subsection	2
	.globl		valid_addr_bitmap_patch
valid_addr_bitmap_patch:
	sethi		%hi(sparc64_valid_addr_bitmap), %g7
	or		%g7, %lo(sparc64_valid_addr_bitmap), %g7
	.previous
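	/* What the next eight instructions compute, as C-ish
	 * pseudocode (an illustration, not code from this file):
	 *
	 *	idx  = paddr >> 22;	(4MB chunk index)
	 *	word = sparc64_valid_addr_bitmap[idx >> 6];
	 *	if (!(word & (1UL << (idx & 63))))
	 *		goto kvmap_dtlb_longpath;
	 */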

	srlx		%g5, 21 + 22, %g2
	srlx		%g2, 6, %g5
	and		%g2, 63, %g2
	sllx		%g5, 3, %g5
	ldx		[%g7 + %g5], %g5
	mov		1, %g7
	sllx		%g7, %g2, %g7
	andcc		%g5, %g7, %g0
	be,pn		%xcc, kvmap_dtlb_longpath

2:	 sethi		%hi(kpte_linear_bitmap), %g2
	or		%g2, %lo(kpte_linear_bitmap), %g2

	/* Get the 256MB physical address index. */
	sllx		%g4, 21, %g5
	mov		1, %g7
	srlx		%g5, 21 + 28, %g5

	/* Don't try this at home kids... this depends upon srlx
	 * only taking the low 6 bits of the shift count in %g5.
	 */
	sllx		%g7, %g5, %g7
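	/* That is safe because the bit we want within the selected
	 * 64-bit word is exactly (index % 64), which is what a shift
	 * count truncated to 6 bits yields; e.g. an index of 70
	 * shifts by 70 & 63 == 6, and the word offset computed below
	 * (index / 64) picks up the rest.
	 */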

	/* Divide by 64 to get the offset into the bitmask.  */
	srlx		%g5, 6, %g5
	sllx		%g5, 3, %g5

	/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
	ldx		[%g2 + %g5], %g2
	andcc		%g2, %g7, %g0
	sethi		%hi(kern_linear_pte_xor), %g5
	or		%g5, %lo(kern_linear_pte_xor), %g5
	bne,a,pt	%xcc, 1f
	 add		%g5, 8, %g5

1:	ldx		[%g5], %g2

	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	/* Load and check PTE.  */
	ldxa		[%g5] ASI_PHYS_USE_EC, %g5
	mov		1, %g7
	sllx		%g7, TSB_TAG_INVALID_BIT, %g7
	brgez,a,pn	%g5, kvmap_dtlb_longpath
	 TSB_STORE(%g1, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are setup
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6:	TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	sub		%g4, %g5, %g5
	srlx		%g5, 22, %g5
	sethi		%hi(vmemmap_table), %g1
	sllx		%g5, 3, %g5
	or		%g1, %lo(vmemmap_table), %g1
	ba,pt		%xcc, kvmap_dtlb_load
	 ldx		[%g1 + %g5], %g5
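	/* %g5 arrives holding VMEMMAP_BASE from the branch site, so
	 * this block is simply (pseudocode, not code from this file):
	 *
	 *	pte = vmemmap_table[(vaddr - VMEMMAP_BASE) >> 22];
	 *
	 * i.e. one precomputed TTE per 4MB vmemmap chunk.
	 */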
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs.  */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap.  */
	mov		(VMEMMAP_BASE >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4,%g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 mov		(VMALLOC_END >> 40), %g5
	sllx		%g5, 40, %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr	%pstate, %g5
	wrpr	%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr	%tl, %g3
	cmp	%g3, 1

661:	mov	TLB_TAG_ACCESS, %g4
	ldxa	[%g4] ASI_DMMU, %g5
	.section .sun4v_2insn_patch, "ax"
	.word	661b
	ldx	[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	be,pt	%xcc, sparc64_realfault_common
	 mov	FAULT_CODE_DTLB, %g4
	ba,pt	%xcc, winfix_trampoline
	 nop