Linux Audio

Check our new training course

Listing 1 of 2: ultra.S (SPARC64 TLB and I-/D-cache flush assembly) as captured from Linux v3.1.
  1/*
  2 * ultra.S: Don't expand these all over the place...
  3 *
  4 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
  5 */
  6
  7#include <asm/asi.h>
  8#include <asm/pgtable.h>
  9#include <asm/page.h>
 10#include <asm/spitfire.h>
 11#include <asm/mmu_context.h>
 12#include <asm/mmu.h>
 13#include <asm/pil.h>
 14#include <asm/head.h>
 15#include <asm/thread_info.h>
 16#include <asm/cacheflush.h>
 17#include <asm/hypervisor.h>
 18#include <asm/cpudata.h>
 19
 20	/* Basically, most of the Spitfire vs. Cheetah madness
 21	 * has to do with the fact that Cheetah does not support
 22	 * IMMU flushes out of the secondary context.  Someone needs
 23	 * to throw a south lake birthday party for the folks
 24	 * in Microelectronics who refused to fix this shit.
 25	 */
 26
 27	/* This file is meant to be read efficiently by the CPU, not humans.
 28	 * Staraj sie tego nikomu nie pierdolnac...
 29	 */
 30	.text
 31	.align		32
 32	.globl		__flush_tlb_mm
 33__flush_tlb_mm:		/* 18 insns */
 34	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
 35	ldxa		[%o1] ASI_DMMU, %g2
 36	cmp		%g2, %o0
 37	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
 38	 mov		0x50, %g3
 39	stxa		%g0, [%g3] ASI_DMMU_DEMAP
 40	stxa		%g0, [%g3] ASI_IMMU_DEMAP
 41	sethi		%hi(KERNBASE), %g3
 42	flush		%g3
 43	retl
 44	 nop
 45	nop
 46	nop
 47	nop
 48	nop
 49	nop
 50	nop
 51	nop
 52	nop
 53	nop
 54
 55	.align		32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 56	.globl		__flush_tlb_pending
 57__flush_tlb_pending:	/* 26 insns */
 58	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
 59	rdpr		%pstate, %g7
 60	sllx		%o1, 3, %o1
 61	andn		%g7, PSTATE_IE, %g2
 62	wrpr		%g2, %pstate
 63	mov		SECONDARY_CONTEXT, %o4
 64	ldxa		[%o4] ASI_DMMU, %g2
 65	stxa		%o0, [%o4] ASI_DMMU
 661:	sub		%o1, (1 << 3), %o1
 67	ldx		[%o2 + %o1], %o3
 68	andcc		%o3, 1, %g0
 69	andn		%o3, 1, %o3
 70	be,pn		%icc, 2f
 71	 or		%o3, 0x10, %o3
 72	stxa		%g0, [%o3] ASI_IMMU_DEMAP
 732:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
 74	membar		#Sync
 75	brnz,pt		%o1, 1b
 76	 nop
 77	stxa		%g2, [%o4] ASI_DMMU
 78	sethi		%hi(KERNBASE), %o4
 79	flush		%o4
 80	retl
 81	 wrpr		%g7, 0x0, %pstate
 82	nop
 83	nop
 84	nop
 85	nop
 86
 87	.align		32
 88	.globl		__flush_tlb_kernel_range
 89__flush_tlb_kernel_range:	/* 16 insns */
 90	/* %o0=start, %o1=end */
 91	cmp		%o0, %o1
  92	be,pn		%xcc, 2f
  93	 sethi		%hi(PAGE_SIZE), %o4
 94	sub		%o1, %o0, %o3
 95	sub		%o3, %o4, %o3
 96	or		%o0, 0x20, %o0		! Nucleus
 971:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
 98	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
 99	membar		#Sync
100	brnz,pt		%o3, 1b
101	 sub		%o3, %o4, %o3
1022:	sethi		%hi(KERNBASE), %o3
103	flush		%o3
104	retl
105	 nop
106	nop
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
108__spitfire_flush_tlb_mm_slow:
109	rdpr		%pstate, %g1
110	wrpr		%g1, PSTATE_IE, %pstate
111	stxa		%o0, [%o1] ASI_DMMU
112	stxa		%g0, [%g3] ASI_DMMU_DEMAP
113	stxa		%g0, [%g3] ASI_IMMU_DEMAP
114	flush		%g6
115	stxa		%g2, [%o1] ASI_DMMU
116	sethi		%hi(KERNBASE), %o1
117	flush		%o1
118	retl
119	 wrpr		%g1, 0, %pstate
120
121/*
122 * The following code flushes one page_size worth.
123 */
124	.section .kprobes.text, "ax"
125	.align		32
126	.globl		__flush_icache_page
127__flush_icache_page:	/* %o0 = phys_page */
128	srlx		%o0, PAGE_SHIFT, %o0
129	sethi		%uhi(PAGE_OFFSET), %g1
130	sllx		%o0, PAGE_SHIFT, %o0
131	sethi		%hi(PAGE_SIZE), %g2
132	sllx		%g1, 32, %g1
133	add		%o0, %g1, %o0
1341:	subcc		%g2, 32, %g2
135	bne,pt		%icc, 1b
136	 flush		%o0 + %g2
137	retl
138	 nop
139
140#ifdef DCACHE_ALIASING_POSSIBLE
141
142#if (PAGE_SHIFT != 13)
143#error only page shift of 13 is supported by dcache flush
144#endif
145
146#define DTAG_MASK 0x3
147
148	/* This routine is Spitfire specific so the hardcoded
149	 * D-cache size and line-size are OK.
150	 */
151	.align		64
152	.globl		__flush_dcache_page
153__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
154	sethi		%uhi(PAGE_OFFSET), %g1
155	sllx		%g1, 32, %g1
156	sub		%o0, %g1, %o0			! physical address
157	srlx		%o0, 11, %o0			! make D-cache TAG
158	sethi		%hi(1 << 14), %o2		! D-cache size
159	sub		%o2, (1 << 5), %o2		! D-cache line size
1601:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
161	andcc		%o3, DTAG_MASK, %g0		! Valid?
162	be,pn		%xcc, 2f			! Nope, branch
163	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
164	cmp		%o3, %o0			! TAG match?
165	bne,pt		%xcc, 2f			! Nope, branch
166	 nop
167	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
168	membar		#Sync
1692:	brnz,pt		%o2, 1b
170	 sub		%o2, (1 << 5), %o2		! D-cache line size
171
172	/* The I-cache does not snoop local stores so we
173	 * better flush that too when necessary.
174	 */
175	brnz,pt		%o1, __flush_icache_page
176	 sllx		%o0, 11, %o0
177	retl
178	 nop
179
180#endif /* DCACHE_ALIASING_POSSIBLE */
181
182	.previous
183
184	/* Cheetah specific versions, patched at boot time. */
185__cheetah_flush_tlb_mm: /* 19 insns */
186	rdpr		%pstate, %g7
187	andn		%g7, PSTATE_IE, %g2
188	wrpr		%g2, 0x0, %pstate
189	wrpr		%g0, 1, %tl
190	mov		PRIMARY_CONTEXT, %o2
191	mov		0x40, %g3
192	ldxa		[%o2] ASI_DMMU, %g2
193	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
194	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
195	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
196	stxa		%o0, [%o2] ASI_DMMU
197	stxa		%g0, [%g3] ASI_DMMU_DEMAP
198	stxa		%g0, [%g3] ASI_IMMU_DEMAP
199	stxa		%g2, [%o2] ASI_DMMU
200	sethi		%hi(KERNBASE), %o2
201	flush		%o2
202	wrpr		%g0, 0, %tl
203	retl
204	 wrpr		%g7, 0x0, %pstate
205
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
206__cheetah_flush_tlb_pending:	/* 27 insns */
207	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
208	rdpr		%pstate, %g7
209	sllx		%o1, 3, %o1
210	andn		%g7, PSTATE_IE, %g2
211	wrpr		%g2, 0x0, %pstate
212	wrpr		%g0, 1, %tl
213	mov		PRIMARY_CONTEXT, %o4
214	ldxa		[%o4] ASI_DMMU, %g2
215	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
216	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
217	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
218	stxa		%o0, [%o4] ASI_DMMU
2191:	sub		%o1, (1 << 3), %o1
220	ldx		[%o2 + %o1], %o3
221	andcc		%o3, 1, %g0
222	be,pn		%icc, 2f
223	 andn		%o3, 1, %o3
224	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2252:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	
226	membar		#Sync
227	brnz,pt		%o1, 1b
228	 nop
229	stxa		%g2, [%o4] ASI_DMMU
230	sethi		%hi(KERNBASE), %o4
231	flush		%o4
232	wrpr		%g0, 0, %tl
233	retl
234	 wrpr		%g7, 0x0, %pstate
235
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
236#ifdef DCACHE_ALIASING_POSSIBLE
237__cheetah_flush_dcache_page: /* 11 insns */
238	sethi		%uhi(PAGE_OFFSET), %g1
239	sllx		%g1, 32, %g1
240	sub		%o0, %g1, %o0
241	sethi		%hi(PAGE_SIZE), %o4
2421:	subcc		%o4, (1 << 5), %o4
243	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
244	membar		#Sync
245	bne,pt		%icc, 1b
246	 nop
247	retl		/* I-cache flush never needed on Cheetah, see callers. */
248	 nop
249#endif /* DCACHE_ALIASING_POSSIBLE */
250
251	/* Hypervisor specific versions, patched at boot time.  */
252__hypervisor_tlb_tl0_error:
253	save		%sp, -192, %sp
254	mov		%i0, %o0
255	call		hypervisor_tlbop_error
256	 mov		%i1, %o1
257	ret
258	 restore
259
260__hypervisor_flush_tlb_mm: /* 10 insns */
261	mov		%o0, %o2	/* ARG2: mmu context */
262	mov		0, %o0		/* ARG0: CPU lists unimplemented */
263	mov		0, %o1		/* ARG1: CPU lists unimplemented */
264	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
265	mov		HV_FAST_MMU_DEMAP_CTX, %o5
266	ta		HV_FAST_TRAP
267	brnz,pn		%o0, __hypervisor_tlb_tl0_error
268	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
269	retl
270	 nop
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
271
272__hypervisor_flush_tlb_pending: /* 16 insns */
273	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
274	sllx		%o1, 3, %g1
275	mov		%o2, %g2
276	mov		%o0, %g3
2771:	sub		%g1, (1 << 3), %g1
278	ldx		[%g2 + %g1], %o0      /* ARG0: vaddr + IMMU-bit */
279	mov		%g3, %o1	      /* ARG1: mmu context */
280	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
281	srlx		%o0, PAGE_SHIFT, %o0
282	sllx		%o0, PAGE_SHIFT, %o0
283	ta		HV_MMU_UNMAP_ADDR_TRAP
284	brnz,pn		%o0, __hypervisor_tlb_tl0_error
285	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
286	brnz,pt		%g1, 1b
287	 nop
288	retl
289	 nop
 
 
 
 
 
 
 
 
 
 
 
290
291__hypervisor_flush_tlb_kernel_range: /* 16 insns */
292	/* %o0=start, %o1=end */
293	cmp		%o0, %o1
294	be,pn		%xcc, 2f
295	 sethi		%hi(PAGE_SIZE), %g3
296	mov		%o0, %g1
 297	sub		%o1, %g1, %g2
 298	sub		%g2, %g3, %g2
2991:	add		%g1, %g2, %o0	/* ARG0: virtual address */
300	mov		0, %o1		/* ARG1: mmu context */
301	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
302	ta		HV_MMU_UNMAP_ADDR_TRAP
303	brnz,pn		%o0, __hypervisor_tlb_tl0_error
304	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
305	brnz,pt		%g2, 1b
306	 sub		%g2, %g3, %g2
3072:	retl
308	 nop
 
 
 
 
 
 
 
 
 
 
 
 
 
309
310#ifdef DCACHE_ALIASING_POSSIBLE
311	/* XXX Niagara and friends have an 8K cache, so no aliasing is
312	 * XXX possible, but nothing explicit in the Hypervisor API
313	 * XXX guarantees this.
314	 */
315__hypervisor_flush_dcache_page:	/* 2 insns */
316	retl
317	 nop
318#endif
319
320tlb_patch_one:
3211:	lduw		[%o1], %g1
322	stw		%g1, [%o0]
323	flush		%o0
324	subcc		%o2, 1, %o2
325	add		%o1, 4, %o1
326	bne,pt		%icc, 1b
327	 add		%o0, 4, %o0
328	retl
329	 nop
330
331	.globl		cheetah_patch_cachetlbops
332cheetah_patch_cachetlbops:
333	save		%sp, -128, %sp
334
335	sethi		%hi(__flush_tlb_mm), %o0
336	or		%o0, %lo(__flush_tlb_mm), %o0
337	sethi		%hi(__cheetah_flush_tlb_mm), %o1
338	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
339	call		tlb_patch_one
340	 mov		19, %o2
341
342	sethi		%hi(__flush_tlb_pending), %o0
343	or		%o0, %lo(__flush_tlb_pending), %o0
344	sethi		%hi(__cheetah_flush_tlb_pending), %o1
345	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
346	call		tlb_patch_one
347	 mov		27, %o2
348
349#ifdef DCACHE_ALIASING_POSSIBLE
350	sethi		%hi(__flush_dcache_page), %o0
351	or		%o0, %lo(__flush_dcache_page), %o0
352	sethi		%hi(__cheetah_flush_dcache_page), %o1
353	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
354	call		tlb_patch_one
355	 mov		11, %o2
356#endif /* DCACHE_ALIASING_POSSIBLE */
357
358	ret
359	 restore
360
361#ifdef CONFIG_SMP
362	/* These are all called by the slaves of a cross call, at
363	 * trap level 1, with interrupts fully disabled.
364	 *
365	 * Register usage:
366	 *   %g5	mm->context	(all tlb flushes)
367	 *   %g1	address arg 1	(tlb page and range flushes)
368	 *   %g7	address arg 2	(tlb range flush only)
369	 *
370	 *   %g6	scratch 1
371	 *   %g2	scratch 2
372	 *   %g3	scratch 3
373	 *   %g4	scratch 4
374	 */
375	.align		32
376	.globl		xcall_flush_tlb_mm
377xcall_flush_tlb_mm:	/* 21 insns */
378	mov		PRIMARY_CONTEXT, %g2
379	ldxa		[%g2] ASI_DMMU, %g3
380	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
381	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
382	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
383	stxa		%g5, [%g2] ASI_DMMU
384	mov		0x40, %g4
385	stxa		%g0, [%g4] ASI_DMMU_DEMAP
386	stxa		%g0, [%g4] ASI_IMMU_DEMAP
387	stxa		%g3, [%g2] ASI_DMMU
388	retry
389	nop
390	nop
391	nop
392	nop
393	nop
394	nop
395	nop
396	nop
397	nop
398	nop
 
 
 
399
400	.globl		xcall_flush_tlb_pending
401xcall_flush_tlb_pending:	/* 21 insns */
402	/* %g5=context, %g1=nr, %g7=vaddrs[] */
403	sllx		%g1, 3, %g1
404	mov		PRIMARY_CONTEXT, %g4
405	ldxa		[%g4] ASI_DMMU, %g2
406	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
407	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
408	or		%g5, %g4, %g5
409	mov		PRIMARY_CONTEXT, %g4
410	stxa		%g5, [%g4] ASI_DMMU
4111:	sub		%g1, (1 << 3), %g1
412	ldx		[%g7 + %g1], %g5
413	andcc		%g5, 0x1, %g0
414	be,pn		%icc, 2f
415
416	 andn		%g5, 0x1, %g5
417	stxa		%g0, [%g5] ASI_IMMU_DEMAP
4182:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
419	membar		#Sync
420	brnz,pt		%g1, 1b
421	 nop
422	stxa		%g2, [%g4] ASI_DMMU
423	retry
424	nop
 
 
 
 
425
426	.globl		xcall_flush_tlb_kernel_range
427xcall_flush_tlb_kernel_range:	/* 25 insns */
428	sethi		%hi(PAGE_SIZE - 1), %g2
429	or		%g2, %lo(PAGE_SIZE - 1), %g2
430	andn		%g1, %g2, %g1
431	andn		%g7, %g2, %g7
432	sub		%g7, %g1, %g3
 433	add		%g2, 1, %g2
 434	sub		%g3, %g2, %g3
435	or		%g1, 0x20, %g1		! Nucleus
4361:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
437	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
438	membar		#Sync
439	brnz,pt		%g3, 1b
440	 sub		%g3, %g2, %g3
441	retry
442	nop
443	nop
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
444	nop
445	nop
446	nop
447	nop
448	nop
449	nop
450	nop
451	nop
452	nop
453
454	/* This runs in a very controlled environment, so we do
455	 * not need to worry about BH races etc.
456	 */
457	.globl		xcall_sync_tick
458xcall_sync_tick:
459
460661:	rdpr		%pstate, %g2
461	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
462	.section	.sun4v_2insn_patch, "ax"
463	.word		661b
464	nop
465	nop
466	.previous
467
468	rdpr		%pil, %g2
469	wrpr		%g0, PIL_NORMAL_MAX, %pil
470	sethi		%hi(109f), %g7
471	b,pt		%xcc, etrap_irq
472109:	 or		%g7, %lo(109b), %g7
473#ifdef CONFIG_TRACE_IRQFLAGS
474	call		trace_hardirqs_off
475	 nop
476#endif
477	call		smp_synchronize_tick_client
478	 nop
479	b		rtrap_xcall
480	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
481
482	.globl		xcall_fetch_glob_regs
483xcall_fetch_glob_regs:
484	sethi		%hi(global_reg_snapshot), %g1
485	or		%g1, %lo(global_reg_snapshot), %g1
486	__GET_CPUID(%g2)
487	sllx		%g2, 6, %g3
488	add		%g1, %g3, %g1
489	rdpr		%tstate, %g7
490	stx		%g7, [%g1 + GR_SNAP_TSTATE]
491	rdpr		%tpc, %g7
492	stx		%g7, [%g1 + GR_SNAP_TPC]
493	rdpr		%tnpc, %g7
494	stx		%g7, [%g1 + GR_SNAP_TNPC]
495	stx		%o7, [%g1 + GR_SNAP_O7]
496	stx		%i7, [%g1 + GR_SNAP_I7]
497	/* Don't try this at home kids... */
498	rdpr		%cwp, %g2
499	sub		%g2, 1, %g7
500	wrpr		%g7, %cwp
501	mov		%i7, %g7
502	wrpr		%g2, %cwp
503	stx		%g7, [%g1 + GR_SNAP_RPC]
504	sethi		%hi(trap_block), %g7
505	or		%g7, %lo(trap_block), %g7
506	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
507	add		%g7, %g2, %g7
508	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
509	stx		%g3, [%g1 + GR_SNAP_THREAD]
510	retry
511
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
512#ifdef DCACHE_ALIASING_POSSIBLE
513	.align		32
514	.globl		xcall_flush_dcache_page_cheetah
515xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
516	sethi		%hi(PAGE_SIZE), %g3
5171:	subcc		%g3, (1 << 5), %g3
518	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
519	membar		#Sync
520	bne,pt		%icc, 1b
521	 nop
522	retry
523	nop
524#endif /* DCACHE_ALIASING_POSSIBLE */
525
526	.globl		xcall_flush_dcache_page_spitfire
527xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
528				     %g7 == kernel page virtual address
529				     %g5 == (page->mapping != NULL)  */
530#ifdef DCACHE_ALIASING_POSSIBLE
531	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
532	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
533	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
5341:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
535	andcc		%g2, 0x3, %g0
536	be,pn		%xcc, 2f
537	 andn		%g2, 0x3, %g2
538	cmp		%g2, %g1
539
540	bne,pt		%xcc, 2f
541	 nop
542	stxa		%g0, [%g3] ASI_DCACHE_TAG
543	membar		#Sync
5442:	cmp		%g3, 0
545	bne,pt		%xcc, 1b
546	 sub		%g3, (1 << 5), %g3
547
548	brz,pn		%g5, 2f
549#endif /* DCACHE_ALIASING_POSSIBLE */
550	 sethi		%hi(PAGE_SIZE), %g3
551
5521:	flush		%g7
553	subcc		%g3, (1 << 5), %g3
554	bne,pt		%icc, 1b
555	 add		%g7, (1 << 5), %g7
556
5572:	retry
558	nop
559	nop
560
561	/* %g5:	error
562	 * %g6:	tlb op
563	 */
564__hypervisor_tlb_xcall_error:
565	mov	%g5, %g4
566	mov	%g6, %g5
567	ba,pt	%xcc, etrap
568	 rd	%pc, %g7
569	mov	%l4, %o0
570	call	hypervisor_tlbop_error_xcall
571	 mov	%l5, %o1
572	ba,a,pt	%xcc, rtrap
573
574	.globl		__hypervisor_xcall_flush_tlb_mm
575__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
576	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
577	mov		%o0, %g2
578	mov		%o1, %g3
579	mov		%o2, %g4
580	mov		%o3, %g1
581	mov		%o5, %g7
582	clr		%o0		/* ARG0: CPU lists unimplemented */
583	clr		%o1		/* ARG1: CPU lists unimplemented */
584	mov		%g5, %o2	/* ARG2: mmu context */
585	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
586	mov		HV_FAST_MMU_DEMAP_CTX, %o5
587	ta		HV_FAST_TRAP
588	mov		HV_FAST_MMU_DEMAP_CTX, %g6
589	brnz,pn		%o0, __hypervisor_tlb_xcall_error
590	 mov		%o0, %g5
591	mov		%g2, %o0
592	mov		%g3, %o1
593	mov		%g4, %o2
594	mov		%g1, %o3
595	mov		%g7, %o5
596	membar		#Sync
597	retry
 
 
 
598
599	.globl		__hypervisor_xcall_flush_tlb_pending
600__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
601	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
602	sllx		%g1, 3, %g1
603	mov		%o0, %g2
604	mov		%o1, %g3
605	mov		%o2, %g4
6061:	sub		%g1, (1 << 3), %g1
607	ldx		[%g7 + %g1], %o0	/* ARG0: virtual address */
608	mov		%g5, %o1		/* ARG1: mmu context */
609	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
610	srlx		%o0, PAGE_SHIFT, %o0
611	sllx		%o0, PAGE_SHIFT, %o0
612	ta		HV_MMU_UNMAP_ADDR_TRAP
613	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
614	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
615	 mov		%o0, %g5
616	brnz,pt		%g1, 1b
617	 nop
618	mov		%g2, %o0
619	mov		%g3, %o1
620	mov		%g4, %o2
621	membar		#Sync
622	retry
 
 
 
623
624	.globl		__hypervisor_xcall_flush_tlb_kernel_range
625__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
626	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
627	sethi		%hi(PAGE_SIZE - 1), %g2
628	or		%g2, %lo(PAGE_SIZE - 1), %g2
629	andn		%g1, %g2, %g1
630	andn		%g7, %g2, %g7
 631	sub		%g7, %g1, %g3
 632	add		%g2, 1, %g2
633	sub		%g3, %g2, %g3
634	mov		%o0, %g2
635	mov		%o1, %g4
 636	mov		%o2, %g7
 6371:	add		%g1, %g3, %o0	/* ARG0: virtual address */
638	mov		0, %o1		/* ARG1: mmu context */
639	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
640	ta		HV_MMU_UNMAP_ADDR_TRAP
641	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
642	brnz,pn		%o0, __hypervisor_tlb_xcall_error
643	 mov		%o0, %g5
644	sethi		%hi(PAGE_SIZE), %o2
645	brnz,pt		%g3, 1b
646	 sub		%g3, %o2, %g3
647	mov		%g2, %o0
648	mov		%g4, %o1
649	mov		%g7, %o2
650	membar		#Sync
651	retry
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
652
653	/* These just get rescheduled to PIL vectors. */
654	.globl		xcall_call_function
655xcall_call_function:
656	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
657	retry
658
659	.globl		xcall_call_function_single
660xcall_call_function_single:
661	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
662	retry
663
664	.globl		xcall_receive_signal
665xcall_receive_signal:
666	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
667	retry
668
669	.globl		xcall_capture
670xcall_capture:
671	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
672	retry
673
674	.globl		xcall_new_mmu_context_version
675xcall_new_mmu_context_version:
676	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
677	retry
678
679#ifdef CONFIG_KGDB
680	.globl		xcall_kgdb_capture
681xcall_kgdb_capture:
682	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
683	retry
684#endif
685
686#endif /* CONFIG_SMP */
687
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
688
689	.globl		hypervisor_patch_cachetlbops
690hypervisor_patch_cachetlbops:
691	save		%sp, -128, %sp
692
693	sethi		%hi(__flush_tlb_mm), %o0
694	or		%o0, %lo(__flush_tlb_mm), %o0
695	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
696	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
697	call		tlb_patch_one
698	 mov		10, %o2
 
 
 
 
 
 
 
699
700	sethi		%hi(__flush_tlb_pending), %o0
701	or		%o0, %lo(__flush_tlb_pending), %o0
702	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
703	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
704	call		tlb_patch_one
705	 mov		16, %o2
706
707	sethi		%hi(__flush_tlb_kernel_range), %o0
708	or		%o0, %lo(__flush_tlb_kernel_range), %o0
709	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
710	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
711	call		tlb_patch_one
712	 mov		16, %o2
713
714#ifdef DCACHE_ALIASING_POSSIBLE
715	sethi		%hi(__flush_dcache_page), %o0
716	or		%o0, %lo(__flush_dcache_page), %o0
717	sethi		%hi(__hypervisor_flush_dcache_page), %o1
718	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
719	call		tlb_patch_one
720	 mov		2, %o2
721#endif /* DCACHE_ALIASING_POSSIBLE */
722
723#ifdef CONFIG_SMP
724	sethi		%hi(xcall_flush_tlb_mm), %o0
725	or		%o0, %lo(xcall_flush_tlb_mm), %o0
726	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
727	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
728	call		tlb_patch_one
729	 mov		21, %o2
730
731	sethi		%hi(xcall_flush_tlb_pending), %o0
732	or		%o0, %lo(xcall_flush_tlb_pending), %o0
733	sethi		%hi(__hypervisor_xcall_flush_tlb_pending), %o1
734	or		%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
735	call		tlb_patch_one
736	 mov		21, %o2
737
738	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
739	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
740	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
741	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
742	call		tlb_patch_one
743	 mov		25, %o2
744#endif /* CONFIG_SMP */
745
746	ret
747	 restore
Listing 2 of 2: the same file, ultra.S, as captured from Linux v4.10.11 (note the added __flush_tlb_page variants, the __spitfire_flush_tlb_kernel_range_slow path, and the sethi/jmpl error-dispatch sequences that replaced PC-relative branches in the boot-time-patched hypervisor routines).
   1/*
   2 * ultra.S: Don't expand these all over the place...
   3 *
   4 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
   5 */
   6
   7#include <asm/asi.h>
   8#include <asm/pgtable.h>
   9#include <asm/page.h>
  10#include <asm/spitfire.h>
  11#include <asm/mmu_context.h>
  12#include <asm/mmu.h>
  13#include <asm/pil.h>
  14#include <asm/head.h>
  15#include <asm/thread_info.h>
  16#include <asm/cacheflush.h>
  17#include <asm/hypervisor.h>
  18#include <asm/cpudata.h>
  19
  20	/* Basically, most of the Spitfire vs. Cheetah madness
  21	 * has to do with the fact that Cheetah does not support
  22	 * IMMU flushes out of the secondary context.  Someone needs
  23	 * to throw a south lake birthday party for the folks
  24	 * in Microelectronics who refused to fix this shit.
  25	 */
  26
  27	/* This file is meant to be read efficiently by the CPU, not humans.
  28	 * Staraj sie tego nikomu nie pierdolnac...
  29	 */
  30	.text
  31	.align		32
  32	.globl		__flush_tlb_mm
  33__flush_tlb_mm:		/* 19 insns */
  34	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
  35	ldxa		[%o1] ASI_DMMU, %g2
  36	cmp		%g2, %o0
  37	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
  38	 mov		0x50, %g3
  39	stxa		%g0, [%g3] ASI_DMMU_DEMAP
  40	stxa		%g0, [%g3] ASI_IMMU_DEMAP
  41	sethi		%hi(KERNBASE), %g3
  42	flush		%g3
  43	retl
  44	 nop
  45	nop
  46	nop
  47	nop
  48	nop
  49	nop
  50	nop
  51	nop
  52	nop
  53	nop
  54
  55	.align		32
  56	.globl		__flush_tlb_page
  57__flush_tlb_page:	/* 22 insns */
  58	/* %o0 = context, %o1 = vaddr */
  59	rdpr		%pstate, %g7
  60	andn		%g7, PSTATE_IE, %g2
  61	wrpr		%g2, %pstate
  62	mov		SECONDARY_CONTEXT, %o4
  63	ldxa		[%o4] ASI_DMMU, %g2
  64	stxa		%o0, [%o4] ASI_DMMU
  65	andcc		%o1, 1, %g0
  66	andn		%o1, 1, %o3
  67	be,pn		%icc, 1f
  68	 or		%o3, 0x10, %o3
  69	stxa		%g0, [%o3] ASI_IMMU_DEMAP
  701:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
  71	membar		#Sync
  72	stxa		%g2, [%o4] ASI_DMMU
  73	sethi		%hi(KERNBASE), %o4
  74	flush		%o4
  75	retl
  76	 wrpr		%g7, 0x0, %pstate
  77	nop
  78	nop
  79	nop
  80	nop
  81
  82	.align		32
  83	.globl		__flush_tlb_pending
  84__flush_tlb_pending:	/* 27 insns */
  85	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
  86	rdpr		%pstate, %g7
  87	sllx		%o1, 3, %o1
  88	andn		%g7, PSTATE_IE, %g2
  89	wrpr		%g2, %pstate
  90	mov		SECONDARY_CONTEXT, %o4
  91	ldxa		[%o4] ASI_DMMU, %g2
  92	stxa		%o0, [%o4] ASI_DMMU
  931:	sub		%o1, (1 << 3), %o1
  94	ldx		[%o2 + %o1], %o3
  95	andcc		%o3, 1, %g0
  96	andn		%o3, 1, %o3
  97	be,pn		%icc, 2f
  98	 or		%o3, 0x10, %o3
  99	stxa		%g0, [%o3] ASI_IMMU_DEMAP
 1002:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
 101	membar		#Sync
 102	brnz,pt		%o1, 1b
 103	 nop
 104	stxa		%g2, [%o4] ASI_DMMU
 105	sethi		%hi(KERNBASE), %o4
 106	flush		%o4
 107	retl
 108	 wrpr		%g7, 0x0, %pstate
 109	nop
 110	nop
 111	nop
 112	nop
 113
 114	.align		32
 115	.globl		__flush_tlb_kernel_range
 116__flush_tlb_kernel_range:	/* 31 insns */
 117	/* %o0=start, %o1=end */
 118	cmp		%o0, %o1
 119	be,pn		%xcc, 2f
 120	 sub		%o1, %o0, %o3
 121	srlx		%o3, 18, %o4
 122	brnz,pn		%o4, __spitfire_flush_tlb_kernel_range_slow
  123	 sethi		%hi(PAGE_SIZE), %o4
  124	sub		%o3, %o4, %o3
 125	or		%o0, 0x20, %o0		! Nucleus
 1261:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
 127	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
 128	membar		#Sync
 129	brnz,pt		%o3, 1b
 130	 sub		%o3, %o4, %o3
 1312:	sethi		%hi(KERNBASE), %o3
 132	flush		%o3
 133	retl
 134	 nop
 135	nop
 136	nop
 137	nop
 138	nop
 139	nop
 140	nop
 141	nop
 142	nop
 143	nop
 144	nop
 145	nop
 146	nop
 147	nop
 148	nop
 149
 150__spitfire_flush_tlb_kernel_range_slow:
 151	mov		63 * 8, %o4
 1521:	ldxa		[%o4] ASI_ITLB_DATA_ACCESS, %o3
 153	andcc		%o3, 0x40, %g0			/* _PAGE_L_4U */
 154	bne,pn		%xcc, 2f
 155	 mov		TLB_TAG_ACCESS, %o3
 156	stxa		%g0, [%o3] ASI_IMMU
 157	stxa		%g0, [%o4] ASI_ITLB_DATA_ACCESS
 158	membar		#Sync
 1592:	ldxa		[%o4] ASI_DTLB_DATA_ACCESS, %o3
 160	andcc		%o3, 0x40, %g0
 161	bne,pn		%xcc, 2f
 162	 mov		TLB_TAG_ACCESS, %o3
 163	stxa		%g0, [%o3] ASI_DMMU
 164	stxa		%g0, [%o4] ASI_DTLB_DATA_ACCESS
 165	membar		#Sync
 1662:	sub		%o4, 8, %o4
 167	brgez,pt	%o4, 1b
 168	 nop
 169	retl
 170	 nop
 171
 172__spitfire_flush_tlb_mm_slow:
 173	rdpr		%pstate, %g1
 174	wrpr		%g1, PSTATE_IE, %pstate
 175	stxa		%o0, [%o1] ASI_DMMU
 176	stxa		%g0, [%g3] ASI_DMMU_DEMAP
 177	stxa		%g0, [%g3] ASI_IMMU_DEMAP
 178	flush		%g6
 179	stxa		%g2, [%o1] ASI_DMMU
 180	sethi		%hi(KERNBASE), %o1
 181	flush		%o1
 182	retl
 183	 wrpr		%g1, 0, %pstate
 184
 185/*
 186 * The following code flushes one page_size worth.
 187 */
 188	.section .kprobes.text, "ax"
 189	.align		32
 190	.globl		__flush_icache_page
 191__flush_icache_page:	/* %o0 = phys_page */
 192	srlx		%o0, PAGE_SHIFT, %o0
 193	sethi		%hi(PAGE_OFFSET), %g1
 194	sllx		%o0, PAGE_SHIFT, %o0
 195	sethi		%hi(PAGE_SIZE), %g2
 196	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
 197	add		%o0, %g1, %o0
 1981:	subcc		%g2, 32, %g2
 199	bne,pt		%icc, 1b
 200	 flush		%o0 + %g2
 201	retl
 202	 nop
 203
 204#ifdef DCACHE_ALIASING_POSSIBLE
 205
 206#if (PAGE_SHIFT != 13)
 207#error only page shift of 13 is supported by dcache flush
 208#endif
 209
 210#define DTAG_MASK 0x3
 211
 212	/* This routine is Spitfire specific so the hardcoded
 213	 * D-cache size and line-size are OK.
 214	 */
 215	.align		64
 216	.globl		__flush_dcache_page
 217__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
 218	sethi		%hi(PAGE_OFFSET), %g1
 219	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
 220	sub		%o0, %g1, %o0			! physical address
 221	srlx		%o0, 11, %o0			! make D-cache TAG
 222	sethi		%hi(1 << 14), %o2		! D-cache size
 223	sub		%o2, (1 << 5), %o2		! D-cache line size
 2241:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
 225	andcc		%o3, DTAG_MASK, %g0		! Valid?
 226	be,pn		%xcc, 2f			! Nope, branch
 227	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
 228	cmp		%o3, %o0			! TAG match?
 229	bne,pt		%xcc, 2f			! Nope, branch
 230	 nop
 231	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
 232	membar		#Sync
 2332:	brnz,pt		%o2, 1b
 234	 sub		%o2, (1 << 5), %o2		! D-cache line size
 235
 236	/* The I-cache does not snoop local stores so we
 237	 * better flush that too when necessary.
 238	 */
 239	brnz,pt		%o1, __flush_icache_page
 240	 sllx		%o0, 11, %o0
 241	retl
 242	 nop
 243
 244#endif /* DCACHE_ALIASING_POSSIBLE */
 245
 246	.previous
 247
 248	/* Cheetah specific versions, patched at boot time. */
 249__cheetah_flush_tlb_mm: /* 19 insns */
 250	rdpr		%pstate, %g7
 251	andn		%g7, PSTATE_IE, %g2
 252	wrpr		%g2, 0x0, %pstate
 253	wrpr		%g0, 1, %tl
 254	mov		PRIMARY_CONTEXT, %o2
 255	mov		0x40, %g3
 256	ldxa		[%o2] ASI_DMMU, %g2
 257	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
 258	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
 259	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
 260	stxa		%o0, [%o2] ASI_DMMU
 261	stxa		%g0, [%g3] ASI_DMMU_DEMAP
 262	stxa		%g0, [%g3] ASI_IMMU_DEMAP
 263	stxa		%g2, [%o2] ASI_DMMU
 264	sethi		%hi(KERNBASE), %o2
 265	flush		%o2
 266	wrpr		%g0, 0, %tl
 267	retl
 268	 wrpr		%g7, 0x0, %pstate
 269
 270__cheetah_flush_tlb_page:	/* 22 insns */
 271	/* %o0 = context, %o1 = vaddr */
 272	rdpr		%pstate, %g7
 273	andn		%g7, PSTATE_IE, %g2
 274	wrpr		%g2, 0x0, %pstate
 275	wrpr		%g0, 1, %tl
 276	mov		PRIMARY_CONTEXT, %o4
 277	ldxa		[%o4] ASI_DMMU, %g2
 278	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
 279	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
 280	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
 281	stxa		%o0, [%o4] ASI_DMMU
 282	andcc		%o1, 1, %g0
 283	be,pn		%icc, 1f
 284	 andn		%o1, 1, %o3
 285	stxa		%g0, [%o3] ASI_IMMU_DEMAP
 2861:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	
 287	membar		#Sync
 288	stxa		%g2, [%o4] ASI_DMMU
 289	sethi		%hi(KERNBASE), %o4
 290	flush		%o4
 291	wrpr		%g0, 0, %tl
 292	retl
 293	 wrpr		%g7, 0x0, %pstate
 294
 295__cheetah_flush_tlb_pending:	/* 27 insns */
 296	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
 297	rdpr		%pstate, %g7
 298	sllx		%o1, 3, %o1
 299	andn		%g7, PSTATE_IE, %g2
 300	wrpr		%g2, 0x0, %pstate
 301	wrpr		%g0, 1, %tl
 302	mov		PRIMARY_CONTEXT, %o4
 303	ldxa		[%o4] ASI_DMMU, %g2
 304	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
 305	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
 306	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
 307	stxa		%o0, [%o4] ASI_DMMU
 3081:	sub		%o1, (1 << 3), %o1
 309	ldx		[%o2 + %o1], %o3
 310	andcc		%o3, 1, %g0
 311	be,pn		%icc, 2f
 312	 andn		%o3, 1, %o3
 313	stxa		%g0, [%o3] ASI_IMMU_DEMAP
 3142:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	
 315	membar		#Sync
 316	brnz,pt		%o1, 1b
 317	 nop
 318	stxa		%g2, [%o4] ASI_DMMU
 319	sethi		%hi(KERNBASE), %o4
 320	flush		%o4
 321	wrpr		%g0, 0, %tl
 322	retl
 323	 wrpr		%g7, 0x0, %pstate
 324
 325__cheetah_flush_tlb_kernel_range:	/* 31 insns */
 326	/* %o0=start, %o1=end */
 327	cmp		%o0, %o1
 328	be,pn		%xcc, 2f
 329	 sub		%o1, %o0, %o3
 330	srlx		%o3, 18, %o4
 331	brnz,pn		%o4, 3f
 332	 sethi		%hi(PAGE_SIZE), %o4
 333	sub		%o3, %o4, %o3
 334	or		%o0, 0x20, %o0		! Nucleus
 3351:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
 336	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
 337	membar		#Sync
 338	brnz,pt		%o3, 1b
 339	 sub		%o3, %o4, %o3
 3402:	sethi		%hi(KERNBASE), %o3
 341	flush		%o3
 342	retl
 343	 nop
 3443:	mov		0x80, %o4
 345	stxa		%g0, [%o4] ASI_DMMU_DEMAP
 346	membar		#Sync
 347	stxa		%g0, [%o4] ASI_IMMU_DEMAP
 348	membar		#Sync
 349	retl
 350	 nop
 351	nop
 352	nop
 353	nop
 354	nop
 355	nop
 356	nop
 357	nop
 358
 359#ifdef DCACHE_ALIASING_POSSIBLE
 360__cheetah_flush_dcache_page: /* 11 insns */
 361	sethi		%hi(PAGE_OFFSET), %g1
 362	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
 363	sub		%o0, %g1, %o0
 364	sethi		%hi(PAGE_SIZE), %o4
 3651:	subcc		%o4, (1 << 5), %o4
 366	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
 367	membar		#Sync
 368	bne,pt		%icc, 1b
 369	 nop
 370	retl		/* I-cache flush never needed on Cheetah, see callers. */
 371	 nop
 372#endif /* DCACHE_ALIASING_POSSIBLE */
 373
 374	/* Hypervisor specific versions, patched at boot time.  */
 375__hypervisor_tlb_tl0_error:
 376	save		%sp, -192, %sp
 377	mov		%i0, %o0
 378	call		hypervisor_tlbop_error
 379	 mov		%i1, %o1
 380	ret
 381	 restore
 382
 383__hypervisor_flush_tlb_mm: /* 19 insns */
 384	mov		%o0, %o2	/* ARG2: mmu context */
 385	mov		0, %o0		/* ARG0: CPU lists unimplemented */
 386	mov		0, %o1		/* ARG1: CPU lists unimplemented */
 387	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
 388	mov		HV_FAST_MMU_DEMAP_CTX, %o5
 389	ta		HV_FAST_TRAP
 390	brnz,pn		%o0, 1f
 391	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
 392	retl
 393	 nop
 3941:	sethi		%hi(__hypervisor_tlb_tl0_error), %o5
 395	jmpl		%o5 + %lo(__hypervisor_tlb_tl0_error), %g0
 396	 nop
 397	nop
 398	nop
 399	nop
 400	nop
 401	nop
 402	nop
 403
	/* __hypervisor_flush_tlb_page(ctx, vaddr)
	 *
	 * Unmaps one page translation (I+D) via HV_MMU_UNMAP_ADDR_TRAP.
	 * The low bits of the vaddr are masked off with a shift pair.
	 * Patched over __flush_tlb_page at boot (must stay 22 insns).
	 */
__hypervisor_flush_tlb_page: /* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	mov		%o0, %g2
	mov		%o1, %o0              /* ARG0: vaddr + IMMU-bit */
	mov		%g2, %o1	      /* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0	/* page-align the address */
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, 1f			/* error -> report and stop */
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	retl
	 nop
1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
 428
	/* __hypervisor_flush_tlb_pending(ctx, nr, vaddrs[])
	 *
	 * Walks the vaddrs[] array backwards and unmaps each page with
	 * an HV_MMU_UNMAP_ADDR_TRAP.  %g1 = remaining byte offset into
	 * the array, %g2 = array base, %g3 = context (globals survive
	 * the trap).  Patched over __flush_tlb_pending (27 insns).
	 */
__hypervisor_flush_tlb_pending: /* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx		%o1, 3, %g1		/* %g1 = nr * 8 bytes */
	mov		%o2, %g2
	mov		%o0, %g3
1:	sub		%g1, (1 << 3), %g1	/* next (previous) entry */
	ldx		[%g2 + %g1], %o0      /* ARG0: vaddr + IMMU-bit */
	mov		%g3, %o1	      /* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0	/* strip low bits/flags */
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, 1f			/* error -> report */
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g1, 1b			/* more entries? */
	 nop
	retl
	 nop
1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
 458
	/* __hypervisor_flush_tlb_kernel_range(start, end)
	 *
	 * Unmaps the nucleus (context 0) translations for [start, end)
	 * page by page.  If the range is at least 256KB (end-start
	 * shifted right by 18 is non-zero) it falls through to label 4
	 * and demaps the entire nucleus context instead, which is
	 * cheaper than thousands of per-page traps.
	 * Patched over __flush_tlb_kernel_range (31 insns).
	 */
__hypervisor_flush_tlb_kernel_range: /* 31 insns */
	/* %o0=start, %o1=end */
	cmp		%o0, %o1
	be,pn		%xcc, 2f		/* empty range: done */
	 sub		%o1, %o0, %g2		/* %g2 = byte length */
	srlx		%g2, 18, %g3
	brnz,pn		%g3, 4f			/* >= 256KB: demap whole ctx */
	 mov		%o0, %g1		/* %g1 = start */
	sethi		%hi(PAGE_SIZE), %g3	/* %g3 = loop step */
	sub		%g2, %g3, %g2		/* %g2 = offset of last page */
1:	add		%g1, %g2, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, 3f			/* error -> report */
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g2, 1b			/* more pages? */
	 sub		%g2, %g3, %g2
2:	retl
	 nop
3:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
4:	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		0, %o2		/* ARG2: mmu context == nucleus */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	brnz,pn		%o0, 3b			/* error -> report */
	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop
 492
 493#ifdef DCACHE_ALIASING_POSSIBLE
 494	/* XXX Niagara and friends have an 8K cache, so no aliasing is
 495	 * XXX possible, but nothing explicit in the Hypervisor API
 496	 * XXX guarantees this.
 497	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl			/* deliberate no-op: see the XXX note above */
	 nop
 501#endif
 502
	/* tlb_patch_one(dest, src, ninsns)
	 *
	 * Boot-time code patcher: copies %o2 instruction words from
	 * %o1 (CPU-specific routine) over %o0 (generic routine),
	 * flushing the I-cache for each patched word.  The callers
	 * below pass the exact instruction count of each routine —
	 * which is why every routine in this file carries a
	 * "/ * N insns * /" annotation that must stay accurate.
	 */
tlb_patch_one:
1:	lduw		[%o1], %g1		/* fetch one insn word */
	stw		%g1, [%o0]		/* store over destination */
	flush		%o0			/* keep I-cache coherent */
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0		/* advance dest (delay slot) */
	retl
	 nop
 513
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 514#ifdef CONFIG_SMP
 515	/* These are all called by the slaves of a cross call, at
 516	 * trap level 1, with interrupts fully disabled.
 517	 *
 518	 * Register usage:
 519	 *   %g5	mm->context	(all tlb flushes)
 520	 *   %g1	address arg 1	(tlb page and range flushes)
 521	 *   %g7	address arg 2	(tlb range flush only)
 522	 *
 523	 *   %g6	scratch 1
 524	 *   %g2	scratch 2
 525	 *   %g3	scratch 3
 526	 *   %g4	scratch 4
 527	 */
	.align		32
	.globl		xcall_flush_tlb_mm
	/* Cross-call target: flush all TLB entries for context %g5 on
	 * this CPU.  Runs at TL1 with interrupts off (see block comment
	 * above).  Temporarily installs %g5 as PRIMARY_CONTEXT
	 * (preserving the nucleus page-size fields), issues the
	 * primary-context demap (address 0x40), then restores the old
	 * context register and retries the trapped instruction.
	 * Hypervisor systems patch this with
	 * __hypervisor_xcall_flush_tlb_mm (24 insns).
	 */
xcall_flush_tlb_mm:	/* 24 insns */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3	/* %g3 = saved context reg */
	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa		%g5, [%g2] ASI_DMMU
	mov		0x40, %g4		/* demap-context, primary */
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU	/* restore old context */
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
 555
	.globl		xcall_flush_tlb_page
	/* Cross-call target: flush one page translation for context %g5.
	 * Bit 0 of %g1 selects whether the IMMU is demapped as well as
	 * the DMMU.  Installs the context in PRIMARY_CONTEXT (keeping
	 * nucleus page-size fields), demaps, then restores the old
	 * context register.  Patched with
	 * __hypervisor_xcall_flush_tlb_page (20 insns) on sun4v.
	 */
xcall_flush_tlb_page:	/* 20 insns */
	/* %g5=context, %g1=vaddr */
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2	/* %g2 = saved context reg */
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5		/* keep nucleus pgsz fields */
	mov		PRIMARY_CONTEXT, %g4
	stxa		%g5, [%g4] ASI_DMMU
	andcc		%g1, 0x1, %g0		/* IMMU flush requested? */
	be,pn		%icc, 2f
	 andn		%g1, 0x1, %g5		/* %g5 = demap address */
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%g4] ASI_DMMU	/* restore old context */
	retry
	nop
	nop
	nop
	nop
	nop
 579
	.globl		xcall_flush_tlb_kernel_range
	/* Cross-call target: flush nucleus TLB entries for [%g1, %g7).
	 * Small ranges (< 256KB) are demapped page by page; larger
	 * ranges fall through to label 2, which instead walks all 64
	 * TLB entries of both MMUs and clears every non-locked one
	 * (bit 0x40 = _PAGE_L_4U marks locked entries to preserve).
	 * Must remain exactly 44 insns: sun4v patches it with
	 * __hypervisor_xcall_flush_tlb_kernel_range and Cheetah with
	 * __cheetah_xcall_flush_tlb_kernel_range, both 44 insns.
	 *
	 * Fix: the delay slot of the brnz below used to be
	 * "add %g2, 1, %g2", a leftover from when %g2 still held
	 * PAGE_SIZE-1 at that point.  Since %g2 is clobbered by the
	 * preceding srlx, that computed a loop step of 1 byte on the
	 * small-range path (unaligned demap addresses, PAGE_SIZE times
	 * too many iterations).  Load the PAGE_SIZE step explicitly,
	 * exactly as the hypervisor variant does inside its loop.
	 */
xcall_flush_tlb_kernel_range:	/* 44 insns */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		/* page-align start */
	andn		%g7, %g2, %g7		/* page-align end */
	sub		%g7, %g1, %g3		/* %g3 = byte length */
	srlx		%g3, 18, %g2
	brnz,pn		%g2, 2f			/* >= 256KB: nuke full TLB */
	 sethi		%hi(PAGE_SIZE), %g2	/* %g2 = loop step (was: add %g2, 1, %g2) */
	sub		%g3, %g2, %g3		/* offset of last page */
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3
	retry
2:	mov		63 * 8, %g1		/* %g1 = last TLB entry index */
1:	ldxa		[%g1] ASI_ITLB_DATA_ACCESS, %g2
	andcc		%g2, 0x40, %g0			/* _PAGE_L_4U */
	bne,pn		%xcc, 2f			/* skip locked entries */
	 mov		TLB_TAG_ACCESS, %g2
	stxa		%g0, [%g2] ASI_IMMU
	stxa		%g0, [%g1] ASI_ITLB_DATA_ACCESS
	membar		#Sync
2:	ldxa		[%g1] ASI_DTLB_DATA_ACCESS, %g2
	andcc		%g2, 0x40, %g0
	bne,pn		%xcc, 2f
	 mov		TLB_TAG_ACCESS, %g2
	stxa		%g0, [%g2] ASI_DMMU
	stxa		%g0, [%g1] ASI_DTLB_DATA_ACCESS
	membar		#Sync
2:	sub		%g1, 8, %g1		/* previous entry */
	brgez,pt	%g1, 1b
	 nop
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
 626
 627	/* This runs in a very controlled environment, so we do
 628	 * not need to worry about BH races etc.
 629	 */
	.globl		xcall_sync_tick
	/* Cross-call target for tick synchronization.  Switches out of
	 * the interrupt/alternate globals (the two-insn sequence is
	 * NOPed out on sun4v via the .sun4v_2insn_patch section),
	 * raises PIL, enters the kernel through etrap_irq, calls
	 * smp_synchronize_tick_client(), and leaves via rtrap_xcall.
	 */
xcall_sync_tick:

661:	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b			/* patch site: replace with 2 nops */
	nop
	nop
	.previous

	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	sethi		%hi(109f), %g7		/* %g7 = etrap return point */
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call		smp_synchronize_tick_client
	 nop
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 654
	.globl		xcall_fetch_glob_regs
	/* Cross-call target: snapshot this CPU's trap state (%tstate,
	 * %tpc, %tnpc), %o7/%i7, the caller's %i7 from the previous
	 * register window, and the current thread pointer into this
	 * CPU's 64-byte slot of global_cpu_snapshot[].
	 */
xcall_fetch_glob_regs:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3		/* cpuid * 64-byte slot */
	add		%g1, %g3, %g1		/* %g1 = this CPU's slot */
	rdpr		%tstate, %g7
	stx		%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr		%tpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TPC]
	rdpr		%tnpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TNPC]
	stx		%o7, [%g1 + GR_SNAP_O7]
	stx		%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids... */
	rdpr		%cwp, %g3		/* peek at previous window's */
	sub		%g3, 1, %g7		/* %i7 by rotating CWP back */
	wrpr		%g7, %cwp
	mov		%i7, %g7
	wrpr		%g3, %cwp		/* restore current window */
	stx		%g7, [%g1 + GR_SNAP_RPC]
	sethi		%hi(trap_block), %g7
	or		%g7, %lo(trap_block), %g7
	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add		%g7, %g2, %g7		/* %g7 = this CPU's trap_block */
	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx		%g3, [%g1 + GR_SNAP_THREAD]
	retry
 684
	.globl		xcall_fetch_glob_pmu
	/* Cross-call target: snapshot this CPU's PMU registers (%pic
	 * into slot 4, %pcr into slot 0 of the 8-byte-word snapshot
	 * area) for pre-Niagara-4 style performance counters.
	 */
xcall_fetch_glob_pmu:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3		/* cpuid * 64-byte slot */
	add		%g1, %g3, %g1
	rd		%pic, %g7
	stx		%g7, [%g1 + (4 * 8)]	/* counter value */
	rd		%pcr, %g7
	stx		%g7, [%g1 + (0 * 8)]	/* control register */
	retry
 697
	.globl		xcall_fetch_glob_pmu_n4
	/* Cross-call target: Niagara-4 variant of the PMU snapshot.
	 * Reads the four PIC counters through ASI_PIC (offsets 0x00,
	 * 0x08, 0x10, 0x18 into slots 4-7) and the four PCR control
	 * registers via the HV_FAST_VT_GET_PERFREG hypercall (regs
	 * 3..0 into slots 3..0).  %o0/%o1/%o5 are preserved around
	 * the hypercalls in %g2/%g3/%g7 since this runs in xcall
	 * context where the interrupted code's outs are live.
	 */
xcall_fetch_glob_pmu_n4:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3		/* cpuid * 64-byte slot */
	add		%g1, %g3, %g1

	ldxa		[%g0] ASI_PIC, %g7
	stx		%g7, [%g1 + (4 * 8)]
	mov		0x08, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (5 * 8)]
	mov		0x10, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (6 * 8)]
	mov		0x18, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (7 * 8)]

	mov		%o0, %g2		/* save interrupted %o0/%o1/%o5 */
	mov		%o1, %g3
	mov		%o5, %g7

	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		3, %o0			/* ARG0: perfreg index */
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (3 * 8)]	/* RET1: register value */
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		2, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (2 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		1, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (1 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		0, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (0 * 8)]

	mov		%g2, %o0		/* restore clobbered outs */
	mov		%g3, %o1
	mov		%g7, %o5

	retry
 744
	/* Cheetah replacement for xcall_flush_tlb_kernel_range, patched
	 * in by cheetah_patch_cachetlbops ("mov 44, %o2" — must remain
	 * exactly 44 insns).  Small ranges are demapped page by page;
	 * for ranges >= 256KB it demaps all unlocked entries of both
	 * MMUs via the demap-all operation at address 0x80 instead.
	 *
	 * Fix: as in xcall_flush_tlb_kernel_range, the delay slot of
	 * the brnz was "add %g2, 1, %g2", but %g2 no longer holds
	 * PAGE_SIZE-1 there — it was clobbered by the srlx — so the
	 * small-range loop stepped by 1 byte with unaligned demap
	 * addresses.  Load the PAGE_SIZE step explicitly.
	 */
__cheetah_xcall_flush_tlb_kernel_range:	/* 44 insns */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		/* page-align start */
	andn		%g7, %g2, %g7		/* page-align end */
	sub		%g7, %g1, %g3		/* %g3 = byte length */
	srlx		%g3, 18, %g2
	brnz,pn		%g2, 2f			/* >= 256KB: demap-all */
	 sethi		%hi(PAGE_SIZE), %g2	/* %g2 = loop step (was: add %g2, 1, %g2) */
	sub		%g3, %g2, %g3		/* offset of last page */
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3
	retry
2:	mov		0x80, %g2		/* demap-all operation */
	stxa		%g0, [%g2] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g0, [%g2] ASI_IMMU_DEMAP
	membar		#Sync
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
 790
 791#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah
	/* Cross-call target: invalidate every 32-byte D-cache line of
	 * the physical page in %g1 via ASI_DCACHE_INVALIDATE.
	 */
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi		%hi(PAGE_SIZE), %g3	/* %g3 = bytes remaining */
1:	subcc		%g3, (1 << 5), %g3	/* step one 32-byte line */
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retry
	nop
 803#endif /* DCACHE_ALIASING_POSSIBLE */
 804
	.globl		xcall_flush_dcache_page_spitfire
	/* Cross-call target: Spitfire D-cache (and conditionally
	 * I-cache) flush for one page.  When D-cache aliasing is
	 * possible, walks all 16K/32-byte D$ tags and zaps lines whose
	 * valid tag matches the page's tag comparitor.  If %g5 is
	 * non-zero (page has a mapping) it then flushes the page's
	 * I-cache lines via the flush instruction on the kernel vaddr.
	 */
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0		! line valid?
	be,pn		%xcc, 2f
	 andn		%g2, 0x3, %g2		! strip state bits from tag
	cmp		%g2, %g1

	bne,pt		%xcc, 2f		! tag mismatch: skip line
	 nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG
	membar		#Sync
2:	cmp		%g3, 0			! more lines to scan?
	bne,pt		%xcc, 1b
	 sub		%g3, (1 << 5), %g3

	brz,pn		%g5, 2f			! no mapping: skip I$ flush
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi		%hi(PAGE_SIZE), %g3

1:	flush		%g7			! flush one I$ line
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	 add		%g7, (1 << 5), %g7

2:	retry
	nop
	nop
 839
 840	/* %g5:	error
 841	 * %g6:	tlb op
 842	 */
	/* Error reporter for the hypervisor xcall TLB ops below.
	 * Entered (at TL1) with %g5 = HV error status and %g6 = the
	 * failing tlb op; shuffles them into %g4/%g5 so etrap's %l4/%l5
	 * carry them, enters the kernel via etrap, and calls the C
	 * handler hypervisor_tlbop_error_xcall().
	 */
__hypervisor_tlb_xcall_error:
	mov	%g5, %g4		/* error status */
	mov	%g6, %g5		/* which tlb op */
	ba,pt	%xcc, etrap
	 rd	%pc, %g7		/* etrap return address */
	mov	%l4, %o0		/* arg0: error status */
	call	hypervisor_tlbop_error_xcall
	 mov	%l5, %o1		/* arg1: tlb op */
	ba,a,pt	%xcc, rtrap
 852
	.globl		__hypervisor_xcall_flush_tlb_mm
	/* sun4v replacement for xcall_flush_tlb_mm (patched in by
	 * hypervisor_patch_cachetlbops, must stay 24 insns).  Demaps
	 * the whole context %g5 with HV_FAST_MMU_DEMAP_CTX.  The
	 * interrupted code's %o registers are live, so they are saved
	 * in globals around the hypercall and restored afterwards.
	 */
__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov		%o0, %g2		/* save live outs */
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%o3, %g1
	mov		%o5, %g7
	clr		%o0		/* ARG0: CPU lists unimplemented */
	clr		%o1		/* ARG1: CPU lists unimplemented */
	mov		%g5, %o2	/* ARG2: mmu context */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	mov		HV_FAST_MMU_DEMAP_CTX, %g6	/* op for error path */
	brnz,pn		%o0, 1f			/* non-zero = HV error */
	 mov		%o0, %g5		/* status for error path */
	mov		%g2, %o0		/* restore outs */
	mov		%g3, %o1
	mov		%g4, %o2
	mov		%g1, %o3
	mov		%g7, %o5
	membar		#Sync
	retry
1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	 nop
 880
	.globl		__hypervisor_xcall_flush_tlb_page
	/* sun4v replacement for xcall_flush_tlb_page (must stay 20
	 * insns).  Unmaps one page of context %g5 with
	 * HV_MMU_UNMAP_ADDR_TRAP, preserving the interrupted code's
	 * %o0-%o2 in globals across the trap.
	 */
__hypervisor_xcall_flush_tlb_page: /* 20 insns */
	/* %g5=ctx, %g1=vaddr */
	mov		%o0, %g2		/* save live outs */
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%g1, %o0	        /* ARG0: virtual address */
	mov		%g5, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0	/* page-align the address */
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6	/* op for error path */
	brnz,a,pn	%o0, 1f			/* annulled: %g5 set on error only */
	 mov		%o0, %g5		/* status for error path */
	mov		%g2, %o0		/* restore outs */
	mov		%g3, %o1
	mov		%g4, %o2
	membar		#Sync
	retry
1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	 nop
 904
	.globl		__hypervisor_xcall_flush_tlb_kernel_range
	/* sun4v replacement for xcall_flush_tlb_kernel_range (must stay
	 * 44 insns).  Page-by-page nucleus unmap for ranges < 256KB;
	 * larger ranges branch to label 2 and demap the whole nucleus
	 * context with HV_FAST_MMU_DEMAP_CTX instead.  Live %o regs of
	 * the interrupted code are preserved in globals.  Note the
	 * "add %g2, 1, %g2" here is correct: the srlx result went to
	 * %g7, so %g2 still holds PAGE_SIZE-1 at that point.
	 */
__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		/* page-align start */
	andn		%g7, %g2, %g7		/* page-align end */
	sub		%g7, %g1, %g3		/* %g3 = byte length */
	srlx		%g3, 18, %g7		/* big-range test -> %g7 */
	add		%g2, 1, %g2		/* %g2 = PAGE_SIZE */
	sub		%g3, %g2, %g3		/* offset of last page */
	mov		%o0, %g2		/* save live outs */
	mov		%o1, %g4
	brnz,pn		%g7, 2f			/* >= 256KB: demap ctx */
	 mov		%o2, %g7
1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6	/* op for error path */
	brnz,pn		%o0, 1f			/* HV error -> report */
	 mov		%o0, %g5		/* status for error path */
	sethi		%hi(PAGE_SIZE), %o2	/* loop step */
	brnz,pt		%g3, 1b			/* more pages? */
	 sub		%g3, %o2, %g3
5:	mov		%g2, %o0		/* restore outs */
	mov		%g4, %o1
	mov		%g7, %o2
	membar		#Sync
	retry
1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	 nop
2:	mov		%o3, %g1		/* save remaining live outs */
	mov		%o5, %g3
	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		0, %o2		/* ARG2: mmu context == nucleus */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	mov		%g1, %o3		/* restore %o3/%o5 */
	brz,pt		%o0, 5b			/* success: exit via 5b */
	 mov		%g3, %o5
	mov		HV_FAST_MMU_DEMAP_CTX, %g6	/* op for error path */
	ba,pt		%xcc, 1b
	 clr		%g5
 952
	/* These just get rescheduled to PIL vectors. */
	/* Each stub raises the corresponding PIL softint on this CPU so
	 * the real work runs from the normal interrupt path, then
	 * retries the interrupted instruction.
	 */
	.globl		xcall_call_function
xcall_call_function:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_call_function_single
xcall_call_function_single:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl		xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl		xcall_kgdb_capture
xcall_kgdb_capture:
	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif
 985
 986#endif /* CONFIG_SMP */
 987
	.globl		cheetah_patch_cachetlbops
	/* Boot-time patching for Cheetah CPUs: overwrites each generic
	 * flush routine with its __cheetah_* variant via tlb_patch_one.
	 * Each "mov N, %o2" is the exact instruction count of the
	 * routine being copied — it must match the "/ * N insns * /"
	 * annotation on the corresponding __cheetah_* definition.
	 */
cheetah_patch_cachetlbops:
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		19, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__cheetah_flush_tlb_page), %o1
	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		22, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		27, %o2

	sethi		%hi(__flush_tlb_kernel_range), %o0
	or		%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi		%hi(__cheetah_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__cheetah_flush_dcache_page), %o1
	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi		%hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		44, %o2
#endif /* CONFIG_SMP */

	ret
	 restore
1040
	.globl		hypervisor_patch_cachetlbops
	/* Boot-time patching for sun4v (hypervisor) systems: overwrites
	 * each generic flush routine — and, under CONFIG_SMP, the
	 * xcall variants — with its __hypervisor_* implementation via
	 * tlb_patch_one.  As above, every "mov N, %o2" must equal the
	 * instruction count of the source routine being copied.
	 */
hypervisor_patch_cachetlbops:
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		19, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__hypervisor_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		22, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		27, %o2

	sethi		%hi(__flush_tlb_kernel_range), %o0
	or		%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__hypervisor_flush_dcache_page), %o1
	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi		%hi(xcall_flush_tlb_mm), %o0
	or		%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		24, %o2

	sethi		%hi(xcall_flush_tlb_page), %o0
	or		%o0, %lo(xcall_flush_tlb_page), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		20, %o2

	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		44, %o2
#endif /* CONFIG_SMP */

	ret
	 restore