v4.17
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try not to fuck this up for anybody...
	 */
	.text
	.align		32
	.globl		__flush_tlb_mm
__flush_tlb_mm:		/* 19 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
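	/* 0x50 is a demap address per the UltraSPARC MMU demap encoding:
	 * bits [7:6] select the operation (0 = page, 1 = context) and
	 * bits [5:4] the context (0 = primary, 1 = secondary, 2 = nucleus),
	 * so 0x50 means "demap context, secondary".  The 0x10, 0x20, 0x40
	 * and 0x80 constants used later in this file follow the same scheme.
	 */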
	ldxa		[%o1] ASI_DMMU, %g2
	cmp		%g2, %o0
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	 mov		0x50, %g3
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	sethi		%hi(KERNBASE), %g3
	flush		%g3
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_page
__flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
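	/* Bit 0 of the vaddr argument is a flag: when set, the page is
	 * demapped from the I-MMU as well as the D-MMU.  Interrupts are
	 * disabled (PSTATE_IE) while the secondary context register is
	 * temporarily rewritten around the demap.
	 */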
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0
	andn		%o1, 1, %o3
	be,pn		%icc, 1f
	 or		%o3, 0x10, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_pending
__flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0
	andn		%o3, 1, %o3
	be,pn		%icc, 2f
	 or		%o3, 0x10, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align		32
	.globl		__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 31 insns */
	/* %o0=start, %o1=end */
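	/* A range of 256KB or more (the srlx by 18) is not worth demapping
	 * page by page; fall through to the slow path, which sweeps the
	 * TLB entries directly by index.
	 */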
	cmp		%o0, %o1
	be,pn		%xcc, 2f
	 sub		%o1, %o0, %o3
	srlx		%o3, 18, %o4
	brnz,pn		%o4, __spitfire_flush_tlb_kernel_range_slow
	 sethi		%hi(PAGE_SIZE), %o4
	sub		%o3, %o4, %o3
	or		%o0, 0x20, %o0		! Nucleus
1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o3, 1b
	 sub		%o3, %o4, %o3
2:	sethi		%hi(KERNBASE), %o3
	flush		%o3
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

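	/* Slow path: walk all 64 ITLB and DTLB entries by index through the
	 * data-access ASIs and clear each one, skipping locked entries
	 * (the 0x40 lock bit, _PAGE_L_4U, in the TLB data word).
	 */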
__spitfire_flush_tlb_kernel_range_slow:
	mov		63 * 8, %o4
1:	ldxa		[%o4] ASI_ITLB_DATA_ACCESS, %o3
	andcc		%o3, 0x40, %g0			/* _PAGE_L_4U */
	bne,pn		%xcc, 2f
	 mov		TLB_TAG_ACCESS, %o3
	stxa		%g0, [%o3] ASI_IMMU
	stxa		%g0, [%o4] ASI_ITLB_DATA_ACCESS
	membar		#Sync
2:	ldxa		[%o4] ASI_DTLB_DATA_ACCESS, %o3
	andcc		%o3, 0x40, %g0
	bne,pn		%xcc, 2f
	 mov		TLB_TAG_ACCESS, %o3
	stxa		%g0, [%o3] ASI_DMMU
	stxa		%g0, [%o4] ASI_DTLB_DATA_ACCESS
	membar		#Sync
2:	sub		%o4, 8, %o4
	brgez,pt	%o4, 1b
	 nop
	retl
	 nop

__spitfire_flush_tlb_mm_slow:
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate
	stxa		%o0, [%o1] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o1] ASI_DMMU
	sethi		%hi(KERNBASE), %o1
	flush		%o1
	retl
	 wrpr		%g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align		32
	.globl		__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	srlx		%o0, PAGE_SHIFT, %o0
	sethi		%hi(PAGE_OFFSET), %g1
	sllx		%o0, PAGE_SHIFT, %o0
	sethi		%hi(PAGE_SIZE), %g2
	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
	add		%o0, %g1, %o0
1:	subcc		%g2, 32, %g2
	bne,pt		%icc, 1b
	 flush		%o0 + %g2
	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align		64
	.globl		__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi		%hi(PAGE_OFFSET), %g1
	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
	sub		%o0, %g1, %o0			! physical address
	srlx		%o0, 11, %o0			! make D-cache TAG
	sethi		%hi(1 << 14), %o2		! D-cache size
	sub		%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc		%o3, DTAG_MASK, %g0		! Valid?
	be,pn		%xcc, 2f			! Nope, branch
	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp		%o3, %o0			! TAG match?
	bne,pt		%xcc, 2f			! Nope, branch
	 nop
	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar		#Sync
2:	brnz,pt		%o2, 1b
	 sub		%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	 sllx		%o0, 11, %o0
	retl
	 nop

#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous

	/* Cheetah specific versions, patched at boot time. */
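	/* The Cheetah variants demap at trap level 1 (wrpr %g0, 1, %tl),
	 * presumably so instruction fetches translate through the nucleus
	 * context while the primary context register is rewritten, and they
	 * use the primary context rather than the secondary.  The srlx/sllx
	 * by CTX_PGSZ1_NUC_SHIFT keeps the nucleus page-size fields of the
	 * context register intact while the context number is swapped in.
	 */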
__cheetah_flush_tlb_mm: /* 19 insns */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o2
	mov		0x40, %g3
	ldxa		[%o2] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o2] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU
	sethi		%hi(KERNBASE), %o2
	flush		%o2
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate

__cheetah_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0
	be,pn		%icc, 1f
	 andn		%o1, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate

__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0
	be,pn		%icc, 2f
	 andn		%o3, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate

__cheetah_flush_tlb_kernel_range:	/* 31 insns */
	/* %o0=start, %o1=end */
	cmp		%o0, %o1
	be,pn		%xcc, 2f
	 sub		%o1, %o0, %o3
	srlx		%o3, 18, %o4
	brnz,pn		%o4, 3f
	 sethi		%hi(PAGE_SIZE), %o4
	sub		%o3, %o4, %o3
	or		%o0, 0x20, %o0		! Nucleus
1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o3, 1b
	 sub		%o3, %o4, %o3
2:	sethi		%hi(KERNBASE), %o3
	flush		%o3
	retl
	 nop
3:	mov		0x80, %o4
	stxa		%g0, [%o4] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g0, [%o4] ASI_IMMU_DEMAP
	membar		#Sync
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page: /* 11 insns */
	sethi		%hi(PAGE_OFFSET), %g1
	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
	sub		%o0, %g1, %o0
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	/* Hypervisor specific versions, patched at boot time.  */
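	/* All hypervisor variants use the sun4v calling convention: the
	 * fast-trap function number goes in %o5 for ta HV_FAST_TRAP (or is
	 * implied by ta HV_MMU_UNMAP_ADDR_TRAP), arguments go in %o0-%o3,
	 * and a status comes back in %o0.  A nonzero status branches to
	 * __hypervisor_tlb_tl0_error with the failing call number in %o1.
	 */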
__hypervisor_tlb_tl0_error:
	save		%sp, -192, %sp
	mov		%i0, %o0
	call		hypervisor_tlbop_error
	 mov		%i1, %o1
	ret
	 restore

__hypervisor_flush_tlb_mm: /* 19 insns */
	mov		%o0, %o2	/* ARG2: mmu context */
	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	brnz,pn		%o0, 1f
	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop
1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o5
	jmpl		%o5 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_page: /* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	mov		%o0, %g2
	mov		%o1, %o0              /* ARG0: vaddr + IMMU-bit */
	mov		%g2, %o1	      /* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, 1f
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	retl
	 nop
1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_pending: /* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx		%o1, 3, %g1
	mov		%o2, %g2
	mov		%o0, %g3
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g2 + %g1], %o0      /* ARG0: vaddr + IMMU-bit */
	mov		%g3, %o1	      /* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, 1f
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g1, 1b
	 nop
	retl
	 nop
1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_kernel_range: /* 31 insns */
	/* %o0=start, %o1=end */
	cmp		%o0, %o1
	be,pn		%xcc, 2f
	 sub		%o1, %o0, %g2
	srlx		%g2, 18, %g3
	brnz,pn		%g3, 4f
	 mov		%o0, %g1
	sethi		%hi(PAGE_SIZE), %g3
	sub		%g2, %g3, %g2
1:	add		%g1, %g2, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, 3f
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g2, 1b
	 sub		%g2, %g3, %g2
2:	retl
	 nop
3:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	 nop
4:	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		0, %o2		/* ARG2: mmu context == nucleus */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	brnz,pn		%o0, 3b
	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop

#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	 nop
#endif

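	/* tlb_patch_one: copy %o2 instruction words from the CPU-specific
	 * routine at %o1 over the generic routine at %o0, flushing the
	 * I-cache for each patched word.
	 */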
tlb_patch_one:
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0
	retl
	 nop

#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *   %g5	mm->context	(all tlb flushes)
	 *   %g1	address arg 1	(tlb page and range flushes)
	 *   %g7	address arg 2	(tlb range flush only)
	 *
	 *   %g6	scratch 1
	 *   %g2	scratch 2
	 *   %g3	scratch 3
	 *   %g4	scratch 4
	 */
	.align		32
	.globl		xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 24 insns */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3
	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa		%g5, [%g2] ASI_DMMU
	mov		0x40, %g4
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl		xcall_flush_tlb_page
xcall_flush_tlb_page:	/* 20 insns */
	/* %g5=context, %g1=vaddr */
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5
	mov		PRIMARY_CONTEXT, %g4
	stxa		%g5, [%g4] ASI_DMMU
	andcc		%g1, 0x1, %g0
	be,pn		%icc, 2f
	 andn		%g1, 0x1, %g5
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%g4] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop

	.globl		xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 44 insns */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1
	andn		%g7, %g2, %g7
	sub		%g7, %g1, %g3
	srlx		%g3, 18, %g2
	brnz,pn		%g2, 2f
	 add		%g2, 1, %g2
	sub		%g3, %g2, %g3
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3
	retry
2:	mov		63 * 8, %g1
1:	ldxa		[%g1] ASI_ITLB_DATA_ACCESS, %g2
	andcc		%g2, 0x40, %g0			/* _PAGE_L_4U */
	bne,pn		%xcc, 2f
	 mov		TLB_TAG_ACCESS, %g2
	stxa		%g0, [%g2] ASI_IMMU
	stxa		%g0, [%g1] ASI_ITLB_DATA_ACCESS
	membar		#Sync
2:	ldxa		[%g1] ASI_DTLB_DATA_ACCESS, %g2
	andcc		%g2, 0x40, %g0
	bne,pn		%xcc, 2f
	 mov		TLB_TAG_ACCESS, %g2
	stxa		%g0, [%g2] ASI_DMMU
	stxa		%g0, [%g1] ASI_DTLB_DATA_ACCESS
	membar		#Sync
2:	sub		%g1, 8, %g1
	brgez,pt	%g1, 1b
	 nop
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl		xcall_sync_tick
xcall_sync_tick:

661:	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call		smp_synchronize_tick_client
	 nop
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	.globl		xcall_fetch_glob_regs
xcall_fetch_glob_regs:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3
	add		%g1, %g3, %g1
	rdpr		%tstate, %g7
	stx		%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr		%tpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TPC]
	rdpr		%tnpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TNPC]
	stx		%o7, [%g1 + GR_SNAP_O7]
	stx		%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids... */
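	/* The window trick below: rotate %cwp back one window, read that
	 * window's %i7 (the interrupted caller's return address), then
	 * rotate forward again before storing it as GR_SNAP_RPC.
	 */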
	rdpr		%cwp, %g3
	sub		%g3, 1, %g7
	wrpr		%g7, %cwp
	mov		%i7, %g7
	wrpr		%g3, %cwp
	stx		%g7, [%g1 + GR_SNAP_RPC]
	sethi		%hi(trap_block), %g7
	or		%g7, %lo(trap_block), %g7
	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add		%g7, %g2, %g7
	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx		%g3, [%g1 + GR_SNAP_THREAD]
	retry

	.globl		xcall_fetch_glob_pmu
xcall_fetch_glob_pmu:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3
	add		%g1, %g3, %g1
	rd		%pic, %g7
	stx		%g7, [%g1 + (4 * 8)]
	rd		%pcr, %g7
	stx		%g7, [%g1 + (0 * 8)]
	retry

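	/* Like xcall_fetch_glob_pmu, but for the Niagara-4 style PMU: the
	 * four PIC counters are read through ASI_PIC and the four PCRs via
	 * HV_FAST_VT_GET_PERFREG hypercalls, with the live %o0/%o1/%o5
	 * values parked in globals around the traps.
	 */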
	.globl		xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3
	add		%g1, %g3, %g1

	ldxa		[%g0] ASI_PIC, %g7
	stx		%g7, [%g1 + (4 * 8)]
	mov		0x08, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (5 * 8)]
	mov		0x10, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (6 * 8)]
	mov		0x18, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (7 * 8)]

	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o5, %g7

	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		3, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (3 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		2, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (2 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		1, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (1 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		0, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (0 * 8)]

	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g7, %o5

	retry

__cheetah_xcall_flush_tlb_kernel_range:	/* 44 insns */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1
	andn		%g7, %g2, %g7
	sub		%g7, %g1, %g3
	srlx		%g3, 18, %g2
	brnz,pn		%g2, 2f
	 add		%g2, 1, %g2
	sub		%g3, %g2, %g3
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3
	retry
2:	mov		0x80, %g2
	stxa		%g0, [%g2] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g0, [%g2] ASI_IMMU_DEMAP
	membar		#Sync
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl		xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx		%g1, (13 - 2), %g1	! Form tag comparator
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0
	be,pn		%xcc, 2f
	 andn		%g2, 0x3, %g2
	cmp		%g2, %g1

	bne,pt		%xcc, 2f
	 nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG
	membar		#Sync
2:	cmp		%g3, 0
	bne,pt		%xcc, 1b
	 sub		%g3, (1 << 5), %g3

	brz,pn		%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi		%hi(PAGE_SIZE), %g3

1:	flush		%g7
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	 add		%g7, (1 << 5), %g7

2:	retry
	nop
	nop

	/* %g5:	error
	 * %g6:	tlb op
	 */
__hypervisor_tlb_xcall_error:
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	 mov	%l5, %o1
	ba,a,pt	%xcc, rtrap

	.globl		__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%o3, %g1
	mov		%o5, %g7
	clr		%o0		/* ARG0: CPU lists unimplemented */
	clr		%o1		/* ARG1: CPU lists unimplemented */
	mov		%g5, %o2	/* ARG2: mmu context */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	mov		HV_FAST_MMU_DEMAP_CTX, %g6
	brnz,pn		%o0, 1f
	 mov		%o0, %g5
	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g4, %o2
	mov		%g1, %o3
	mov		%g7, %o5
	membar		#Sync
	retry
1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	 nop

	.globl		__hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 20 insns */
	/* %g5=ctx, %g1=vaddr */
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%g1, %o0	        /* ARG0: virtual address */
	mov		%g5, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn	%o0, 1f
	 mov		%o0, %g5
	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g4, %o2
	membar		#Sync
	retry
1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	 nop

	.globl		__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1
	andn		%g7, %g2, %g7
	sub		%g7, %g1, %g3
	srlx		%g3, 18, %g7
	add		%g2, 1, %g2
	sub		%g3, %g2, %g3
	mov		%o0, %g2
	mov		%o1, %g4
	brnz,pn		%g7, 2f
	 mov		%o2, %g7
1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn		%o0, 1f
	 mov		%o0, %g5
	sethi		%hi(PAGE_SIZE), %o2
	brnz,pt		%g3, 1b
	 sub		%g3, %o2, %g3
5:	mov		%g2, %o0
	mov		%g4, %o1
	mov		%g7, %o2
	membar		#Sync
	retry
1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	 nop
2:	mov		%o3, %g1
	mov		%o5, %g3
	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		0, %o2		/* ARG2: mmu context == nucleus */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	mov		%g1, %o3
	brz,pt		%o0, 5b
	 mov		%g3, %o5
	mov		HV_FAST_MMU_DEMAP_CTX, %g6
	ba,pt		%xcc, 1b
	 clr		%g5

	/* These just get rescheduled to PIL vectors. */
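	/* Running at trap level 1, these cannot call into C directly; they
	 * just post a software interrupt at the appropriate PIL and return
	 * with retry, letting the normal trap path run the handler.
	 */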
	.globl		xcall_call_function
xcall_call_function:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_call_function_single
xcall_call_function_single:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl		xcall_kgdb_capture
xcall_kgdb_capture:
	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */

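	/* Patch drivers: rewrite each generic routine in place with its
	 * CPU-specific variant; the instruction counts passed to
	 * tlb_patch_one must match the "NN insns" annotations above.
	 */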
	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		19, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__cheetah_flush_tlb_page), %o1
	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		22, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		27, %o2

	sethi		%hi(__flush_tlb_kernel_range), %o0
	or		%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi		%hi(__cheetah_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__cheetah_flush_dcache_page), %o1
	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi		%hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		44, %o2
#endif /* CONFIG_SMP */

	ret
	 restore

	.globl		hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		19, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__hypervisor_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		22, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		27, %o2

	sethi		%hi(__flush_tlb_kernel_range), %o0
	or		%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__hypervisor_flush_dcache_page), %o1
	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi		%hi(xcall_flush_tlb_mm), %o0
	or		%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		24, %o2

	sethi		%hi(xcall_flush_tlb_page), %o0
	or		%o0, %lo(xcall_flush_tlb_page), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		20, %o2

	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		44, %o2
#endif /* CONFIG_SMP */

	ret
	 restore