/* NOTE(capture): the following listing is a web-mirror capture of the Linux
 * kernel file arch/sparc/mm/ultra.S at version v6.2.  The per-line numeric
 * prefixes are artifacts of the mirror, not part of the original source.
 * (Page-navigation text that preceded this point has been replaced by this
 * note; it carried no source content.)
 */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * ultra.S: Don't expand these all over the place...
   4 *
   5 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
   6 */
   7
   8#include <linux/pgtable.h>
   9#include <asm/asi.h>
 
  10#include <asm/page.h>
  11#include <asm/spitfire.h>
  12#include <asm/mmu_context.h>
  13#include <asm/mmu.h>
  14#include <asm/pil.h>
  15#include <asm/head.h>
  16#include <asm/thread_info.h>
  17#include <asm/cacheflush.h>
  18#include <asm/hypervisor.h>
  19#include <asm/cpudata.h>
  20
  21	/* Basically, most of the Spitfire vs. Cheetah madness
  22	 * has to do with the fact that Cheetah does not support
  23	 * IMMU flushes out of the secondary context.  Someone needs
  24	 * to throw a south lake birthday party for the folks
  25	 * in Microelectronics who refused to fix this shit.
  26	 */
  27
  28	/* This file is meant to be read efficiently by the CPU, not humans.
   29	 * Staraj sie tego nikomu nie pierdolnac...  (Polish: roughly,
   29	 * "try not to screw this up for anybody...")
  30	 */
  31	.text
   32	.align		32
   33	.globl		__flush_tlb_mm
   34__flush_tlb_mm:		/* 19 insns */
	/* Spitfire version: demap every TLB entry belonging to one user
	 * context.  This routine is overwritten at boot by tlb_patch_one
	 * (see cheetah_patch_cachetlbops / hypervisor_patch_cachetlbops,
	 * which pass a length of 19), so its total size — including the
	 * nop padding — must stay exactly 19 instructions.
	 */
   35	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
   36	ldxa		[%o1] ASI_DMMU, %g2
	/* Fast path only if %o0 is already the live secondary context;
	 * otherwise __spitfire_flush_tlb_mm_slow installs it first.  The
	 * delay slot loads 0x50, the demap address used by both paths
	 * (demap-context operation; the context register it targets is
	 * selected by these address bits — presumably secondary here,
	 * matching the SECONDARY_CONTEXT check above).
	 */
   37	cmp		%g2, %o0
   38	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
   39	 mov		0x50, %g3
   40	stxa		%g0, [%g3] ASI_DMMU_DEMAP
   41	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	/* flush of a kernel-text address serializes the MMU stores. */
   42	sethi		%hi(KERNBASE), %g3
   43	flush		%g3
   44	retl
   45	 nop
   46	nop
   47	nop
   48	nop
   49	nop
   50	nop
   51	nop
   52	nop
   53	nop
   54	nop
  55
   56	.align		32
   57	.globl		__flush_tlb_page
   58__flush_tlb_page:	/* 22 insns */
	/* Spitfire version: demap a single page translation.
	 * %o0 = context, %o1 = vaddr; bit 0 of %o1 set means the I-TLB
	 * must be demapped as well as the D-TLB.  Interrupts are disabled
	 * (PSTATE_IE cleared) while the secondary context register is
	 * temporarily switched to %o0.  Boot-time patch target: exactly
	 * 22 instructions including padding.
	 */
   59	/* %o0 = context, %o1 = vaddr */
   60	rdpr		%pstate, %g7
   61	andn		%g7, PSTATE_IE, %g2
   62	wrpr		%g2, %pstate
   63	mov		SECONDARY_CONTEXT, %o4
   64	ldxa		[%o4] ASI_DMMU, %g2
   65	stxa		%o0, [%o4] ASI_DMMU
	/* %o3 = (vaddr & ~1) | 0x10: page-demap operation address.
	 * Skip the I-TLB demap when bit 0 of the vaddr was clear.
	 */
   66	andcc		%o1, 1, %g0
   67	andn		%o1, 1, %o3
   68	be,pn		%icc, 1f
   69	 or		%o3, 0x10, %o3
   70	stxa		%g0, [%o3] ASI_IMMU_DEMAP
   711:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
   72	membar		#Sync
	/* Restore the previous secondary context and interrupt state. */
   73	stxa		%g2, [%o4] ASI_DMMU
   74	sethi		%hi(KERNBASE), %o4
   75	flush		%o4
   76	retl
   77	 wrpr		%g7, 0x0, %pstate
   78	nop
   79	nop
   80	nop
   81	nop
  82
   83	.align		32
   84	.globl		__flush_tlb_pending
   85__flush_tlb_pending:	/* 27 insns */
	/* Spitfire version: demap a batch of pending page translations.
	 * %o0 = context, %o1 = number of entries, %o2 = vaddrs[] array of
	 * 8-byte vaddr words (bit 0 set => also demap I-TLB, as in
	 * __flush_tlb_page).  Walks the array from the end down to 0 with
	 * interrupts disabled and the secondary context switched to %o0.
	 * Boot-time patch target: exactly 27 instructions.
	 */
   86	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
   87	rdpr		%pstate, %g7
   88	sllx		%o1, 3, %o1
   89	andn		%g7, PSTATE_IE, %g2
   90	wrpr		%g2, %pstate
   91	mov		SECONDARY_CONTEXT, %o4
   92	ldxa		[%o4] ASI_DMMU, %g2
   93	stxa		%o0, [%o4] ASI_DMMU
	/* Loop: %o1 is the byte offset of the entry being processed. */
   941:	sub		%o1, (1 << 3), %o1
   95	ldx		[%o2 + %o1], %o3
   96	andcc		%o3, 1, %g0
   97	andn		%o3, 1, %o3
   98	be,pn		%icc, 2f
   99	 or		%o3, 0x10, %o3
  100	stxa		%g0, [%o3] ASI_IMMU_DEMAP
  1012:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
  102	membar		#Sync
  103	brnz,pt		%o1, 1b
  104	 nop
	/* Restore previous secondary context and interrupt state. */
  105	stxa		%g2, [%o4] ASI_DMMU
  106	sethi		%hi(KERNBASE), %o4
  107	flush		%o4
  108	retl
  109	 wrpr		%g7, 0x0, %pstate
  110	nop
  111	nop
  112	nop
  113	nop
 114
  115	.align		32
  116	.globl		__flush_tlb_kernel_range
  117__flush_tlb_kernel_range:	/* 31 insns */
	/* Spitfire version: demap kernel (nucleus) translations for
	 * [start, end).  Ranges larger than 2^18 bytes (see the srlx by
	 * 18) are punted to __spitfire_flush_tlb_kernel_range_slow, which
	 * wipes the whole TLB instead of demapping page by page.
	 * Boot-time patch target: exactly 31 instructions.
	 */
  118	/* %o0=start, %o1=end */
  119	cmp		%o0, %o1
  120	be,pn		%xcc, 2f
  121	 sub		%o1, %o0, %o3
  122	srlx		%o3, 18, %o4
  123	brnz,pn		%o4, __spitfire_flush_tlb_kernel_range_slow
  124	 sethi		%hi(PAGE_SIZE), %o4

	/* Page loop: %o3 counts down from (len - PAGE_SIZE) to 0;
	 * 0x20 in the demap address selects the nucleus context.
	 */
  125	sub		%o3, %o4, %o3
  126	or		%o0, 0x20, %o0		! Nucleus
  1271:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
  128	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
  129	membar		#Sync
  130	brnz,pt		%o3, 1b
  131	 sub		%o3, %o4, %o3
  1322:	sethi		%hi(KERNBASE), %o3
  133	flush		%o3
  134	retl
  135	 nop
  136	nop
  137	nop
  138	nop
  139	nop
  140	nop
  141	nop
  142	nop
  143	nop
  144	nop
  145	nop
  146	nop
  147	nop
  148	nop
  149	nop
 150
  151__spitfire_flush_tlb_kernel_range_slow:
	/* Slow path for huge kernel-range flushes on Spitfire: walk all
	 * 64 I-TLB and D-TLB entries (indices 63*8 down to 0, 8 bytes
	 * apart) and zero each one via TLB_TAG_ACCESS + data access,
	 * skipping entries with the lock bit 0x40 (_PAGE_L_4U) set.
	 */
  152	mov		63 * 8, %o4
  1531:	ldxa		[%o4] ASI_ITLB_DATA_ACCESS, %o3
  154	andcc		%o3, 0x40, %g0			/* _PAGE_L_4U */
  155	bne,pn		%xcc, 2f
  156	 mov		TLB_TAG_ACCESS, %o3
  157	stxa		%g0, [%o3] ASI_IMMU
  158	stxa		%g0, [%o4] ASI_ITLB_DATA_ACCESS
  159	membar		#Sync
	/* Same for the corresponding D-TLB entry. */
  1602:	ldxa		[%o4] ASI_DTLB_DATA_ACCESS, %o3
  161	andcc		%o3, 0x40, %g0
  162	bne,pn		%xcc, 2f
  163	 mov		TLB_TAG_ACCESS, %o3
  164	stxa		%g0, [%o3] ASI_DMMU
  165	stxa		%g0, [%o4] ASI_DTLB_DATA_ACCESS
  166	membar		#Sync
  1672:	sub		%o4, 8, %o4
  168	brgez,pt	%o4, 1b
  169	 nop
  170	retl
  171	 nop
 172
  173__spitfire_flush_tlb_mm_slow:
	/* Slow path of __flush_tlb_mm: the requested context (%o0) was not
	 * the live secondary context, so install it, demap via the address
	 * already in %g3 (0x50, set up by the caller's delay slot), then
	 * restore the old context from %g2.  The wrpr xors PSTATE_IE into
	 * %pstate — this disables interrupts assuming they were enabled on
	 * entry (NOTE(review): relies on that assumption; confirm callers).
	 */
  174	rdpr		%pstate, %g1
  175	wrpr		%g1, PSTATE_IE, %pstate
  176	stxa		%o0, [%o1] ASI_DMMU
  177	stxa		%g0, [%g3] ASI_DMMU_DEMAP
  178	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	/* flush %g6 serializes the stores (%g6 is presumably a valid
	 * mapped address here — the current-thread register; verify).
	 */
  179	flush		%g6
  180	stxa		%g2, [%o1] ASI_DMMU
  181	sethi		%hi(KERNBASE), %o1
  182	flush		%o1
  183	retl
  184	 wrpr		%g1, 0, %pstate
 185
  186/*
  187 * The following code flushes one page_size worth.
  188 */
  189	.section .kprobes.text, "ax"
  190	.align		32
  191	.globl		__flush_icache_page
  192__flush_icache_page:	/* %o0 = phys_page */
	/* Flush the I-cache for one physical page.  The physical address
	 * is page-aligned (shift down/up by PAGE_SHIFT) and converted to
	 * its linear kernel virtual address by adding PAGE_OFFSET, then
	 * `flush` is issued on every 32-byte line of the page.
	 */
  193	srlx		%o0, PAGE_SHIFT, %o0
  194	sethi		%hi(PAGE_OFFSET), %g1
  195	sllx		%o0, PAGE_SHIFT, %o0
  196	sethi		%hi(PAGE_SIZE), %g2
  197	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
  198	add		%o0, %g1, %o0
	/* Loop: %g2 counts down PAGE_SIZE..32 in 32-byte steps;
	 * the delay-slot flush covers offset %g2 each iteration.
	 */
  1991:	subcc		%g2, 32, %g2
  200	bne,pt		%icc, 1b
  201	 flush		%o0 + %g2
  202	retl
  203	 nop
 204
  205#ifdef DCACHE_ALIASING_POSSIBLE
  206
  207#if (PAGE_SHIFT != 13)
  208#error only page shift of 13 is supported by dcache flush
  209#endif
  210
  211#define DTAG_MASK 0x3
  212
  213	/* This routine is Spitfire specific so the hardcoded
  214	 * D-cache size and line-size are OK.
  215	 */
  216	.align		64
  217	.globl		__flush_dcache_page
  218__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	/* Invalidate all D-cache lines holding one page, by walking the
	 * 16K direct-mapped D-cache (32-byte lines) and zeroing the tag
	 * of every valid line whose tag matches the page's physical
	 * address.  If %o1 is non-zero, tail-call __flush_icache_page
	 * afterwards (the delay-slot sllx rebuilds the physical address
	 * from the tag for it).
	 */
  219	sethi		%hi(PAGE_OFFSET), %g1
  220	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
  221	sub		%o0, %g1, %o0			! physical address
  222	srlx		%o0, 11, %o0			! make D-cache TAG
  223	sethi		%hi(1 << 14), %o2		! D-cache size
  224	sub		%o2, (1 << 5), %o2		! D-cache line size
  2251:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
  226	andcc		%o3, DTAG_MASK, %g0		! Valid?
  227	be,pn		%xcc, 2f			! Nope, branch
  228	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
  229	cmp		%o3, %o0			! TAG match?
  230	bne,pt		%xcc, 2f			! Nope, branch
  231	 nop
  232	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
  233	membar		#Sync
  2342:	brnz,pt		%o2, 1b
  235	 sub		%o2, (1 << 5), %o2		! D-cache line size
  236
  237	/* The I-cache does not snoop local stores so we
  238	 * better flush that too when necessary.
  239	 */
  240	brnz,pt		%o1, __flush_icache_page
  241	 sllx		%o0, 11, %o0
  242	retl
  243	 nop
  244
  245#endif /* DCACHE_ALIASING_POSSIBLE */
 246
 247	.previous
 248
  249	/* Cheetah specific versions, patched at boot time. */
  250__cheetah_flush_tlb_mm: /* 19 insns */
	/* Cheetah cannot demap out of the secondary context (see the file
	 * header comment), so this version runs at trap level 1 with
	 * interrupts off, temporarily installs the context into
	 * PRIMARY_CONTEXT — preserving the nucleus page-size fields held
	 * above CTX_PGSZ1_NUC_SHIFT — and demaps via address 0x40
	 * (demap-context, primary).  Must remain exactly 19 insns: it is
	 * copied over __flush_tlb_mm by tlb_patch_one at boot.
	 */
  251	rdpr		%pstate, %g7
  252	andn		%g7, PSTATE_IE, %g2
  253	wrpr		%g2, 0x0, %pstate
  254	wrpr		%g0, 1, %tl
  255	mov		PRIMARY_CONTEXT, %o2
  256	mov		0x40, %g3
  257	ldxa		[%o2] ASI_DMMU, %g2
  258	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
  259	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
  260	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
  261	stxa		%o0, [%o2] ASI_DMMU
  262	stxa		%g0, [%g3] ASI_DMMU_DEMAP
  263	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	/* Restore the previous primary context, TL and PSTATE. */
  264	stxa		%g2, [%o2] ASI_DMMU
  265	sethi		%hi(KERNBASE), %o2
  266	flush		%o2
  267	wrpr		%g0, 0, %tl
  268	retl
  269	 wrpr		%g7, 0x0, %pstate
 270
  271__cheetah_flush_tlb_page:	/* 22 insns */
	/* Cheetah single-page demap: like __flush_tlb_page but via the
	 * PRIMARY context at TL=1 (Cheetah can't demap secondary), with
	 * the nucleus page-size fields of the context register preserved.
	 * Bit 0 of %o1 set => also demap the I-TLB.  Patched over
	 * __flush_tlb_page at boot; must remain exactly 22 insns.
	 */
  272	/* %o0 = context, %o1 = vaddr */
  273	rdpr		%pstate, %g7
  274	andn		%g7, PSTATE_IE, %g2
  275	wrpr		%g2, 0x0, %pstate
  276	wrpr		%g0, 1, %tl
  277	mov		PRIMARY_CONTEXT, %o4
  278	ldxa		[%o4] ASI_DMMU, %g2
  279	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
  280	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
  281	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
  282	stxa		%o0, [%o4] ASI_DMMU
  283	andcc		%o1, 1, %g0
  284	be,pn		%icc, 1f
  285	 andn		%o1, 1, %o3
  286	stxa		%g0, [%o3] ASI_IMMU_DEMAP
  2871:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	
  288	membar		#Sync
	/* Restore previous primary context, TL and PSTATE. */
  289	stxa		%g2, [%o4] ASI_DMMU
  290	sethi		%hi(KERNBASE), %o4
  291	flush		%o4
  292	wrpr		%g0, 0, %tl
  293	retl
  294	 wrpr		%g7, 0x0, %pstate
 295
  296__cheetah_flush_tlb_pending:	/* 27 insns */
	/* Cheetah batch demap: walk vaddrs[] from the end down to 0,
	 * demapping each page through the PRIMARY context at TL=1 (bit 0
	 * of an entry set => also demap I-TLB).  Nucleus page-size fields
	 * of the context register are preserved.  Patched over
	 * __flush_tlb_pending at boot; must remain exactly 27 insns.
	 */
  297	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
  298	rdpr		%pstate, %g7
  299	sllx		%o1, 3, %o1
  300	andn		%g7, PSTATE_IE, %g2
  301	wrpr		%g2, 0x0, %pstate
  302	wrpr		%g0, 1, %tl
  303	mov		PRIMARY_CONTEXT, %o4
  304	ldxa		[%o4] ASI_DMMU, %g2
  305	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
  306	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
  307	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
  308	stxa		%o0, [%o4] ASI_DMMU
	/* Loop: %o1 is the byte offset of the entry being processed. */
  3091:	sub		%o1, (1 << 3), %o1
  310	ldx		[%o2 + %o1], %o3
  311	andcc		%o3, 1, %g0
  312	be,pn		%icc, 2f
  313	 andn		%o3, 1, %o3
  314	stxa		%g0, [%o3] ASI_IMMU_DEMAP
  3152:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	
  316	membar		#Sync
  317	brnz,pt		%o1, 1b
  318	 nop
	/* Restore previous primary context, TL and PSTATE. */
  319	stxa		%g2, [%o4] ASI_DMMU
  320	sethi		%hi(KERNBASE), %o4
  321	flush		%o4
  322	wrpr		%g0, 0, %tl
  323	retl
  324	 wrpr		%g7, 0x0, %pstate
 325
  326__cheetah_flush_tlb_kernel_range:	/* 31 insns */
	/* Cheetah kernel-range demap.  Same page-by-page nucleus demap
	 * loop as __flush_tlb_kernel_range for ranges <= 2^18 bytes;
	 * larger ranges use the 0x80 "demap all" operation at label 3
	 * instead of a per-entry TLB walk.  Patched over
	 * __flush_tlb_kernel_range at boot; must remain exactly 31 insns.
	 */
  327	/* %o0=start, %o1=end */
  328	cmp		%o0, %o1
  329	be,pn		%xcc, 2f
  330	 sub		%o1, %o0, %o3
  331	srlx		%o3, 18, %o4
  332	brnz,pn		%o4, 3f
  333	 sethi		%hi(PAGE_SIZE), %o4
  334	sub		%o3, %o4, %o3
  335	or		%o0, 0x20, %o0		! Nucleus
  3361:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
  337	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
  338	membar		#Sync
  339	brnz,pt		%o3, 1b
  340	 sub		%o3, %o4, %o3
  3412:	sethi		%hi(KERNBASE), %o3
  342	flush		%o3
  343	retl
  344	 nop
	/* Big-range path: demap-all (address 0x80) on both MMUs. */
  3453:	mov		0x80, %o4
  346	stxa		%g0, [%o4] ASI_DMMU_DEMAP
  347	membar		#Sync
  348	stxa		%g0, [%o4] ASI_IMMU_DEMAP
  349	membar		#Sync
  350	retl
  351	 nop
  352	nop
  353	nop
  354	nop
  355	nop
  356	nop
  357	nop
  358	nop
 359
  360#ifdef DCACHE_ALIASING_POSSIBLE
  361__cheetah_flush_dcache_page: /* 11 insns */
	/* Cheetah D-cache flush for one page: convert the kernel virtual
	 * address to physical (subtract PAGE_OFFSET) and invalidate every
	 * 32-byte line of the page via ASI_DCACHE_INVALIDATE.  Patched
	 * over __flush_dcache_page at boot; must remain exactly 11 insns.
	 */
  362	sethi		%hi(PAGE_OFFSET), %g1
  363	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
  364	sub		%o0, %g1, %o0
  365	sethi		%hi(PAGE_SIZE), %o4
  3661:	subcc		%o4, (1 << 5), %o4
  367	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
  368	membar		#Sync
  369	bne,pt		%icc, 1b
  370	 nop
  371	retl		/* I-cache flush never needed on Cheetah, see callers. */
  372	 nop
  373#endif /* DCACHE_ALIASING_POSSIBLE */
 374
  375	/* Hypervisor specific versions, patched at boot time.  */
  376__hypervisor_tlb_tl0_error:
	/* Common TL=0 error funnel for the sun4v TLB hypercalls below.
	 * On entry %i0/%o0 = hypervisor error code, %i1/%o1 = the trap/
	 * function number that failed; reports via the C helper
	 * hypervisor_tlbop_error() and returns to the original caller.
	 */
  377	save		%sp, -192, %sp
  378	mov		%i0, %o0
  379	call		hypervisor_tlbop_error
  380	 mov		%i1, %o1
  381	ret
  382	 restore
 383
  384__hypervisor_flush_tlb_mm: /* 19 insns */
	/* sun4v version: flush a whole context via the
	 * HV_FAST_MMU_DEMAP_CTX fast trap (%o0 on entry = mmu context).
	 * A non-zero return in %o0 means hypervisor error — jump to
	 * __hypervisor_tlb_tl0_error with the failing function number in
	 * %o1.  Patched over __flush_tlb_mm at boot; exactly 19 insns.
	 */
  385	mov		%o0, %o2	/* ARG2: mmu context */
  386	mov		0, %o0		/* ARG0: CPU lists unimplemented */
  387	mov		0, %o1		/* ARG1: CPU lists unimplemented */
  388	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
  389	mov		HV_FAST_MMU_DEMAP_CTX, %o5
  390	ta		HV_FAST_TRAP
  391	brnz,pn		%o0, 1f
  392	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
  393	retl
  394	 nop
  3951:	sethi		%hi(__hypervisor_tlb_tl0_error), %o5
  396	jmpl		%o5 + %lo(__hypervisor_tlb_tl0_error), %g0
  397	 nop
  398	nop
  399	nop
  400	nop
  401	nop
  402	nop
  403	nop
 404
  405__hypervisor_flush_tlb_page: /* 22 insns */
	/* sun4v single-page unmap via the HV_MMU_UNMAP_ADDR_TRAP trap.
	 * The vaddr is page-aligned (shift down/up by PAGE_SHIFT) which
	 * also strips the sun4u-style I/D flag bit from %o1 before the
	 * call.  Errors funnel to __hypervisor_tlb_tl0_error.  Patched
	 * over __flush_tlb_page at boot; exactly 22 insns.
	 */
  406	/* %o0 = context, %o1 = vaddr */
  407	mov		%o0, %g2
  408	mov		%o1, %o0              /* ARG0: vaddr + IMMU-bit */
  409	mov		%g2, %o1	      /* ARG1: mmu context */
  410	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
  411	srlx		%o0, PAGE_SHIFT, %o0
  412	sllx		%o0, PAGE_SHIFT, %o0
  413	ta		HV_MMU_UNMAP_ADDR_TRAP
  414	brnz,pn		%o0, 1f
  415	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
  416	retl
  417	 nop
  4181:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
  419	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
  420	 nop
  421	nop
  422	nop
  423	nop
  424	nop
  425	nop
  426	nop
  427	nop
  428	nop
 429
  430__hypervisor_flush_tlb_pending: /* 27 insns */
	/* sun4v batch unmap: walk vaddrs[] from the end down to 0, issuing
	 * one HV_MMU_UNMAP_ADDR_TRAP per entry (each vaddr page-aligned
	 * first, which strips the sun4u I/D flag bit).  Loop state lives
	 * in %g1-%g3 because the hypercall consumes %o0-%o2.  Errors
	 * funnel to __hypervisor_tlb_tl0_error.  Patched over
	 * __flush_tlb_pending at boot; exactly 27 insns.
	 */
  431	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
  432	sllx		%o1, 3, %g1
  433	mov		%o2, %g2
  434	mov		%o0, %g3
  4351:	sub		%g1, (1 << 3), %g1
  436	ldx		[%g2 + %g1], %o0      /* ARG0: vaddr + IMMU-bit */
  437	mov		%g3, %o1	      /* ARG1: mmu context */
  438	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
  439	srlx		%o0, PAGE_SHIFT, %o0
  440	sllx		%o0, PAGE_SHIFT, %o0
  441	ta		HV_MMU_UNMAP_ADDR_TRAP
  442	brnz,pn		%o0, 1f
  443	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
  444	brnz,pt		%g1, 1b
  445	 nop
  446	retl
  447	 nop
  4481:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
  449	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
  450	 nop
  451	nop
  452	nop
  453	nop
  454	nop
  455	nop
  456	nop
  457	nop
  458	nop
 459
  460__hypervisor_flush_tlb_kernel_range: /* 31 insns */
	/* sun4v kernel-range flush.  Ranges <= 2^18 bytes are unmapped
	 * page by page with HV_MMU_UNMAP_ADDR_TRAP (context 0 = nucleus);
	 * larger ranges take label 4, demapping the whole nucleus context
	 * via HV_FAST_MMU_DEMAP_CTX.  Errors funnel to
	 * __hypervisor_tlb_tl0_error via label 3.  Patched over
	 * __flush_tlb_kernel_range at boot; exactly 31 insns.
	 */
  461	/* %o0=start, %o1=end */
  462	cmp		%o0, %o1
  463	be,pn		%xcc, 2f
  464	 sub		%o1, %o0, %g2
  465	srlx		%g2, 18, %g3
  466	brnz,pn		%g3, 4f
  467	 mov		%o0, %g1
  468	sethi		%hi(PAGE_SIZE), %g3
  469	sub		%g2, %g3, %g2
  4701:	add		%g1, %g2, %o0	/* ARG0: virtual address */
  471	mov		0, %o1		/* ARG1: mmu context */
  472	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
  473	ta		HV_MMU_UNMAP_ADDR_TRAP
  474	brnz,pn		%o0, 3f
  475	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
  476	brnz,pt		%g2, 1b
  477	 sub		%g2, %g3, %g2
  4782:	retl
  479	 nop
  4803:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
  481	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
  482	 nop
	/* Big-range path: demap the whole nucleus context (ctx 0). */
  4834:	mov		0, %o0		/* ARG0: CPU lists unimplemented */
  484	mov		0, %o1		/* ARG1: CPU lists unimplemented */
  485	mov		0, %o2		/* ARG2: mmu context == nucleus */
  486	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
  487	mov		HV_FAST_MMU_DEMAP_CTX, %o5
  488	ta		HV_FAST_TRAP
  489	brnz,pn		%o0, 3b
  490	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
  491	retl
  492	 nop
 493
  494#ifdef DCACHE_ALIASING_POSSIBLE
  495	/* XXX Niagara and friends have an 8K cache, so no aliasing is
  496	 * XXX possible, but nothing explicit in the Hypervisor API
  497	 * XXX guarantees this.
  498	 */
  499__hypervisor_flush_dcache_page:	/* 2 insns */
	/* Deliberate no-op (see the XXX note above).  Patched over
	 * __flush_dcache_page at boot with length 2.
	 */
  500	retl
  501	 nop
  502#endif
 503
  504tlb_patch_one:
	/* Boot-time code patcher: copy %o2 instructions (4-byte words)
	 * from source %o1 over destination %o0, issuing `flush` on each
	 * patched word so the I-cache sees the new code.  This is why
	 * every patchable routine in this file declares and must keep an
	 * exact instruction count.
	 */
  5051:	lduw		[%o1], %g1
  506	stw		%g1, [%o0]
  507	flush		%o0
  508	subcc		%o2, 1, %o2
  509	add		%o1, 4, %o1
  510	bne,pt		%icc, 1b
  511	 add		%o0, 4, %o0
  512	retl
  513	 nop
 514
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 515#ifdef CONFIG_SMP
 516	/* These are all called by the slaves of a cross call, at
 517	 * trap level 1, with interrupts fully disabled.
 518	 *
 519	 * Register usage:
 520	 *   %g5	mm->context	(all tlb flushes)
 521	 *   %g1	address arg 1	(tlb page and range flushes)
 522	 *   %g7	address arg 2	(tlb range flush only)
 523	 *
 524	 *   %g6	scratch 1
 525	 *   %g2	scratch 2
 526	 *   %g3	scratch 3
 527	 *   %g4	scratch 4
 528	 */
  529	.align		32
  530	.globl		xcall_flush_tlb_mm
  531xcall_flush_tlb_mm:	/* 24 insns */
	/* Cross-call receiver (runs at TL=1, interrupts off — see the
	 * register-usage comment above this section): flush all TLB
	 * entries for the context in %g5.  Temporarily installs %g5 into
	 * PRIMARY_CONTEXT (preserving nucleus page-size fields), demaps
	 * context via address 0x40, restores, and `retry`s the trap.
	 * Patched with the hypervisor version at boot; exactly 24 insns.
	 */
  532	mov		PRIMARY_CONTEXT, %g2
  533	ldxa		[%g2] ASI_DMMU, %g3
  534	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
  535	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
  536	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
  537	stxa		%g5, [%g2] ASI_DMMU
  538	mov		0x40, %g4
  539	stxa		%g0, [%g4] ASI_DMMU_DEMAP
  540	stxa		%g0, [%g4] ASI_IMMU_DEMAP
  541	stxa		%g3, [%g2] ASI_DMMU
  542	retry
  543	nop
  544	nop
  545	nop
  546	nop
  547	nop
  548	nop
  549	nop
  550	nop
  551	nop
  552	nop
  553	nop
  554	nop
  555	nop
 556
  557	.globl		xcall_flush_tlb_page
  558xcall_flush_tlb_page:	/* 20 insns */
	/* Cross-call receiver (TL=1): demap one page.  %g5 = context,
	 * %g1 = vaddr with bit 0 set if the I-TLB must be demapped too.
	 * Installs the context into PRIMARY_CONTEXT (preserving nucleus
	 * page-size fields), demaps, restores and `retry`s.  Patched with
	 * the hypervisor version at boot; exactly 20 insns.
	 */
  559	/* %g5=context, %g1=vaddr */
  560	mov		PRIMARY_CONTEXT, %g4
  561	ldxa		[%g4] ASI_DMMU, %g2
  562	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
  563	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
  564	or		%g5, %g4, %g5
  565	mov		PRIMARY_CONTEXT, %g4
  566	stxa		%g5, [%g4] ASI_DMMU
  567	andcc		%g1, 0x1, %g0
  568	be,pn		%icc, 2f
  569	 andn		%g1, 0x1, %g5
  570	stxa		%g0, [%g5] ASI_IMMU_DEMAP
  5712:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
  572	membar		#Sync
  573	stxa		%g2, [%g4] ASI_DMMU
  574	retry
  575	nop
  576	nop
  577	nop
  578	nop
  579	nop
 580
  581	.globl		xcall_flush_tlb_kernel_range
  582xcall_flush_tlb_kernel_range:	/* 44 insns */
	/* Cross-call receiver (TL=1), Spitfire version: flush kernel
	 * translations for [%g1, %g7).  Both bounds are page-aligned
	 * first.  Ranges <= 2^18 bytes use the page-by-page nucleus demap
	 * loop; larger ranges fall to label 2, which walks all 64 I/D-TLB
	 * entries and clears every non-locked one (lock bit 0x40,
	 * _PAGE_L_4U) — same scheme as the Spitfire slow path earlier in
	 * the file.  Patched at boot; exactly 44 insns.
	 */
  583	sethi		%hi(PAGE_SIZE - 1), %g2
  584	or		%g2, %lo(PAGE_SIZE - 1), %g2
  585	andn		%g1, %g2, %g1
  586	andn		%g7, %g2, %g7
  587	sub		%g7, %g1, %g3
  588	srlx		%g3, 18, %g2
  589	brnz,pn		%g2, 2f
  590	 sethi		%hi(PAGE_SIZE), %g2
  591	sub		%g3, %g2, %g3
  592	or		%g1, 0x20, %g1		! Nucleus
  5931:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
  594	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
  595	membar		#Sync
  596	brnz,pt		%g3, 1b
  597	 sub		%g3, %g2, %g3
  598	retry
  5992:	mov		63 * 8, %g1
  6001:	ldxa		[%g1] ASI_ITLB_DATA_ACCESS, %g2
  601	andcc		%g2, 0x40, %g0			/* _PAGE_L_4U */
  602	bne,pn		%xcc, 2f
  603	 mov		TLB_TAG_ACCESS, %g2
  604	stxa		%g0, [%g2] ASI_IMMU
  605	stxa		%g0, [%g1] ASI_ITLB_DATA_ACCESS
  606	membar		#Sync
  6072:	ldxa		[%g1] ASI_DTLB_DATA_ACCESS, %g2
  608	andcc		%g2, 0x40, %g0
  609	bne,pn		%xcc, 2f
  610	 mov		TLB_TAG_ACCESS, %g2
  611	stxa		%g0, [%g2] ASI_DMMU
  612	stxa		%g0, [%g1] ASI_DTLB_DATA_ACCESS
  613	membar		#Sync
  6142:	sub		%g1, 8, %g1
  615	brgez,pt	%g1, 1b
  616	 nop
  617	retry
  618	nop
  619	nop
  620	nop
  621	nop
  622	nop
  623	nop
  624	nop
  625	nop
  626	nop
 627
  628	/* This runs in a very controlled environment, so we do
  629	 * not need to worry about BH races etc.
  630	 */
  631	.globl		xcall_sync_tick
  632xcall_sync_tick:
	/* Cross-call receiver: leave the trap-time register window state
	 * via etrap_irq (the 661:/.sun4v_2insn_patch pair nops out the
	 * alternate-globals wrpr on sun4v), raise PIL to block further
	 * interrupts, call smp_synchronize_tick_client() in C, then
	 * return through rtrap_xcall.
	 */
  633
  634661:	rdpr		%pstate, %g2
  635	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
  636	.section	.sun4v_2insn_patch, "ax"
  637	.word		661b
  638	nop
  639	nop
  640	.previous
  641
  642	rdpr		%pil, %g2
  643	wrpr		%g0, PIL_NORMAL_MAX, %pil
  644	sethi		%hi(109f), %g7
  645	b,pt		%xcc, etrap_irq
  646109:	 or		%g7, %lo(109b), %g7
  647#ifdef CONFIG_TRACE_IRQFLAGS
  648	call		trace_hardirqs_off
  649	 nop
  650#endif
  651	call		smp_synchronize_tick_client
  652	 nop
  653	b		rtrap_xcall
  654	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
 655
  656	.globl		xcall_fetch_glob_regs
  657xcall_fetch_glob_regs:
	/* Cross-call receiver: snapshot this CPU's trap state into its
	 * 64-byte slot of global_cpu_snapshot[] (indexed by cpuid << 6):
	 * tstate/tpc/tnpc, %o7, %i7, the caller's %i7 from the previous
	 * register window (fetched by briefly rotating CWP down one),
	 * and the current thread pointer from this CPU's trap_block[].
	 */
  658	sethi		%hi(global_cpu_snapshot), %g1
  659	or		%g1, %lo(global_cpu_snapshot), %g1
  660	__GET_CPUID(%g2)
  661	sllx		%g2, 6, %g3
  662	add		%g1, %g3, %g1
  663	rdpr		%tstate, %g7
  664	stx		%g7, [%g1 + GR_SNAP_TSTATE]
  665	rdpr		%tpc, %g7
  666	stx		%g7, [%g1 + GR_SNAP_TPC]
  667	rdpr		%tnpc, %g7
  668	stx		%g7, [%g1 + GR_SNAP_TNPC]
  669	stx		%o7, [%g1 + GR_SNAP_O7]
  670	stx		%i7, [%g1 + GR_SNAP_I7]
  671	/* Don't try this at home kids... */
  672	rdpr		%cwp, %g3
  673	sub		%g3, 1, %g7
  674	wrpr		%g7, %cwp
  675	mov		%i7, %g7
  676	wrpr		%g3, %cwp
  677	stx		%g7, [%g1 + GR_SNAP_RPC]
  678	sethi		%hi(trap_block), %g7
  679	or		%g7, %lo(trap_block), %g7
  680	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
  681	add		%g7, %g2, %g7
  682	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
  683	stx		%g3, [%g1 + GR_SNAP_THREAD]
  684	retry
 685
  686	.globl		xcall_fetch_glob_pmu
  687xcall_fetch_glob_pmu:
	/* Cross-call receiver (pre-Niagara4 PMU): store this CPU's %pic
	 * and %pcr performance registers into its global_cpu_snapshot[]
	 * slot at offsets 4*8 and 0*8 respectively.
	 */
  688	sethi		%hi(global_cpu_snapshot), %g1
  689	or		%g1, %lo(global_cpu_snapshot), %g1
  690	__GET_CPUID(%g2)
  691	sllx		%g2, 6, %g3
  692	add		%g1, %g3, %g1
  693	rd		%pic, %g7
  694	stx		%g7, [%g1 + (4 * 8)]
  695	rd		%pcr, %g7
  696	stx		%g7, [%g1 + (0 * 8)]
  697	retry
 698
  699	.globl		xcall_fetch_glob_pmu_n4
  700xcall_fetch_glob_pmu_n4:
	/* Cross-call receiver (Niagara-4 PMU): snapshot the four PIC
	 * counters via ASI_PIC (offsets 0x0/0x8/0x10/0x18 into slots
	 * 4..7) and the four PCR registers via the
	 * HV_FAST_VT_GET_PERFREG hypercall (regs 3..0 into slots 3..0).
	 * %o0/%o1/%o5 are preserved in %g2/%g3/%g7 across the hypercalls
	 * because we are in a trap handler borrowing the interrupted
	 * code's out-registers.
	 */
  701	sethi		%hi(global_cpu_snapshot), %g1
  702	or		%g1, %lo(global_cpu_snapshot), %g1
  703	__GET_CPUID(%g2)
  704	sllx		%g2, 6, %g3
  705	add		%g1, %g3, %g1
  706
  707	ldxa		[%g0] ASI_PIC, %g7
  708	stx		%g7, [%g1 + (4 * 8)]
  709	mov		0x08, %g3
  710	ldxa		[%g3] ASI_PIC, %g7
  711	stx		%g7, [%g1 + (5 * 8)]
  712	mov		0x10, %g3
  713	ldxa		[%g3] ASI_PIC, %g7
  714	stx		%g7, [%g1 + (6 * 8)]
  715	mov		0x18, %g3
  716	ldxa		[%g3] ASI_PIC, %g7
  717	stx		%g7, [%g1 + (7 * 8)]
  718
  719	mov		%o0, %g2
  720	mov		%o1, %g3
  721	mov		%o5, %g7
  722
  723	mov		HV_FAST_VT_GET_PERFREG, %o5
  724	mov		3, %o0
  725	ta		HV_FAST_TRAP
  726	stx		%o1, [%g1 + (3 * 8)]
  727	mov		HV_FAST_VT_GET_PERFREG, %o5
  728	mov		2, %o0
  729	ta		HV_FAST_TRAP
  730	stx		%o1, [%g1 + (2 * 8)]
  731	mov		HV_FAST_VT_GET_PERFREG, %o5
  732	mov		1, %o0
  733	ta		HV_FAST_TRAP
  734	stx		%o1, [%g1 + (1 * 8)]
  735	mov		HV_FAST_VT_GET_PERFREG, %o5
  736	mov		0, %o0
  737	ta		HV_FAST_TRAP
  738	stx		%o1, [%g1 + (0 * 8)]
  739
  740	mov		%g2, %o0
  741	mov		%g3, %o1
  742	mov		%g7, %o5
  743
  744	retry
 745
  746__cheetah_xcall_flush_tlb_kernel_range:	/* 44 insns */
	/* Cheetah replacement for xcall_flush_tlb_kernel_range (TL=1):
	 * same page-by-page nucleus demap loop for ranges <= 2^18 bytes,
	 * but large ranges use the 0x80 "demap all" operation instead of
	 * walking TLB entries.  Patched over xcall_flush_tlb_kernel_range
	 * at boot; must remain exactly 44 insns.
	 */
  747	sethi		%hi(PAGE_SIZE - 1), %g2
  748	or		%g2, %lo(PAGE_SIZE - 1), %g2
  749	andn		%g1, %g2, %g1
  750	andn		%g7, %g2, %g7
  751	sub		%g7, %g1, %g3
  752	srlx		%g3, 18, %g2
  753	brnz,pn		%g2, 2f
  754	 sethi		%hi(PAGE_SIZE), %g2
  755	sub		%g3, %g2, %g3
  756	or		%g1, 0x20, %g1		! Nucleus
  7571:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
  758	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
  759	membar		#Sync
  760	brnz,pt		%g3, 1b
  761	 sub		%g3, %g2, %g3
  762	retry
  7632:	mov		0x80, %g2
  764	stxa		%g0, [%g2] ASI_DMMU_DEMAP
  765	membar		#Sync
  766	stxa		%g0, [%g2] ASI_IMMU_DEMAP
  767	membar		#Sync
  768	retry
  769	nop
  770	nop
  771	nop
  772	nop
  773	nop
  774	nop
  775	nop
  776	nop
  777	nop
  778	nop
  779	nop
  780	nop
  781	nop
  782	nop
  783	nop
  784	nop
  785	nop
  786	nop
  787	nop
  788	nop
  789	nop
  790	nop
 791
  792#ifdef DCACHE_ALIASING_POSSIBLE
  793	.align		32
  794	.globl		xcall_flush_dcache_page_cheetah
  795xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Cross-call receiver: invalidate every 32-byte D-cache line of
	 * the physical page in %g1 via ASI_DCACHE_INVALIDATE.
	 */
  796	sethi		%hi(PAGE_SIZE), %g3
  7971:	subcc		%g3, (1 << 5), %g3
  798	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
  799	membar		#Sync
  800	bne,pt		%icc, 1b
  801	 nop
  802	retry
  803	nop
  804#endif /* DCACHE_ALIASING_POSSIBLE */
 805
  806	.globl		xcall_flush_dcache_page_spitfire
  807xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
  808				     %g7 == kernel page virtual address
  809				     %g5 == (page->mapping != NULL)  */
	/* Cross-call receiver: Spitfire D-cache (and conditionally
	 * I-cache) flush for one page.  First pass walks the 16K D-cache
	 * by tag (32-byte lines), invalidating lines whose tag matches
	 * %g1 >> (13-2); then, if %g5 says the page has a mapping, a
	 * second loop issues `flush` over the page's kernel virtual
	 * address to clear the I-cache.
	 */
  810#ifdef DCACHE_ALIASING_POSSIBLE
  811	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
  812	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
  813	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
  8141:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
  815	andcc		%g2, 0x3, %g0
  816	be,pn		%xcc, 2f
  817	 andn		%g2, 0x3, %g2
  818	cmp		%g2, %g1

  820	bne,pt		%xcc, 2f
  821	 nop
  822	stxa		%g0, [%g3] ASI_DCACHE_TAG
  823	membar		#Sync
  8242:	cmp		%g3, 0
  825	bne,pt		%xcc, 1b
  826	 sub		%g3, (1 << 5), %g3

  828	brz,pn		%g5, 2f
  829#endif /* DCACHE_ALIASING_POSSIBLE */
  830	 sethi		%hi(PAGE_SIZE), %g3

  8321:	flush		%g7
  833	subcc		%g3, (1 << 5), %g3
  834	bne,pt		%icc, 1b
  835	 add		%g7, (1 << 5), %g7

  8372:	retry
  838	nop
  839	nop
 840
  841	/* %g5:	error
  842	 * %g6:	tlb op
  843	 */
__hypervisor_tlb_xcall_error:
	/* Error funnel for the sun4v xcall TLB handlers below: stash the
	 * error code and failing op where etrap's register shuffle will
	 * land them in %l4/%l5, enter the kernel via etrap, report via
	 * hypervisor_tlbop_error_xcall(), and leave through rtrap.
	 */
  845	mov	%g5, %g4
  846	mov	%g6, %g5
  847	ba,pt	%xcc, etrap
  848	 rd	%pc, %g7
  849	mov	%l4, %o0
  850	call	hypervisor_tlbop_error_xcall
  851	 mov	%l5, %o1
  852	ba,a,pt	%xcc, rtrap
 853
  854	.globl		__hypervisor_xcall_flush_tlb_mm
  855__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
	/* sun4v replacement for xcall_flush_tlb_mm: demap the context in
	 * %g5 via HV_FAST_MMU_DEMAP_CTX.  The interrupted code's out
	 * registers are saved in %g1-%g4/%g7 around the hypercall and
	 * restored before `retry`.  On error, %g5/%g6 carry the error
	 * code and op to __hypervisor_tlb_xcall_error.  Patched over
	 * xcall_flush_tlb_mm at boot; exactly 24 insns.
	 */
  856	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
  857	mov		%o0, %g2
  858	mov		%o1, %g3
  859	mov		%o2, %g4
  860	mov		%o3, %g1
  861	mov		%o5, %g7
  862	clr		%o0		/* ARG0: CPU lists unimplemented */
  863	clr		%o1		/* ARG1: CPU lists unimplemented */
  864	mov		%g5, %o2	/* ARG2: mmu context */
  865	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
  866	mov		HV_FAST_MMU_DEMAP_CTX, %o5
  867	ta		HV_FAST_TRAP
  868	mov		HV_FAST_MMU_DEMAP_CTX, %g6
  869	brnz,pn		%o0, 1f
  870	 mov		%o0, %g5
  871	mov		%g2, %o0
  872	mov		%g3, %o1
  873	mov		%g4, %o2
  874	mov		%g1, %o3
  875	mov		%g7, %o5
  876	membar		#Sync
  877	retry
  8781:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
  879	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
  880	 nop
 881
  882	.globl		__hypervisor_xcall_flush_tlb_page
  883__hypervisor_xcall_flush_tlb_page: /* 20 insns */
	/* sun4v replacement for xcall_flush_tlb_page: unmap the page at
	 * %g1 in context %g5 via HV_MMU_UNMAP_ADDR_TRAP (the vaddr is
	 * page-aligned first, stripping the sun4u I/D flag bit).  Out
	 * registers are preserved in %g2-%g4; errors go to
	 * __hypervisor_tlb_xcall_error with code/op in %g5/%g6.  Patched
	 * over xcall_flush_tlb_page at boot; exactly 20 insns.
	 */
  884	/* %g5=ctx, %g1=vaddr */
  885	mov		%o0, %g2
  886	mov		%o1, %g3
  887	mov		%o2, %g4
  888	mov		%g1, %o0	        /* ARG0: virtual address */
  889	mov		%g5, %o1		/* ARG1: mmu context */
  890	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
  891	srlx		%o0, PAGE_SHIFT, %o0
  892	sllx		%o0, PAGE_SHIFT, %o0
  893	ta		HV_MMU_UNMAP_ADDR_TRAP
  894	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
  895	brnz,a,pn	%o0, 1f
  896	 mov		%o0, %g5
  897	mov		%g2, %o0
  898	mov		%g3, %o1
  899	mov		%g4, %o2
  900	membar		#Sync
  901	retry
  9021:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
  903	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
  904	 nop
 905
  906	.globl		__hypervisor_xcall_flush_tlb_kernel_range
  907__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
	/* sun4v replacement for xcall_flush_tlb_kernel_range: unmap
	 * [%g1, %g7) page by page in the nucleus context (ctx 0) via
	 * HV_MMU_UNMAP_ADDR_TRAP; ranges > 2^18 bytes instead demap the
	 * whole nucleus context at label 2 via HV_FAST_MMU_DEMAP_CTX
	 * (which, on success, re-enters the page loop at 1b with %g3
	 * presumably already negative so it exits — intricate; do not
	 * reorder).  Out registers preserved in %g2/%g4/%g7 (+%g1/%g3 on
	 * the big-range path); errors go to __hypervisor_tlb_xcall_error.
	 * Patched over xcall_flush_tlb_kernel_range; exactly 44 insns.
	 */
  908	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
  909	sethi		%hi(PAGE_SIZE - 1), %g2
  910	or		%g2, %lo(PAGE_SIZE - 1), %g2
  911	andn		%g1, %g2, %g1
  912	andn		%g7, %g2, %g7
  913	sub		%g7, %g1, %g3
  914	srlx		%g3, 18, %g7
  915	add		%g2, 1, %g2
  916	sub		%g3, %g2, %g3
  917	mov		%o0, %g2
  918	mov		%o1, %g4
  919	brnz,pn		%g7, 2f
  920	 mov		%o2, %g7
  9211:	add		%g1, %g3, %o0	/* ARG0: virtual address */
  922	mov		0, %o1		/* ARG1: mmu context */
  923	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
  924	ta		HV_MMU_UNMAP_ADDR_TRAP
  925	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
  926	brnz,pn		%o0, 1f
  927	 mov		%o0, %g5
  928	sethi		%hi(PAGE_SIZE), %o2
  929	brnz,pt		%g3, 1b
  930	 sub		%g3, %o2, %g3
  9315:	mov		%g2, %o0
  932	mov		%g4, %o1
  933	mov		%g7, %o2
  934	membar		#Sync
  935	retry
  9361:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
  937	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
  938	 nop
  9392:	mov		%o3, %g1
  940	mov		%o5, %g3
  941	mov		0, %o0		/* ARG0: CPU lists unimplemented */
  942	mov		0, %o1		/* ARG1: CPU lists unimplemented */
  943	mov		0, %o2		/* ARG2: mmu context == nucleus */
  944	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
  945	mov		HV_FAST_MMU_DEMAP_CTX, %o5
  946	ta		HV_FAST_TRAP
  947	mov		%g1, %o3
  948	brz,pt		%o0, 5b
  949	 mov		%g3, %o5
  950	mov		HV_FAST_MMU_DEMAP_CTX, %g6
  951	ba,pt		%xcc, 1b
  952	 clr		%g5
 953
  954	/* These just get rescheduled to PIL vectors. */
  955	.globl		xcall_call_function
xcall_call_function:
	/* Each stub below converts a cross-call into the matching PIL
	 * soft interrupt on this CPU and returns with `retry`; the real
	 * work happens later in the PIL handler.
	 */
  957	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
  958	retry

  960	.globl		xcall_call_function_single
xcall_call_function_single:
  962	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
  963	retry

  965	.globl		xcall_receive_signal
xcall_receive_signal:
  967	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
  968	retry

  970	.globl		xcall_capture
xcall_capture:
  972	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
  973	retry
 974
 
 
 
 
 
  975#ifdef CONFIG_KGDB
  976	.globl		xcall_kgdb_capture
xcall_kgdb_capture:
	/* KGDB capture cross-call: raise the KGDB PIL soft interrupt. */
  978	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
  979	retry
  980#endif
 981
 982#endif /* CONFIG_SMP */
 983
  984	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* Boot-time patcher for Cheetah CPUs: overwrite each generic
	 * (Spitfire) flush routine with its __cheetah_* variant using
	 * tlb_patch_one.  The instruction counts passed in %o2 (19, 22,
	 * 27, 31, 11, 44) must match the declared sizes of both the
	 * source and destination routines.
	 */
  986	save		%sp, -128, %sp

  988	sethi		%hi(__flush_tlb_mm), %o0
  989	or		%o0, %lo(__flush_tlb_mm), %o0
  990	sethi		%hi(__cheetah_flush_tlb_mm), %o1
  991	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
  992	call		tlb_patch_one
  993	 mov		19, %o2

  995	sethi		%hi(__flush_tlb_page), %o0
  996	or		%o0, %lo(__flush_tlb_page), %o0
  997	sethi		%hi(__cheetah_flush_tlb_page), %o1
  998	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
  999	call		tlb_patch_one
 1000	 mov		22, %o2

 1002	sethi		%hi(__flush_tlb_pending), %o0
 1003	or		%o0, %lo(__flush_tlb_pending), %o0
 1004	sethi		%hi(__cheetah_flush_tlb_pending), %o1
 1005	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
 1006	call		tlb_patch_one
 1007	 mov		27, %o2

 1009	sethi		%hi(__flush_tlb_kernel_range), %o0
 1010	or		%o0, %lo(__flush_tlb_kernel_range), %o0
 1011	sethi		%hi(__cheetah_flush_tlb_kernel_range), %o1
 1012	or		%o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
 1013	call		tlb_patch_one
 1014	 mov		31, %o2

 1016#ifdef DCACHE_ALIASING_POSSIBLE
 1017	sethi		%hi(__flush_dcache_page), %o0
 1018	or		%o0, %lo(__flush_dcache_page), %o0
 1019	sethi		%hi(__cheetah_flush_dcache_page), %o1
 1020	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
 1021	call		tlb_patch_one
 1022	 mov		11, %o2
 1023#endif /* DCACHE_ALIASING_POSSIBLE */

 1025#ifdef CONFIG_SMP
 1026	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
 1027	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
 1028	sethi		%hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
 1029	or		%o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
 1030	call		tlb_patch_one
 1031	 mov		44, %o2
 1032#endif /* CONFIG_SMP */

 1034	ret
 1035	 restore
1036
 1037	.globl		hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	/* Boot-time patcher for sun4v (hypervisor) systems: overwrite the
	 * generic flush routines and the SMP cross-call handlers with
	 * their __hypervisor_* variants using tlb_patch_one.  The counts
	 * in %o2 (19, 22, 27, 31, 2, 24, 20, 44) must match the declared
	 * sizes of both source and destination routines.
	 */
 1039	save		%sp, -128, %sp

 1041	sethi		%hi(__flush_tlb_mm), %o0
 1042	or		%o0, %lo(__flush_tlb_mm), %o0
 1043	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
 1044	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
 1045	call		tlb_patch_one
 1046	 mov		19, %o2

 1048	sethi		%hi(__flush_tlb_page), %o0
 1049	or		%o0, %lo(__flush_tlb_page), %o0
 1050	sethi		%hi(__hypervisor_flush_tlb_page), %o1
 1051	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
 1052	call		tlb_patch_one
 1053	 mov		22, %o2

 1055	sethi		%hi(__flush_tlb_pending), %o0
 1056	or		%o0, %lo(__flush_tlb_pending), %o0
 1057	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
 1058	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
 1059	call		tlb_patch_one
 1060	 mov		27, %o2

 1062	sethi		%hi(__flush_tlb_kernel_range), %o0
 1063	or		%o0, %lo(__flush_tlb_kernel_range), %o0
 1064	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
 1065	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
 1066	call		tlb_patch_one
 1067	 mov		31, %o2

 1069#ifdef DCACHE_ALIASING_POSSIBLE
 1070	sethi		%hi(__flush_dcache_page), %o0
 1071	or		%o0, %lo(__flush_dcache_page), %o0
 1072	sethi		%hi(__hypervisor_flush_dcache_page), %o1
 1073	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
 1074	call		tlb_patch_one
 1075	 mov		2, %o2
 1076#endif /* DCACHE_ALIASING_POSSIBLE */

 1078#ifdef CONFIG_SMP
 1079	sethi		%hi(xcall_flush_tlb_mm), %o0
 1080	or		%o0, %lo(xcall_flush_tlb_mm), %o0
 1081	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
 1082	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
 1083	call		tlb_patch_one
 1084	 mov		24, %o2

 1086	sethi		%hi(xcall_flush_tlb_page), %o0
 1087	or		%o0, %lo(xcall_flush_tlb_page), %o0
 1088	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
 1089	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
 1090	call		tlb_patch_one
 1091	 mov		20, %o2

 1093	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
 1094	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
 1095	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
 1096	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
 1097	call		tlb_patch_one
 1098	 mov		44, %o2
 1099#endif /* CONFIG_SMP */

 1101	ret
 1102	 restore
v4.6 — NOTE(capture): what follows is a second, older capture of the same file
(kernel v4.6) appended by the web mirror.  It duplicates the routines above
(with older instruction counts, e.g. __flush_tlb_mm at 18 insns and
__flush_tlb_kernel_range at 16 insns, before the large-range slow paths were
added) and is cut off mid-file.
 
  1/*
  2 * ultra.S: Don't expand these all over the place...
  3 *
  4 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
  5 */
  6
 
  7#include <asm/asi.h>
  8#include <asm/pgtable.h>
  9#include <asm/page.h>
 10#include <asm/spitfire.h>
 11#include <asm/mmu_context.h>
 12#include <asm/mmu.h>
 13#include <asm/pil.h>
 14#include <asm/head.h>
 15#include <asm/thread_info.h>
 16#include <asm/cacheflush.h>
 17#include <asm/hypervisor.h>
 18#include <asm/cpudata.h>
 19
 20	/* Basically, most of the Spitfire vs. Cheetah madness
 21	 * has to do with the fact that Cheetah does not support
 22	 * IMMU flushes out of the secondary context.  Someone needs
 23	 * to throw a south lake birthday party for the folks
 24	 * in Microelectronics who refused to fix this shit.
 25	 */
 26
 27	/* This file is meant to be read efficiently by the CPU, not humans.
 28	 * Staraj sie tego nikomu nie pierdolnac...
 29	 */
 30	.text
 31	.align		32
 32	.globl		__flush_tlb_mm
 33__flush_tlb_mm:		/* 18 insns */
 34	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
 35	ldxa		[%o1] ASI_DMMU, %g2
 36	cmp		%g2, %o0
 37	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
 38	 mov		0x50, %g3
 39	stxa		%g0, [%g3] ASI_DMMU_DEMAP
 40	stxa		%g0, [%g3] ASI_IMMU_DEMAP
 41	sethi		%hi(KERNBASE), %g3
 42	flush		%g3
 43	retl
 44	 nop
 45	nop
 46	nop
 47	nop
 48	nop
 49	nop
 50	nop
 51	nop
 52	nop
 53	nop
 54
 55	.align		32
 56	.globl		__flush_tlb_page
 57__flush_tlb_page:	/* 22 insns */
 58	/* %o0 = context, %o1 = vaddr */
 59	rdpr		%pstate, %g7
 60	andn		%g7, PSTATE_IE, %g2
 61	wrpr		%g2, %pstate
 62	mov		SECONDARY_CONTEXT, %o4
 63	ldxa		[%o4] ASI_DMMU, %g2
 64	stxa		%o0, [%o4] ASI_DMMU
 65	andcc		%o1, 1, %g0
 66	andn		%o1, 1, %o3
 67	be,pn		%icc, 1f
 68	 or		%o3, 0x10, %o3
 69	stxa		%g0, [%o3] ASI_IMMU_DEMAP
 701:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
 71	membar		#Sync
 72	stxa		%g2, [%o4] ASI_DMMU
 73	sethi		%hi(KERNBASE), %o4
 74	flush		%o4
 75	retl
 76	 wrpr		%g7, 0x0, %pstate
 77	nop
 78	nop
 79	nop
 80	nop
 81
  82	.align		32
  83	.globl		__flush_tlb_pending
  84__flush_tlb_pending:	/* 26 insns */
  85	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Spitfire: batched variant of __flush_tlb_page.  Walks the
	 * vaddrs[] array backwards (%o1 is converted to a byte offset
	 * nr*8 and counted down to zero), demapping each entry from
	 * the secondary context.  Per-entry bit 0 = "also flush I-TLB",
	 * as in __flush_tlb_page.  Interrupts stay disabled for the
	 * whole batch.
	 */
  86	rdpr		%pstate, %g7
  87	sllx		%o1, 3, %o1
  88	andn		%g7, PSTATE_IE, %g2
  89	wrpr		%g2, %pstate
  90	mov		SECONDARY_CONTEXT, %o4
  91	ldxa		[%o4] ASI_DMMU, %g2
  92	stxa		%o0, [%o4] ASI_DMMU
  931:	sub		%o1, (1 << 3), %o1
  94	ldx		[%o2 + %o1], %o3
  95	andcc		%o3, 1, %g0
  96	andn		%o3, 1, %o3
  97	be,pn		%icc, 2f
  98	 or		%o3, 0x10, %o3
  99	stxa		%g0, [%o3] ASI_IMMU_DEMAP
 1002:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
 101	membar		#Sync
 102	brnz,pt		%o1, 1b
 103	 nop
 104	stxa		%g2, [%o4] ASI_DMMU
 105	sethi		%hi(KERNBASE), %o4
 106	flush		%o4
 107	retl
 108	 wrpr		%g7, 0x0, %pstate
 109	nop
 110	nop
 111	nop
 112	nop
113
 114	.align		32
 115	.globl		__flush_tlb_kernel_range
 116__flush_tlb_kernel_range:	/* 16 insns */
 117	/* %o0=start, %o1=end */
	/* Demap kernel (nucleus context, 0x20) mappings page by page,
	 * iterating the offset %o3 from (end - start - PAGE_SIZE) down
	 * to 0.  An empty range (start == end) is a no-op.
	 */
 118	cmp		%o0, %o1
 119	be,pn		%xcc, 2f



 120	 sethi		%hi(PAGE_SIZE), %o4
 121	sub		%o1, %o0, %o3
 122	sub		%o3, %o4, %o3
 123	or		%o0, 0x20, %o0		! Nucleus
 1241:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
 125	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
 126	membar		#Sync
 127	brnz,pt		%o3, 1b
 128	 sub		%o3, %o4, %o3
 1292:	sethi		%hi(KERNBASE), %o3
 130	flush		%o3
 131	retl
 132	 nop
 133	nop
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
 135__spitfire_flush_tlb_mm_slow:
	/* Slow path of __flush_tlb_mm, entered when %o0 is not the current
	 * secondary context.  On entry (set up by the fast path):
	 *   %o0 = target ctx, %o1 = SECONDARY_CONTEXT,
	 *   %g2 = previously installed ctx, %g3 = 0x50 (demap-context addr).
	 * The wrpr XORs PSTATE_IE into %pstate, disabling interrupts while
	 * the target context is temporarily installed; the final wrpr in
	 * the retl delay slot restores the original %pstate.
	 */
 136	rdpr		%pstate, %g1
 137	wrpr		%g1, PSTATE_IE, %pstate
 138	stxa		%o0, [%o1] ASI_DMMU
 139	stxa		%g0, [%g3] ASI_DMMU_DEMAP
 140	stxa		%g0, [%g3] ASI_IMMU_DEMAP
 141	flush		%g6
 142	stxa		%g2, [%o1] ASI_DMMU
 143	sethi		%hi(KERNBASE), %o1
 144	flush		%o1
 145	retl
 146	 wrpr		%g1, 0, %pstate
147
148/*
149 * The following code flushes one page_size worth.
150 */
 151	.section .kprobes.text, "ax"
 152	.align		32
 153	.globl		__flush_icache_page
 154__flush_icache_page:	/* %o0 = phys_page */
	/* Flush one page's worth of I-cache.  The physical address is
	 * page-aligned (srlx/sllx by PAGE_SHIFT) and converted to its
	 * kernel linear-map virtual address by adding PAGE_OFFSET, then
	 * a `flush` is issued every 32 bytes across the page.
	 */
 155	srlx		%o0, PAGE_SHIFT, %o0
 156	sethi		%hi(PAGE_OFFSET), %g1
 157	sllx		%o0, PAGE_SHIFT, %o0
 158	sethi		%hi(PAGE_SIZE), %g2
 159	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
 160	add		%o0, %g1, %o0
 1611:	subcc		%g2, 32, %g2
 162	bne,pt		%icc, 1b
 163	 flush		%o0 + %g2
 164	retl
 165	 nop
166
167#ifdef DCACHE_ALIASING_POSSIBLE
168
169#if (PAGE_SHIFT != 13)
170#error only page shift of 13 is supported by dcache flush
171#endif
172
173#define DTAG_MASK 0x3
174
175	/* This routine is Spitfire specific so the hardcoded
176	 * D-cache size and line-size are OK.
177	 */
 178	.align		64
 179	.globl		__flush_dcache_page
 180__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	/* Spitfire: walk the 16K direct-mapped D-cache (32-byte lines)
	 * via ASI_DCACHE_TAG, invalidating every valid line whose tag
	 * matches this page.  %o0 is converted kaddr -> phys -> tag
	 * (phys >> 11); the sllx by 11 at the end reconstructs the
	 * physical address for the optional I-cache tail call.
	 */
 181	sethi		%hi(PAGE_OFFSET), %g1
 182	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
 183	sub		%o0, %g1, %o0			! physical address
 184	srlx		%o0, 11, %o0			! make D-cache TAG
 185	sethi		%hi(1 << 14), %o2		! D-cache size
 186	sub		%o2, (1 << 5), %o2		! D-cache line size
 1871:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
 188	andcc		%o3, DTAG_MASK, %g0		! Valid?
 189	be,pn		%xcc, 2f			! Nope, branch
 190	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
 191	cmp		%o3, %o0			! TAG match?
 192	bne,pt		%xcc, 2f			! Nope, branch
 193	 nop
 194	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
 195	membar		#Sync
 1962:	brnz,pt		%o2, 1b
 197	 sub		%o2, (1 << 5), %o2		! D-cache line size
 198
 199	/* The I-cache does not snoop local stores so we
 200	 * better flush that too when necessary.
 201	 */
 202	brnz,pt		%o1, __flush_icache_page
 203	 sllx		%o0, 11, %o0
 204	retl
 205	 nop
206
207#endif /* DCACHE_ALIASING_POSSIBLE */
208
209	.previous
210
211	/* Cheetah specific versions, patched at boot time. */
 212__cheetah_flush_tlb_mm: /* 19 insns */
	/* Cheetah cannot IMMU-demap from the secondary context (see the
	 * rant at the top of the file), so this version installs the
	 * target ctx in the PRIMARY context register at TL=1 with
	 * interrupts off, demap-context (address 0x40 = primary), then
	 * restores the old primary ctx.  The upper nucleus page-size
	 * fields of the context register are preserved across the swap.
	 * Patched over __flush_tlb_mm; count must stay 19.
	 */
 213	rdpr		%pstate, %g7
 214	andn		%g7, PSTATE_IE, %g2
 215	wrpr		%g2, 0x0, %pstate
 216	wrpr		%g0, 1, %tl
 217	mov		PRIMARY_CONTEXT, %o2
 218	mov		0x40, %g3
 219	ldxa		[%o2] ASI_DMMU, %g2
 220	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
 221	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
 222	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
 223	stxa		%o0, [%o2] ASI_DMMU
 224	stxa		%g0, [%g3] ASI_DMMU_DEMAP
 225	stxa		%g0, [%g3] ASI_IMMU_DEMAP
 226	stxa		%g2, [%o2] ASI_DMMU
 227	sethi		%hi(KERNBASE), %o2
 228	flush		%o2
 229	wrpr		%g0, 0, %tl
 230	retl
 231	 wrpr		%g7, 0x0, %pstate
232
 233__cheetah_flush_tlb_page:	/* 22 insns */
 234	/* %o0 = context, %o1 = vaddr */
	/* Cheetah single-page flush: same trick as __cheetah_flush_tlb_mm,
	 * using the PRIMARY context at TL=1.  Bit 0 of %o1 = "executable",
	 * i.e. also demap the I-TLB entry.  Patched over __flush_tlb_page;
	 * count must stay 22.
	 */
 235	rdpr		%pstate, %g7
 236	andn		%g7, PSTATE_IE, %g2
 237	wrpr		%g2, 0x0, %pstate
 238	wrpr		%g0, 1, %tl
 239	mov		PRIMARY_CONTEXT, %o4
 240	ldxa		[%o4] ASI_DMMU, %g2
 241	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
 242	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
 243	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
 244	stxa		%o0, [%o4] ASI_DMMU
 245	andcc		%o1, 1, %g0
 246	be,pn		%icc, 1f
 247	 andn		%o1, 1, %o3
 248	stxa		%g0, [%o3] ASI_IMMU_DEMAP
 2491:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	
 250	membar		#Sync
 251	stxa		%g2, [%o4] ASI_DMMU
 252	sethi		%hi(KERNBASE), %o4
 253	flush		%o4
 254	wrpr		%g0, 0, %tl
 255	retl
 256	 wrpr		%g7, 0x0, %pstate
257
 258__cheetah_flush_tlb_pending:	/* 27 insns */
 259	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Cheetah batched flush: installs ctx in PRIMARY at TL=1, then
	 * walks vaddrs[] backwards (byte offset nr*8 counted down to 0),
	 * demapping each entry; per-entry bit 0 = also flush I-TLB.
	 * Patched over __flush_tlb_pending; count must stay 27.
	 */
 260	rdpr		%pstate, %g7
 261	sllx		%o1, 3, %o1
 262	andn		%g7, PSTATE_IE, %g2
 263	wrpr		%g2, 0x0, %pstate
 264	wrpr		%g0, 1, %tl
 265	mov		PRIMARY_CONTEXT, %o4
 266	ldxa		[%o4] ASI_DMMU, %g2
 267	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
 268	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
 269	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
 270	stxa		%o0, [%o4] ASI_DMMU
 2711:	sub		%o1, (1 << 3), %o1
 272	ldx		[%o2 + %o1], %o3
 273	andcc		%o3, 1, %g0
 274	be,pn		%icc, 2f
 275	 andn		%o3, 1, %o3
 276	stxa		%g0, [%o3] ASI_IMMU_DEMAP
 2772:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	
 278	membar		#Sync
 279	brnz,pt		%o1, 1b
 280	 nop
 281	stxa		%g2, [%o4] ASI_DMMU
 282	sethi		%hi(KERNBASE), %o4
 283	flush		%o4
 284	wrpr		%g0, 0, %tl
 285	retl
 286	 wrpr		%g7, 0x0, %pstate
287
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
288#ifdef DCACHE_ALIASING_POSSIBLE
 289__cheetah_flush_dcache_page: /* 11 insns */
	/* Cheetah D-cache flush: no tag probing needed; the displacement
	 * invalidate ASI flushes by physical address directly, one
	 * 32-byte line at a time across the page.  %o0 = kernel vaddr,
	 * converted to a physical address via PAGE_OFFSET.
	 * Patched over __flush_dcache_page; count must stay 11.
	 */
 290	sethi		%hi(PAGE_OFFSET), %g1
 291	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
 292	sub		%o0, %g1, %o0
 293	sethi		%hi(PAGE_SIZE), %o4
 2941:	subcc		%o4, (1 << 5), %o4
 295	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
 296	membar		#Sync
 297	bne,pt		%icc, 1b
 298	 nop
 299	retl		/* I-cache flush never needed on Cheetah, see callers. */
 300	 nop
301#endif /* DCACHE_ALIASING_POSSIBLE */
302
303	/* Hypervisor specific versions, patched at boot time.  */
 304__hypervisor_tlb_tl0_error:
	/* Common TL=0 error path for the hypervisor TLB ops below:
	 * report a failed hypervisor call via the C helper.
	 * %i0 (caller's %o0) = hypervisor error code,
	 * %i1 (caller's %o1) = the trap/function number attempted.
	 */
 305	save		%sp, -192, %sp
 306	mov		%i0, %o0
 307	call		hypervisor_tlbop_error
 308	 mov		%i1, %o1
 309	ret
 310	 restore
311
 312__hypervisor_flush_tlb_mm: /* 10 insns */
	/* sun4v: demap an entire context via the MMU_DEMAP_CTX fast trap.
	 * Non-zero %o0 after the trap indicates a hypervisor error; the
	 * error path receives the failed function number in %o1.
	 * Patched over __flush_tlb_mm; count must stay 10.
	 */
 313	mov		%o0, %o2	/* ARG2: mmu context */
 314	mov		0, %o0		/* ARG0: CPU lists unimplemented */
 315	mov		0, %o1		/* ARG1: CPU lists unimplemented */
 316	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
 317	mov		HV_FAST_MMU_DEMAP_CTX, %o5
 318	ta		HV_FAST_TRAP
 319	brnz,pn		%o0, __hypervisor_tlb_tl0_error
 320	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
 321	retl
 322	 nop
 
 
 
 
 
 
 
 
 
323
 324__hypervisor_flush_tlb_page: /* 11 insns */
 325	/* %o0 = context, %o1 = vaddr */
	/* sun4v: unmap a single address via MMU_UNMAP_ADDR_TRAP.  The
	 * srlx/sllx pair page-aligns the vaddr, which also strips the
	 * low "flush I-TLB" flag bit the sparc callers encode there
	 * (HV_MMU_ALL flushes both TLBs anyway).
	 * Patched over __flush_tlb_page; count must stay 11.
	 */
 326	mov		%o0, %g2
 327	mov		%o1, %o0              /* ARG0: vaddr + IMMU-bit */
 328	mov		%g2, %o1	      /* ARG1: mmu context */
 329	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
 330	srlx		%o0, PAGE_SHIFT, %o0
 331	sllx		%o0, PAGE_SHIFT, %o0
 332	ta		HV_MMU_UNMAP_ADDR_TRAP
 333	brnz,pn		%o0, __hypervisor_tlb_tl0_error
 334	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
 335	retl
 336	 nop
 
 
 
 
 
 
 
 
 
 
 
337
 338__hypervisor_flush_tlb_pending: /* 16 insns */
 339	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* sun4v batched flush: one MMU_UNMAP_ADDR_TRAP per entry, walking
	 * vaddrs[] backwards.  Live state is kept in globals (%g1 = byte
	 * offset, %g2 = array base, %g3 = ctx) because %o0-%o5 are
	 * clobbered by each hypervisor trap.
	 * Patched over __flush_tlb_pending; count must stay 16.
	 */
 340	sllx		%o1, 3, %g1
 341	mov		%o2, %g2
 342	mov		%o0, %g3
 3431:	sub		%g1, (1 << 3), %g1
 344	ldx		[%g2 + %g1], %o0      /* ARG0: vaddr + IMMU-bit */
 345	mov		%g3, %o1	      /* ARG1: mmu context */
 346	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
 347	srlx		%o0, PAGE_SHIFT, %o0
 348	sllx		%o0, PAGE_SHIFT, %o0
 349	ta		HV_MMU_UNMAP_ADDR_TRAP
 350	brnz,pn		%o0, __hypervisor_tlb_tl0_error
 351	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
 352	brnz,pt		%g1, 1b
 353	 nop
 354	retl
 355	 nop
 
 
 
 
 
 
 
 
 
 
 
356
 357__hypervisor_flush_tlb_kernel_range: /* 16 insns */
 358	/* %o0=start, %o1=end */
	/* sun4v kernel-range flush: one MMU_UNMAP_ADDR_TRAP per page in
	 * context 0, iterating the offset %g2 from (end - start -
	 * PAGE_SIZE) down to 0.  Empty range is a no-op.
	 * Patched over __flush_tlb_kernel_range; count must stay 16.
	 */
 359	cmp		%o0, %o1
 360	be,pn		%xcc, 2f
 361	 sethi		%hi(PAGE_SIZE), %g3
 362	mov		%o0, %g1
 363	sub		%o1, %g1, %g2


 364	sub		%g2, %g3, %g2
 3651:	add		%g1, %g2, %o0	/* ARG0: virtual address */
 366	mov		0, %o1		/* ARG1: mmu context */
 367	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
 368	ta		HV_MMU_UNMAP_ADDR_TRAP
 369	brnz,pn		%o0, __hypervisor_tlb_tl0_error
 370	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
 371	brnz,pt		%g2, 1b
 372	 sub		%g2, %g3, %g2
 3732:	retl
 374	 nop
 
 
 
 
 
 
 
 
 
 
 
 
 
375
376#ifdef DCACHE_ALIASING_POSSIBLE
377	/* XXX Niagara and friends have an 8K cache, so no aliasing is
378	 * XXX possible, but nothing explicit in the Hypervisor API
379	 * XXX guarantees this.
380	 */
 381__hypervisor_flush_dcache_page:	/* 2 insns */
	/* Intentional no-op; see the XXX note above about sun4v caches. */
 382	retl
 383	 nop
384#endif
385
 386tlb_patch_one:
	/* Boot-time code patcher: copy %o2 instruction words from %o1
	 * (replacement) over %o0 (stub), flushing the I-cache after each
	 * word so the new code is visible to instruction fetch.  The
	 * count passed in %o2 must equal the stub's "NN insns" size.
	 */
 3871:	lduw		[%o1], %g1
 388	stw		%g1, [%o0]
 389	flush		%o0
 390	subcc		%o2, 1, %o2
 391	add		%o1, 4, %o1
 392	bne,pt		%icc, 1b
 393	 add		%o0, 4, %o0
 394	retl
 395	 nop
396
 397	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* Replace the generic (Spitfire) cache/TLB stubs with the
	 * Cheetah versions.  Each instruction count passed to
	 * tlb_patch_one matches the "/* NN insns *" annotation on
	 * both the stub and its replacement.
	 */
 399	save		%sp, -128, %sp
 400
 401	sethi		%hi(__flush_tlb_mm), %o0
 402	or		%o0, %lo(__flush_tlb_mm), %o0
 403	sethi		%hi(__cheetah_flush_tlb_mm), %o1
 404	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
 405	call		tlb_patch_one
 406	 mov		19, %o2
 407
 408	sethi		%hi(__flush_tlb_page), %o0
 409	or		%o0, %lo(__flush_tlb_page), %o0
 410	sethi		%hi(__cheetah_flush_tlb_page), %o1
 411	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
 412	call		tlb_patch_one
 413	 mov		22, %o2
 414
 415	sethi		%hi(__flush_tlb_pending), %o0
 416	or		%o0, %lo(__flush_tlb_pending), %o0
 417	sethi		%hi(__cheetah_flush_tlb_pending), %o1
 418	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
 419	call		tlb_patch_one
 420	 mov		27, %o2
 421
 422#ifdef DCACHE_ALIASING_POSSIBLE
 423	sethi		%hi(__flush_dcache_page), %o0
 424	or		%o0, %lo(__flush_dcache_page), %o0
 425	sethi		%hi(__cheetah_flush_dcache_page), %o1
 426	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
 427	call		tlb_patch_one
 428	 mov		11, %o2
 429#endif /* DCACHE_ALIASING_POSSIBLE */
 430
 431	ret
 432	 restore
433
434#ifdef CONFIG_SMP
435	/* These are all called by the slaves of a cross call, at
436	 * trap level 1, with interrupts fully disabled.
437	 *
438	 * Register usage:
439	 *   %g5	mm->context	(all tlb flushes)
440	 *   %g1	address arg 1	(tlb page and range flushes)
441	 *   %g7	address arg 2	(tlb range flush only)
442	 *
443	 *   %g6	scratch 1
444	 *   %g2	scratch 2
445	 *   %g3	scratch 3
446	 *   %g4	scratch 4
447	 */
 448	.align		32
 449	.globl		xcall_flush_tlb_mm
 450xcall_flush_tlb_mm:	/* 21 insns */
	/* Cross-call receiver (runs at TL=1, interrupts off — see the
	 * register-usage comment above): demap the whole context %g5
	 * through the PRIMARY context register (0x40 = demap-context,
	 * primary), preserving the nucleus page-size fields.  Ends in
	 * `retry` to resume the interrupted instruction.  Padded with
	 * nops to 21 so the hypervisor version can be patched over it.
	 */
 451	mov		PRIMARY_CONTEXT, %g2
 452	ldxa		[%g2] ASI_DMMU, %g3
 453	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
 454	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
 455	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
 456	stxa		%g5, [%g2] ASI_DMMU
 457	mov		0x40, %g4
 458	stxa		%g0, [%g4] ASI_DMMU_DEMAP
 459	stxa		%g0, [%g4] ASI_IMMU_DEMAP
 460	stxa		%g3, [%g2] ASI_DMMU
 461	retry
 462	nop
 463	nop
 464	nop
 465	nop
 466	nop
 467	nop
 468	nop
 469	nop
 470	nop
 471	nop
 
 
 
472
 473	.globl		xcall_flush_tlb_page
 474xcall_flush_tlb_page:	/* 17 insns */
 475	/* %g5=context, %g1=vaddr */
	/* Cross-call receiver, TL=1: demap one page from context %g5 via
	 * the PRIMARY context register.  Bit 0 of %g1 = also demap the
	 * I-TLB entry.  The caller's primary ctx (%g2) is restored
	 * before `retry`.  Padded to 17 insns for boot-time patching.
	 */
 476	mov		PRIMARY_CONTEXT, %g4
 477	ldxa		[%g4] ASI_DMMU, %g2
 478	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
 479	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
 480	or		%g5, %g4, %g5
 481	mov		PRIMARY_CONTEXT, %g4
 482	stxa		%g5, [%g4] ASI_DMMU
 483	andcc		%g1, 0x1, %g0
 484	be,pn		%icc, 2f
 485	 andn		%g1, 0x1, %g5
 486	stxa		%g0, [%g5] ASI_IMMU_DEMAP
 4872:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
 488	membar		#Sync
 489	stxa		%g2, [%g4] ASI_DMMU
 490	retry
 491	nop
 492	nop
 
 
 
493
 494	.globl		xcall_flush_tlb_kernel_range
 495xcall_flush_tlb_kernel_range:	/* 25 insns */
	/* Cross-call receiver, TL=1: demap kernel pages in [%g1, %g7)
	 * from the nucleus context (0x20).  Start/end are rounded down
	 * to page boundaries, then the offset %g3 walks from
	 * (end - start - PAGE_SIZE) down to 0.
	 * Padded with nops to 25 insns for boot-time patching.
	 */
 496	sethi		%hi(PAGE_SIZE - 1), %g2
 497	or		%g2, %lo(PAGE_SIZE - 1), %g2
 498	andn		%g1, %g2, %g1
 499	andn		%g7, %g2, %g7
 500	sub		%g7, %g1, %g3
 501	add		%g2, 1, %g2


 502	sub		%g3, %g2, %g3
 503	or		%g1, 0x20, %g1		! Nucleus
 5041:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
 505	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
 506	membar		#Sync
 507	brnz,pt		%g3, 1b
 508	 sub		%g3, %g2, %g3
 509	retry
 510	nop
 511	nop

















 512	nop
 513	nop
 514	nop
 515	nop
 516	nop
 517	nop
 518	nop
 519	nop
 520	nop
521
522	/* This runs in a very controlled environment, so we do
523	 * not need to worry about BH races etc.
524	 */
 525	.globl		xcall_sync_tick
xcall_sync_tick:
	/* Cross-call receiver that drops into C: switch off the
	 * alternate/interrupt globals, raise PIL, etrap into kernel
	 * context, and call smp_synchronize_tick_client().  On sun4v
	 * the two insns at 661: are patched to nops at boot via the
	 * .sun4v_2insn_patch section (presumably because sun4v has no
	 * AG/IG pstate bits — confirm against the patching code).
	 */
 527
 528661:	rdpr		%pstate, %g2
 529	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
 530	.section	.sun4v_2insn_patch, "ax"
 531	.word		661b
 532	nop
 533	nop
 534	.previous
 535
 536	rdpr		%pil, %g2
 537	wrpr		%g0, PIL_NORMAL_MAX, %pil
 538	sethi		%hi(109f), %g7
 539	b,pt		%xcc, etrap_irq
 540109:	 or		%g7, %lo(109b), %g7
 541#ifdef CONFIG_TRACE_IRQFLAGS
 542	call		trace_hardirqs_off
 543	 nop
 544#endif
 545	call		smp_synchronize_tick_client
 546	 nop
 547	b		rtrap_xcall
 548	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
549
 550	.globl		xcall_fetch_glob_regs
xcall_fetch_glob_regs:
	/* Cross-call receiver: snapshot this CPU's trap state into its
	 * 64-byte slot of global_cpu_snapshot[] (tstate/tpc/tnpc/%o7/%i7,
	 * the caller-of-caller's return PC, and the thread pointer from
	 * this CPU's trap_block[] entry).  The %cwp dance temporarily
	 * rotates to the previous register window to read its %i7.
	 */
 552	sethi		%hi(global_cpu_snapshot), %g1
 553	or		%g1, %lo(global_cpu_snapshot), %g1
 554	__GET_CPUID(%g2)
 555	sllx		%g2, 6, %g3
 556	add		%g1, %g3, %g1
 557	rdpr		%tstate, %g7
 558	stx		%g7, [%g1 + GR_SNAP_TSTATE]
 559	rdpr		%tpc, %g7
 560	stx		%g7, [%g1 + GR_SNAP_TPC]
 561	rdpr		%tnpc, %g7
 562	stx		%g7, [%g1 + GR_SNAP_TNPC]
 563	stx		%o7, [%g1 + GR_SNAP_O7]
 564	stx		%i7, [%g1 + GR_SNAP_I7]
 565	/* Don't try this at home kids... */
 566	rdpr		%cwp, %g3
 567	sub		%g3, 1, %g7
 568	wrpr		%g7, %cwp
 569	mov		%i7, %g7
 570	wrpr		%g3, %cwp
 571	stx		%g7, [%g1 + GR_SNAP_RPC]
 572	sethi		%hi(trap_block), %g7
 573	or		%g7, %lo(trap_block), %g7
 574	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
 575	add		%g7, %g2, %g7
 576	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
 577	stx		%g3, [%g1 + GR_SNAP_THREAD]
 578	retry
579
 580	.globl		xcall_fetch_glob_pmu
xcall_fetch_glob_pmu:
	/* Cross-call receiver: store this CPU's %pic and %pcr performance
	 * registers into its global_cpu_snapshot[] slot (slots 4 and 0).
	 */
 582	sethi		%hi(global_cpu_snapshot), %g1
 583	or		%g1, %lo(global_cpu_snapshot), %g1
 584	__GET_CPUID(%g2)
 585	sllx		%g2, 6, %g3
 586	add		%g1, %g3, %g1
 587	rd		%pic, %g7
 588	stx		%g7, [%g1 + (4 * 8)]
 589	rd		%pcr, %g7
 590	stx		%g7, [%g1 + (0 * 8)]
 591	retry
592
 593	.globl		xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
	/* Niagara-4 variant of xcall_fetch_glob_pmu: the four counter
	 * values come from ASI_PIC at offsets 0x0/0x8/0x10/0x18 (slots
	 * 4-7), and the four control registers from the
	 * VT_GET_PERFREG hypervisor call (slots 0-3).  %o0/%o1/%o5 are
	 * saved in globals around the hypervisor traps and restored
	 * before `retry`.
	 */
 595	sethi		%hi(global_cpu_snapshot), %g1
 596	or		%g1, %lo(global_cpu_snapshot), %g1
 597	__GET_CPUID(%g2)
 598	sllx		%g2, 6, %g3
 599	add		%g1, %g3, %g1
 600
 601	ldxa		[%g0] ASI_PIC, %g7
 602	stx		%g7, [%g1 + (4 * 8)]
 603	mov		0x08, %g3
 604	ldxa		[%g3] ASI_PIC, %g7
 605	stx		%g7, [%g1 + (5 * 8)]
 606	mov		0x10, %g3
 607	ldxa		[%g3] ASI_PIC, %g7
 608	stx		%g7, [%g1 + (6 * 8)]
 609	mov		0x18, %g3
 610	ldxa		[%g3] ASI_PIC, %g7
 611	stx		%g7, [%g1 + (7 * 8)]
 612
 613	mov		%o0, %g2
 614	mov		%o1, %g3
 615	mov		%o5, %g7
 616
 617	mov		HV_FAST_VT_GET_PERFREG, %o5
 618	mov		3, %o0
 619	ta		HV_FAST_TRAP
 620	stx		%o1, [%g1 + (3 * 8)]
 621	mov		HV_FAST_VT_GET_PERFREG, %o5
 622	mov		2, %o0
 623	ta		HV_FAST_TRAP
 624	stx		%o1, [%g1 + (2 * 8)]
 625	mov		HV_FAST_VT_GET_PERFREG, %o5
 626	mov		1, %o0
 627	ta		HV_FAST_TRAP
 628	stx		%o1, [%g1 + (1 * 8)]
 629	mov		HV_FAST_VT_GET_PERFREG, %o5
 630	mov		0, %o0
 631	ta		HV_FAST_TRAP
 632	stx		%o1, [%g1 + (0 * 8)]
 633
 634	mov		%g2, %o0
 635	mov		%g3, %o1
 636	mov		%g7, %o5
 637
 638	retry
639
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
640#ifdef DCACHE_ALIASING_POSSIBLE
 641	.align		32
 642	.globl		xcall_flush_dcache_page_cheetah
 643xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	/* Cross-call receiver: Cheetah displacement-invalidate of one
	 * page's D-cache lines by physical address, 32 bytes at a time.
	 */
 644	sethi		%hi(PAGE_SIZE), %g3
 6451:	subcc		%g3, (1 << 5), %g3
 646	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
 647	membar		#Sync
 648	bne,pt		%icc, 1b
 649	 nop
 650	retry
 651	nop
652#endif /* DCACHE_ALIASING_POSSIBLE */
653
 654	.globl		xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
 656				     %g7 == kernel page virtual address
 657				     %g5 == (page->mapping != NULL)  */
	/* Cross-call receiver: Spitfire D-cache flush by tag probe
	 * (16K cache, 32-byte lines), invalidating lines whose tag
	 * matches %g1.  If the page has a mapping (%g5 != 0), the
	 * I-cache is flushed as well via `flush` over the page's
	 * kernel virtual address.
	 */
 658#ifdef DCACHE_ALIASING_POSSIBLE
 659	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
 660	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
 661	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
 6621:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
 663	andcc		%g2, 0x3, %g0
 664	be,pn		%xcc, 2f
 665	 andn		%g2, 0x3, %g2
 666	cmp		%g2, %g1
 667
 668	bne,pt		%xcc, 2f
 669	 nop
 670	stxa		%g0, [%g3] ASI_DCACHE_TAG
 671	membar		#Sync
 6722:	cmp		%g3, 0
 673	bne,pt		%xcc, 1b
 674	 sub		%g3, (1 << 5), %g3
 675
 676	brz,pn		%g5, 2f
 677#endif /* DCACHE_ALIASING_POSSIBLE */
 678	 sethi		%hi(PAGE_SIZE), %g3
 679
 6801:	flush		%g7
 681	subcc		%g3, (1 << 5), %g3
 682	bne,pt		%icc, 1b
 683	 add		%g7, (1 << 5), %g7
 684
 6852:	retry
 686	nop
 687	nop
688
 689	/* %g5:	error
 690	 * %g6:	tlb op
 691	 */
__hypervisor_tlb_xcall_error:
	/* Cross-call (TL=1) error path for the hypervisor xcall TLB ops:
	 * shuffle the error code and op number into %g4/%g5, etrap into
	 * kernel context, then report via hypervisor_tlbop_error_xcall().
	 * NOTE(review): this relies on etrap stashing %g4/%g5 into
	 * %l4/%l5 for the code after the `rd %pc, %g7` return point —
	 * confirm against etrap_64.S.
	 */
 693	mov	%g5, %g4
 694	mov	%g6, %g5
 695	ba,pt	%xcc, etrap
 696	 rd	%pc, %g7
 697	mov	%l4, %o0
 698	call	hypervisor_tlbop_error_xcall
 699	 mov	%l5, %o1
 700	ba,a,pt	%xcc, rtrap
701
 702	.globl		__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
 704	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	/* sun4v cross-call receiver: demap context %g5 via the
	 * MMU_DEMAP_CTX fast trap.  The interrupted code's %o0-%o3/%o5
	 * are preserved in globals around the trap.  Patched over
	 * xcall_flush_tlb_mm; count must stay 21.
	 */
 705	mov		%o0, %g2
 706	mov		%o1, %g3
 707	mov		%o2, %g4
 708	mov		%o3, %g1
 709	mov		%o5, %g7
 710	clr		%o0		/* ARG0: CPU lists unimplemented */
 711	clr		%o1		/* ARG1: CPU lists unimplemented */
 712	mov		%g5, %o2	/* ARG2: mmu context */
 713	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
 714	mov		HV_FAST_MMU_DEMAP_CTX, %o5
 715	ta		HV_FAST_TRAP
 716	mov		HV_FAST_MMU_DEMAP_CTX, %g6
 717	brnz,pn		%o0, __hypervisor_tlb_xcall_error
 718	 mov		%o0, %g5
 719	mov		%g2, %o0
 720	mov		%g3, %o1
 721	mov		%g4, %o2
 722	mov		%g1, %o3
 723	mov		%g7, %o5
 724	membar		#Sync
 725	retry
 
 
 
726
 727	.globl		__hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
 729	/* %g5=ctx, %g1=vaddr */
	/* sun4v cross-call receiver: unmap one page of context %g5 via
	 * MMU_UNMAP_ADDR_TRAP.  The srlx/sllx pair page-aligns the vaddr
	 * (stripping the low I-TLB flag bit).  %o0-%o2 of the
	 * interrupted code are preserved in globals.  Patched over
	 * xcall_flush_tlb_page; count must stay 17.
	 */
 730	mov		%o0, %g2
 731	mov		%o1, %g3
 732	mov		%o2, %g4
 733	mov		%g1, %o0	        /* ARG0: virtual address */
 734	mov		%g5, %o1		/* ARG1: mmu context */
 735	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
 736	srlx		%o0, PAGE_SHIFT, %o0
 737	sllx		%o0, PAGE_SHIFT, %o0
 738	ta		HV_MMU_UNMAP_ADDR_TRAP
 739	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
 740	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
 741	 mov		%o0, %g5
 742	mov		%g2, %o0
 743	mov		%g3, %o1
 744	mov		%g4, %o2
 745	membar		#Sync
 746	retry
 
 
 
747
 748	.globl		__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
 750	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	/* sun4v cross-call receiver: unmap kernel pages in [%g1, %g7)
	 * from context 0, one MMU_UNMAP_ADDR_TRAP per page, walking the
	 * offset %g3 from (end - start - PAGE_SIZE) down to 0.  The
	 * interrupted code's %o0-%o2 are preserved in globals.  Patched
	 * over xcall_flush_tlb_kernel_range; count must stay 25.
	 */
 751	sethi		%hi(PAGE_SIZE - 1), %g2
 752	or		%g2, %lo(PAGE_SIZE - 1), %g2
 753	andn		%g1, %g2, %g1
 754	andn		%g7, %g2, %g7
 755	sub		%g7, %g1, %g3

 756	add		%g2, 1, %g2
 757	sub		%g3, %g2, %g3
 758	mov		%o0, %g2
 759	mov		%o1, %g4
 760	mov		%o2, %g7

 7611:	add		%g1, %g3, %o0	/* ARG0: virtual address */
 762	mov		0, %o1		/* ARG1: mmu context */
 763	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
 764	ta		HV_MMU_UNMAP_ADDR_TRAP
 765	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
 766	brnz,pn		%o0, __hypervisor_tlb_xcall_error
 767	 mov		%o0, %g5
 768	sethi		%hi(PAGE_SIZE), %o2
 769	brnz,pt		%g3, 1b
 770	 sub		%g3, %o2, %g3
 771	mov		%g2, %o0
 772	mov		%g4, %o1
 773	mov		%g7, %o2
 774	membar		#Sync
 775	retry
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
776
 777	/* These just get rescheduled to PIL vectors. */
	/* Each stub simply raises the corresponding PIL softint on the
	 * receiving CPU and resumes; the real work happens in the normal
	 * PIL interrupt handler at TL=0.
	 */
 778	.globl		xcall_call_function
xcall_call_function:
 780	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
 781	retry
 782
 783	.globl		xcall_call_function_single
xcall_call_function_single:
 785	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
 786	retry
 787
 788	.globl		xcall_receive_signal
xcall_receive_signal:
 790	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
 791	retry
 792
 793	.globl		xcall_capture
xcall_capture:
 795	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
 796	retry
 797
 798	.globl		xcall_new_mmu_context_version
xcall_new_mmu_context_version:
 800	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
 801	retry
 802
 803#ifdef CONFIG_KGDB
 804	.globl		xcall_kgdb_capture
xcall_kgdb_capture:
 806	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
 807	retry
 808#endif
809
810#endif /* CONFIG_SMP */
811
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
812
 813	.globl		hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	/* Replace the generic cache/TLB stubs (and, under SMP, the
	 * cross-call receivers) with the sun4v hypervisor versions.
	 * Each count passed to tlb_patch_one matches the "/* NN insns *"
	 * annotation on the stub and its replacement.
	 */
 815	save		%sp, -128, %sp
 816
 817	sethi		%hi(__flush_tlb_mm), %o0
 818	or		%o0, %lo(__flush_tlb_mm), %o0
 819	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
 820	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
 821	call		tlb_patch_one
 822	 mov		10, %o2
 823
 824	sethi		%hi(__flush_tlb_page), %o0
 825	or		%o0, %lo(__flush_tlb_page), %o0
 826	sethi		%hi(__hypervisor_flush_tlb_page), %o1
 827	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
 828	call		tlb_patch_one
 829	 mov		11, %o2
 830
 831	sethi		%hi(__flush_tlb_pending), %o0
 832	or		%o0, %lo(__flush_tlb_pending), %o0
 833	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
 834	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
 835	call		tlb_patch_one
 836	 mov		16, %o2
 837
 838	sethi		%hi(__flush_tlb_kernel_range), %o0
 839	or		%o0, %lo(__flush_tlb_kernel_range), %o0
 840	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
 841	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
 842	call		tlb_patch_one
 843	 mov		16, %o2
 844
 845#ifdef DCACHE_ALIASING_POSSIBLE
 846	sethi		%hi(__flush_dcache_page), %o0
 847	or		%o0, %lo(__flush_dcache_page), %o0
 848	sethi		%hi(__hypervisor_flush_dcache_page), %o1
 849	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
 850	call		tlb_patch_one
 851	 mov		2, %o2
 852#endif /* DCACHE_ALIASING_POSSIBLE */
 853
 854#ifdef CONFIG_SMP
 855	sethi		%hi(xcall_flush_tlb_mm), %o0
 856	or		%o0, %lo(xcall_flush_tlb_mm), %o0
 857	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
 858	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
 859	call		tlb_patch_one
 860	 mov		21, %o2
 861
 862	sethi		%hi(xcall_flush_tlb_page), %o0
 863	or		%o0, %lo(xcall_flush_tlb_page), %o0
 864	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
 865	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
 866	call		tlb_patch_one
 867	 mov		17, %o2
 868
 869	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
 870	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
 871	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
 872	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
 873	call		tlb_patch_one
 874	 mov		25, %o2
 875#endif /* CONFIG_SMP */
 876
 877	ret
 878	 restore