/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context. Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try not to fuck this up for anybody...
	 */
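	/* For reference: the magic demap addresses used throughout this
	 * file encode the operation in the address bits (the UltraSPARC
	 * MMU demap format).  Bits 7:6 select the demap type (0x00 =
	 * page, 0x40 = context) and bits 5:4 select the context register
	 * (0x00 = primary, 0x10 = secondary, 0x20 = nucleus).  So the
	 * 0x50 below means "demap all of the secondary context" and
	 * 0x40 "demap all of the primary context".
	 */
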
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm:		/* 18 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	ldxa	[%o1] ASI_DMMU, %g2
	cmp	%g2, %o0
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
	mov	0x50, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	sethi	%hi(KERNBASE), %g3
	flush	%g3
	retl
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_page
__flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
	andcc	%o1, 1, %g0
	andn	%o1, 1, %o3
	be,pn	%icc, 1f
	or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

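	/* In __flush_tlb_page above, bit 0 of the vaddr argument flags
	 * an executable mapping: when it is set the I-TLB entry is
	 * demapped as well as the D-TLB one.  The 0x10 or'd into the
	 * page-aligned address steers the demap at the secondary
	 * context register, which was just loaded with the target
	 * context.
	 */
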
	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:	/* 26 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	andn	%o3, 1, %o3
	be,pn	%icc, 2f
	or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 16 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sethi	%hi(PAGE_SIZE), %o4
	sub	%o1, %o0, %o3
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	nop
	nop

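	/* Note the kernel-range flush above works backwards: %o3 starts
	 * at (end - start) - PAGE_SIZE and drops by a page each
	 * iteration, so [%o0 + %o3] visits every page from the last one
	 * down to "start" itself, demapping each from the nucleus
	 * context (the 0x20 in the demap address).
	 */
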
__spitfire_flush_tlb_mm_slow:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate
	stxa	%o0, [%o1] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU
	sethi	%hi(KERNBASE), %o1
	flush	%o1
	retl
	wrpr	%g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align	32
	.globl	__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_SIZE), %g2
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	add	%o0, %g1, %o0
1:	subcc	%g2, 32, %g2
	bne,pt	%icc, 1b
	flush	%o0 + %g2
	retl
	nop

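	/* PAGE_OFFSET is fetched with ldx rather than built as an
	 * immediate because on these kernels the base of the linear
	 * mapping is a 64-bit value fixed up during early boot, not a
	 * compile-time constant, so the flush routines load it from
	 * memory each time.
	 */
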
#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi	%hi(PAGE_OFFSET), %g1
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	sub	%o0, %g1, %o0			! physical address
	srlx	%o0, 11, %o0			! make D-cache TAG
	sethi	%hi(1 << 14), %o2		! D-cache size
	sub	%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa	[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc	%o3, DTAG_MASK, %g0		! Valid?
	be,pn	%xcc, 2f			! Nope, branch
	andn	%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp	%o3, %o0			! TAG match?
	bne,pt	%xcc, 2f			! Nope, branch
	nop
	stxa	%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar	#Sync
2:	brnz,pt	%o2, 1b
	sub	%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page
	sllx	%o0, 11, %o0
	retl
	nop

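	/* A gloss on the loop above: the Spitfire D$ is 16K, direct
	 * mapped, with 32-byte lines, and is walked from the top line
	 * address down to zero.  (phys >> 11) lines the physical page
	 * address up with the stored tag once the DTAG_MASK valid bits
	 * are cleared, and the final sllx by 11 rebuilds the physical
	 * address so __flush_icache_page can be tail-called.
	 */
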
#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous

	/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm:	/* 19 insns */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3
	ldxa	[%o2] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx	%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or	%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o2] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g2, [%o2] ASI_DMMU
	sethi	%hi(KERNBASE), %o2
	flush	%o2
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU
	andcc	%o1, 1, %g0
	be,pn	%icc, 1f
	andn	%o1, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	be,pn	%icc, 2f
	andn	%o3, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:	/* 11 insns */
	sethi	%hi(PAGE_OFFSET), %g1
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	sub	%o0, %g1, %o0
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retl	/* I-cache flush never needed on Cheetah, see callers. */
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	/* Hypervisor specific versions, patched at boot time. */
__hypervisor_tlb_tl0_error:
	save	%sp, -192, %sp
	mov	%i0, %o0
	call	hypervisor_tlbop_error
	mov	%i1, %o1
	ret
	restore

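	/* A quick reminder of the sun4v fast-trap convention used below
	 * (see asm/hypervisor.h): the function number goes in %o5,
	 * arguments in %o0-%o3, then "ta HV_FAST_TRAP"; the hypervisor
	 * returns a status in %o0, zero meaning success.  The
	 * HV_MMU_UNMAP_ADDR_TRAP used for page flushes is a dedicated
	 * trap taking vaddr/context/flags directly in %o0-%o2.
	 */
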
__hypervisor_flush_tlb_mm:	/* 10 insns */
	mov	%o0, %o2	/* ARG2: mmu context */
	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error
	mov	HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	nop

__hypervisor_flush_tlb_page:	/* 11 insns */
	/* %o0 = context, %o1 = vaddr */
	mov	%o0, %g2
	mov	%o1, %o0	/* ARG0: vaddr + IMMU-bit */
	mov	%g2, %o1	/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error
	mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	retl
	nop

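	/* The "IMMU-bit" mentioned above is the low bit of the passed-in
	 * vaddr: the sun4u paths use it to decide whether to demap the
	 * I-TLB as well as the D-TLB.  The hypervisor flushes both for
	 * HV_MMU_ALL, so here the srlx/sllx pair simply page-aligns the
	 * address, which also discards that flag bit.
	 */
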
__hypervisor_flush_tlb_pending:	/* 16 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx	%o1, 3, %g1
	mov	%o2, %g2
	mov	%o0, %g3
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g2 + %g1], %o0	/* ARG0: vaddr + IMMU-bit */
	mov	%g3, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error
	mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g1, 1b
	nop
	retl
	nop

__hypervisor_flush_tlb_kernel_range:	/* 16 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sethi	%hi(PAGE_SIZE), %g3
	mov	%o0, %g1
	sub	%o1, %g1, %g2
	sub	%g2, %g3, %g2
1:	add	%g1, %g2, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, __hypervisor_tlb_tl0_error
	mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g2, 1b
	sub	%g2, %g3, %g2
2:	retl
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	nop
#endif

tlb_patch_one:
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0
	subcc	%o2, 1, %o2
	add	%o1, 4, %o1
	bne,pt	%icc, 1b
	add	%o0, 4, %o0
	retl
	nop

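	/* tlb_patch_one copies %o2 instructions from the routine at %o1
	 * over the one at %o0, flushing the I-cache a word at a time.
	 * This is why every patchable routine above carries an "N insns"
	 * annotation: the replacement must fit within the space reserved
	 * at the patch site (the trailing nops are that reserved space).
	 */
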
	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	19, %o2

	sethi	%hi(__flush_tlb_page), %o0
	or	%o0, %lo(__flush_tlb_page), %o0
	sethi	%hi(__cheetah_flush_tlb_page), %o1
	or	%o1, %lo(__cheetah_flush_tlb_page), %o1
	call	tlb_patch_one
	mov	22, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	tlb_patch_one
	mov	27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__cheetah_flush_dcache_page), %o1
	or	%o1, %lo(__cheetah_flush_dcache_page), %o1
	call	tlb_patch_one
	mov	11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	restore

#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *	%g5	mm->context	(all tlb flushes)
	 *	%g1	address arg 1	(tlb page and range flushes)
	 *	%g7	address arg 2	(tlb range flush only)
	 *
	 *	%g6	scratch 1
	 *	%g2	scratch 2
	 *	%g3	scratch 3
	 *	%g4	scratch 4
	 */
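	/* Why globals: a cross-call target runs at trap level on its
	 * own global register set, so the %g registers are private
	 * here, while the interrupted context's %o/%l/%i windows must
	 * be preserved -- hence the mov-to-%g shuffles around the
	 * hypervisor traps below.
	 */
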
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
	mov	PRIMARY_CONTEXT, %g2
	ldxa	[%g2] ASI_DMMU, %g3
	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa	%g5, [%g2] ASI_DMMU
	mov	0x40, %g4
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl	xcall_flush_tlb_page
xcall_flush_tlb_page:	/* 17 insns */
	/* %g5=context, %g1=vaddr */
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5
	mov	PRIMARY_CONTEXT, %g4
	stxa	%g5, [%g4] ASI_DMMU
	andcc	%g1, 0x1, %g0
	be,pn	%icc, 2f
	andn	%g1, 0x1, %g5
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%g4] ASI_DMMU
	retry
	nop
	nop

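	/* Note the xcall variants demap through the PRIMARY context
	 * register while the local __flush_tlb_* paths above use
	 * SECONDARY; either way the original context register value is
	 * saved in a scratch global and restored once the demap stores
	 * have been synced with membar #Sync.
	 */
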
	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 25 insns */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	sub	%g3, %g2, %g3
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
xcall_sync_tick:

661:	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	or	%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	nop
#endif
	call	smp_synchronize_tick_client
	nop
	b	rtrap_xcall
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	.globl	xcall_fetch_glob_regs
xcall_fetch_glob_regs:
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1
	rdpr	%tstate, %g7
	stx	%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr	%tpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TPC]
	rdpr	%tnpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TNPC]
	stx	%o7, [%g1 + GR_SNAP_O7]
	stx	%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids... */
	rdpr	%cwp, %g3
	sub	%g3, 1, %g7
	wrpr	%g7, %cwp
	mov	%i7, %g7
	wrpr	%g3, %cwp
	stx	%g7, [%g1 + GR_SNAP_RPC]
	sethi	%hi(trap_block), %g7
	or	%g7, %lo(trap_block), %g7
	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add	%g7, %g2, %g7
	ldx	[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx	%g3, [%g1 + GR_SNAP_THREAD]
	retry

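	/* The "don't try this at home" bit: %cwp is temporarily wound
	 * back one register window so the previous frame's %i7 (the
	 * return address one call frame further up) can be read and
	 * saved as GR_SNAP_RPC, then the original window pointer is
	 * restored.  Only safe because this runs at trap level with
	 * interrupts disabled.
	 */
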
	.globl	xcall_fetch_glob_pmu
xcall_fetch_glob_pmu:
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1
	rd	%pic, %g7
	stx	%g7, [%g1 + (4 * 8)]
	rd	%pcr, %g7
	stx	%g7, [%g1 + (0 * 8)]
	retry

	.globl	xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1

	ldxa	[%g0] ASI_PIC, %g7
	stx	%g7, [%g1 + (4 * 8)]
	mov	0x08, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (5 * 8)]
	mov	0x10, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (6 * 8)]
	mov	0x18, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (7 * 8)]

	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o5, %g7

	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	3, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (3 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	2, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (2 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	1, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (1 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	0, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (0 * 8)]

	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g7, %o5

	retry

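	/* On the Niagara-4 variant the four PCRs are not directly
	 * readable, so each is fetched with the HV_FAST_VT_GET_PERFREG
	 * hypercall (register index in %o0, value returned in %o1),
	 * while the four PICs are read directly through ASI_PIC at
	 * offsets 0x0/0x8/0x10/0x18.  Slots 0-3 of the snapshot hold
	 * PCRs, slots 4-7 hold PICs.
	 */
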
#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL) */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx	%g1, (13 - 2), %g1	! Form tag comparator
	sethi	%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub	%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0
	be,pn	%xcc, 2f
	andn	%g2, 0x3, %g2
	cmp	%g2, %g1

	bne,pt	%xcc, 2f
	nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG
	membar	#Sync
2:	cmp	%g3, 0
	bne,pt	%xcc, 1b
	sub	%g3, (1 << 5), %g3

	brz,pn	%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	sethi	%hi(PAGE_SIZE), %g3

1:	flush	%g7
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	add	%g7, (1 << 5), %g7

2:	retry
	nop
	nop

	/* %g5: error
	 * %g6: tlb op
	 */
__hypervisor_tlb_xcall_error:
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	mov	%l5, %o1
	ba,a,pt	%xcc, rtrap

	.globl	__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%o3, %g1
	mov	%o5, %g7
	clr	%o0		/* ARG0: CPU lists unimplemented */
	clr	%o1		/* ARG1: CPU lists unimplemented */
	mov	%g5, %o2	/* ARG2: mmu context */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	HV_FAST_MMU_DEMAP_CTX, %g6
	brnz,pn	%o0, __hypervisor_tlb_xcall_error
	mov	%o0, %g5
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	mov	%g1, %o3
	mov	%g7, %o5
	membar	#Sync
	retry

	.globl	__hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
	/* %g5=ctx, %g1=vaddr */
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%g1, %o0	/* ARG0: virtual address */
	mov	%g5, %o1	/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn %o0, __hypervisor_tlb_xcall_error
	mov	%o0, %g5
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	membar	#Sync
	retry

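	/* The ",a" on the brnz above annuls the delay slot when the
	 * branch falls through, so %g5 is only clobbered with the
	 * error code on the failure path; %g6 carries the hypercall
	 * number for __hypervisor_tlb_xcall_error's report.
	 */
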
	.globl	__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	mov	%o0, %g2
	mov	%o1, %g4
	mov	%o2, %g7
1:	add	%g1, %g3, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn	%o0, __hypervisor_tlb_xcall_error
	mov	%o0, %g5
	sethi	%hi(PAGE_SIZE), %o2
	brnz,pt	%g3, 1b
	sub	%g3, %o2, %g3
	mov	%g2, %o0
	mov	%g4, %o1
	mov	%g7, %o2
	membar	#Sync
	retry

	/* These just get rescheduled to PIL vectors. */
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl	xcall_call_function_single
xcall_call_function_single:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl	xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	wr	%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

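	/* Writing a bit to %set_softint posts the corresponding soft
	 * interrupt, so these cross calls do no work at trap level;
	 * the real handler runs later at its PIL once the target CPU
	 * drops below it and takes the pending interrupt.
	 */
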
#ifdef CONFIG_KGDB
	.globl	xcall_kgdb_capture
xcall_kgdb_capture:
	wr	%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */


	.globl	hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	10, %o2

	sethi	%hi(__flush_tlb_page), %o0
	or	%o0, %lo(__flush_tlb_page), %o0
	sethi	%hi(__hypervisor_flush_tlb_page), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call	tlb_patch_one
	mov	11, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call	tlb_patch_one
	mov	16, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__hypervisor_flush_dcache_page), %o1
	or	%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call	tlb_patch_one
	mov	2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_mm), %o0
	or	%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	21, %o2

	sethi	%hi(xcall_flush_tlb_page), %o0
	or	%o0, %lo(xcall_flush_tlb_page), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call	tlb_patch_one
	mov	17, %o2

	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	25, %o2
#endif /* CONFIG_SMP */

	ret
	restore