/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>

	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context. Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try not to fuck this up for anybody...
	 */
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm:		/* 19 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
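	/* Demap address encoding (SPARC V9 MMU): bit 6 selects a
	 * context demap and bits 5:4 select the context register,
	 * so the 0x50 below demaps the entire secondary context
	 * from both the D-TLB and the I-TLB.
	 */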
	ldxa	[%o1] ASI_DMMU, %g2
	cmp	%g2, %o0
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
	mov	0x50, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	sethi	%hi(KERNBASE), %g3
	flush	%g3
	retl
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_page
__flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
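	/* Bit 0 of the vaddr is a software flag: when set, the page
	 * is demapped from the I-TLB as well as the D-TLB. The 0x10
	 * OR-ed into the demap address selects a page demap in the
	 * secondary context.
	 */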
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
	andcc	%o1, 1, %g0
	andn	%o1, 1, %o3
	be,pn	%icc, 1f
	or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
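	/* The batch is %o1 64-bit vaddrs (hence the sllx by 3 to get
	 * a byte offset), walked from the end of the array; each
	 * entry uses the same bit-0 "also flush the I-TLB" convention
	 * as __flush_tlb_page above.
	 */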
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	andn	%o3, 1, %o3
	be,pn	%icc, 2f
	or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 31 insns */
	/* %o0=start, %o1=end */
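	/* Ranges of 256KB or more (checked by the srlx by 18) are not
	 * demapped page by page; the slow path below sweeps the TLB
	 * entries directly instead.
	 */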
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sub	%o1, %o0, %o3
	srlx	%o3, 18, %o4
	brnz,pn	%o4, __spitfire_flush_tlb_kernel_range_slow
	sethi	%hi(PAGE_SIZE), %o4
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

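	/* Slow path: walk all 64 TLB entries of each MMU through the
	 * I/D-TLB data access ASIs and clear every translation that
	 * is not locked (_PAGE_L_4U).
	 */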
__spitfire_flush_tlb_kernel_range_slow:
	mov	63 * 8, %o4
1:	ldxa	[%o4] ASI_ITLB_DATA_ACCESS, %o3
	andcc	%o3, 0x40, %g0			/* _PAGE_L_4U */
	bne,pn	%xcc, 2f
	mov	TLB_TAG_ACCESS, %o3
	stxa	%g0, [%o3] ASI_IMMU
	stxa	%g0, [%o4] ASI_ITLB_DATA_ACCESS
	membar	#Sync
2:	ldxa	[%o4] ASI_DTLB_DATA_ACCESS, %o3
	andcc	%o3, 0x40, %g0
	bne,pn	%xcc, 2f
	mov	TLB_TAG_ACCESS, %o3
	stxa	%g0, [%o3] ASI_DMMU
	stxa	%g0, [%o4] ASI_DTLB_DATA_ACCESS
	membar	#Sync
2:	sub	%o4, 8, %o4
	brgez,pt %o4, 1b
	nop
	retl
	nop

__spitfire_flush_tlb_mm_slow:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate
	stxa	%o0, [%o1] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU
	sethi	%hi(KERNBASE), %o1
	flush	%o1
	retl
	wrpr	%g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align	32
	.globl	__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_SIZE), %g2
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	add	%o0, %g1, %o0
1:	subcc	%g2, 32, %g2
	bne,pt	%icc, 1b
	flush	%o0 + %g2
	retl
	nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi	%hi(PAGE_OFFSET), %g1
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	sub	%o0, %g1, %o0			! physical address
	srlx	%o0, 11, %o0			! make D-cache TAG
	sethi	%hi(1 << 14), %o2		! D-cache size
	sub	%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa	[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc	%o3, DTAG_MASK, %g0		! Valid?
	be,pn	%xcc, 2f			! Nope, branch
	andn	%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp	%o3, %o0			! TAG match?
	bne,pt	%xcc, 2f			! Nope, branch
	nop
	stxa	%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar	#Sync
2:	brnz,pt	%o2, 1b
	sub	%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page
	sllx	%o0, 11, %o0
	retl
	nop

#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous

	/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm:	/* 19 insns */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3
	ldxa	[%o2] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx	%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or	%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o2] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g2, [%o2] ASI_DMMU
	sethi	%hi(KERNBASE), %o2
	flush	%o2
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU
	andcc	%o1, 1, %g0
	be,pn	%icc, 1f
	andn	%o1, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	be,pn	%icc, 2f
	andn	%o3, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_kernel_range:	/* 31 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sub	%o1, %o0, %o3
	srlx	%o3, 18, %o4
	brnz,pn	%o4, 3f
	sethi	%hi(PAGE_SIZE), %o4
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	nop
3:	mov	0x80, %o4
	stxa	%g0, [%o4] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g0, [%o4] ASI_IMMU_DEMAP
	membar	#Sync
	retl
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:	/* 11 insns */
	sethi	%hi(PAGE_OFFSET), %g1
	ldx	[%g1 + %lo(PAGE_OFFSET)], %g1
	sub	%o0, %g1, %o0
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retl	/* I-cache flush never needed on Cheetah, see callers. */
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	/* Hypervisor specific versions, patched at boot time. */
__hypervisor_tlb_tl0_error:
	save	%sp, -192, %sp
	mov	%i0, %o0
	call	hypervisor_tlbop_error
	mov	%i1, %o1
	ret
	restore

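	/* sun4v calling convention: fast-trap function number in %o5,
	 * then "ta HV_FAST_TRAP"; the status comes back in %o0, zero
	 * on success. On failure the code below jumps to the error
	 * stub with the status in %o0 and the attempted op in %o1.
	 */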
__hypervisor_flush_tlb_mm:	/* 19 insns */
	mov	%o0, %o2	/* ARG2: mmu context */
	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, 1f
	mov	HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	nop
1:	sethi	%hi(__hypervisor_tlb_tl0_error), %o5
	jmpl	%o5 + %lo(__hypervisor_tlb_tl0_error), %g0
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
	mov	%o0, %g2
	mov	%o1, %o0		/* ARG0: vaddr + IMMU-bit */
	mov	%g2, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, 1f
	mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	retl
	nop
1:	sethi	%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl	%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx	%o1, 3, %g1
	mov	%o2, %g2
	mov	%o0, %g3
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g2 + %g1], %o0	/* ARG0: vaddr + IMMU-bit */
	mov	%g3, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, 1f
	mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g1, 1b
	nop
	retl
	nop
1:	sethi	%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl	%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

__hypervisor_flush_tlb_kernel_range:	/* 31 insns */
	/* %o0=start, %o1=end */
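	/* As in the sun4u versions, ranges of 256KB or more skip the
	 * per-page unmap loop; the fallback at label 4 demaps the
	 * whole nucleus context via MMU_DEMAP_CTX instead.
	 */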
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sub	%o1, %o0, %g2
	srlx	%g2, 18, %g3
	brnz,pn	%g3, 4f
	mov	%o0, %g1
	sethi	%hi(PAGE_SIZE), %g3
	sub	%g2, %g3, %g2
1:	add	%g1, %g2, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn	%o0, 3f
	mov	HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt	%g2, 1b
	sub	%g2, %g3, %g2
2:	retl
	nop
3:	sethi	%hi(__hypervisor_tlb_tl0_error), %o2
	jmpl	%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
	nop
4:	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	0, %o2		/* ARG2: mmu context == nucleus */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	brnz,pn	%o0, 3b
	mov	HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	nop
#endif

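	/* Copy %o2 instruction words from %o1 to %o0, flushing the
	 * I-cache for each word patched so the new code is visible
	 * to instruction fetch.
	 */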
tlb_patch_one:
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0
	subcc	%o2, 1, %o2
	add	%o1, 4, %o1
	bne,pt	%icc, 1b
	add	%o0, 4, %o0
	retl
	nop

#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 * %g5	mm->context	(all tlb flushes)
	 * %g1	address arg 1	(tlb page and range flushes)
	 * %g7	address arg 2	(tlb range flush only)
	 *
	 * %g6	scratch 1
	 * %g2	scratch 2
	 * %g3	scratch 3
	 * %g4	scratch 4
	 */
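	/* The %o registers belong to the interrupted context, so the
	 * hypervisor variants further down stash them in scratch
	 * globals around the hypercalls and restore them before the
	 * final retry.
	 */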
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 24 insns */
	mov	PRIMARY_CONTEXT, %g2
	ldxa	[%g2] ASI_DMMU, %g3
	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa	%g5, [%g2] ASI_DMMU
	mov	0x40, %g4
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl	xcall_flush_tlb_page
xcall_flush_tlb_page:	/* 20 insns */
	/* %g5=context, %g1=vaddr */
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5
	mov	PRIMARY_CONTEXT, %g4
	stxa	%g5, [%g4] ASI_DMMU
	andcc	%g1, 0x1, %g0
	be,pn	%icc, 2f
	andn	%g1, 0x1, %g5
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g2, [%g4] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop

	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 44 insns */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	srlx	%g3, 18, %g2
	brnz,pn	%g2, 2f
	sethi	%hi(PAGE_SIZE), %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	sub	%g3, %g2, %g3
	retry
2:	mov	63 * 8, %g1
1:	ldxa	[%g1] ASI_ITLB_DATA_ACCESS, %g2
	andcc	%g2, 0x40, %g0		/* _PAGE_L_4U */
	bne,pn	%xcc, 2f
	mov	TLB_TAG_ACCESS, %g2
	stxa	%g0, [%g2] ASI_IMMU
	stxa	%g0, [%g1] ASI_ITLB_DATA_ACCESS
	membar	#Sync
2:	ldxa	[%g1] ASI_DTLB_DATA_ACCESS, %g2
	andcc	%g2, 0x40, %g0
	bne,pn	%xcc, 2f
	mov	TLB_TAG_ACCESS, %g2
	stxa	%g0, [%g2] ASI_DMMU
	stxa	%g0, [%g1] ASI_DTLB_DATA_ACCESS
	membar	#Sync
2:	sub	%g1, 8, %g1
	brgez,pt %g1, 1b
	nop
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
xcall_sync_tick:

661:	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	rdpr	%pil, %g2
	wrpr	%g0, PIL_NORMAL_MAX, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	or	%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call	trace_hardirqs_off
	nop
#endif
	call	smp_synchronize_tick_client
	nop
	b	rtrap_xcall
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	.globl	xcall_fetch_glob_regs
xcall_fetch_glob_regs:
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1
	rdpr	%tstate, %g7
	stx	%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr	%tpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TPC]
	rdpr	%tnpc, %g7
	stx	%g7, [%g1 + GR_SNAP_TNPC]
	stx	%o7, [%g1 + GR_SNAP_O7]
	stx	%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids... */
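	/* Rotate the window pointer back one window so the previous
	 * frame's %i7 (the caller's return PC) becomes visible, grab
	 * it, then rotate back.
	 */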
	rdpr	%cwp, %g3
	sub	%g3, 1, %g7
	wrpr	%g7, %cwp
	mov	%i7, %g7
	wrpr	%g3, %cwp
	stx	%g7, [%g1 + GR_SNAP_RPC]
	sethi	%hi(trap_block), %g7
	or	%g7, %lo(trap_block), %g7
	sllx	%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add	%g7, %g2, %g7
	ldx	[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx	%g3, [%g1 + GR_SNAP_THREAD]
	retry

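	/* PMU snapshot: the generic version reads %pic/%pcr directly;
	 * the _n4 variant below reads the four PIC counters via
	 * ASI_PIC and fetches the control registers with the
	 * HV_FAST_VT_GET_PERFREG hypercall (value returned in %o1).
	 */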
	.globl	xcall_fetch_glob_pmu
xcall_fetch_glob_pmu:
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1
	rd	%pic, %g7
	stx	%g7, [%g1 + (4 * 8)]
	rd	%pcr, %g7
	stx	%g7, [%g1 + (0 * 8)]
	retry

	.globl	xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
	sethi	%hi(global_cpu_snapshot), %g1
	or	%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx	%g2, 6, %g3
	add	%g1, %g3, %g1

	ldxa	[%g0] ASI_PIC, %g7
	stx	%g7, [%g1 + (4 * 8)]
	mov	0x08, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (5 * 8)]
	mov	0x10, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (6 * 8)]
	mov	0x18, %g3
	ldxa	[%g3] ASI_PIC, %g7
	stx	%g7, [%g1 + (7 * 8)]

	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o5, %g7

	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	3, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (3 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	2, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (2 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	1, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (1 * 8)]
	mov	HV_FAST_VT_GET_PERFREG, %o5
	mov	0, %o0
	ta	HV_FAST_TRAP
	stx	%o1, [%g1 + (0 * 8)]

	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g7, %o5

	retry

__cheetah_xcall_flush_tlb_kernel_range:	/* 44 insns */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	srlx	%g3, 18, %g2
	brnz,pn	%g2, 2f
	sethi	%hi(PAGE_SIZE), %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	sub	%g3, %g2, %g3
	retry
2:	mov	0x80, %g2
	stxa	%g0, [%g2] ASI_DMMU_DEMAP
	membar	#Sync
	stxa	%g0, [%g2] ASI_IMMU_DEMAP
	membar	#Sync
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx	%g1, (13 - 2), %g1	! Form tag comparator
	sethi	%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub	%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0
	be,pn	%xcc, 2f
	andn	%g2, 0x3, %g2
	cmp	%g2, %g1

	bne,pt	%xcc, 2f
	nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG
	membar	#Sync
2:	cmp	%g3, 0
	bne,pt	%xcc, 1b
	sub	%g3, (1 << 5), %g3

	brz,pn	%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	sethi	%hi(PAGE_SIZE), %g3

1:	flush	%g7
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	add	%g7, (1 << 5), %g7

2:	retry
	nop
	nop

	/* %g5:	error
	 * %g6:	tlb op
	 */
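	/* We are still at TL>0 here, so build a trap frame via etrap
	 * (which, as used here, is relied upon to make %g4/%g5
	 * available as %l4/%l5) and report the failed TLB op once
	 * back at TL=0.
	 */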
__hypervisor_tlb_xcall_error:
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	mov	%l5, %o1
	ba,a,pt	%xcc, rtrap

	.globl	__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%o3, %g1
	mov	%o5, %g7
	clr	%o0		/* ARG0: CPU lists unimplemented */
	clr	%o1		/* ARG1: CPU lists unimplemented */
	mov	%g5, %o2	/* ARG2: mmu context */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	HV_FAST_MMU_DEMAP_CTX, %g6
	brnz,pn	%o0, 1f
	mov	%o0, %g5
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	mov	%g1, %o3
	mov	%g7, %o5
	membar	#Sync
	retry
1:	sethi	%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl	%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	nop

	.globl	__hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 20 insns */
	/* %g5=ctx, %g1=vaddr */
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%g1, %o0	/* ARG0: virtual address */
	mov	%g5, %o1	/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx	%o0, PAGE_SHIFT, %o0
	sllx	%o0, PAGE_SHIFT, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn %o0, 1f
	mov	%o0, %g5
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	membar	#Sync
	retry
1:	sethi	%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl	%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	nop

	.globl	__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	srlx	%g3, 18, %g7
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	mov	%o0, %g2
	mov	%o1, %g4
	brnz,pn	%g7, 2f
	mov	%o2, %g7
1:	add	%g1, %g3, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	mov	HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn	%o0, 1f
	mov	%o0, %g5
	sethi	%hi(PAGE_SIZE), %o2
	brnz,pt	%g3, 1b
	sub	%g3, %o2, %g3
5:	mov	%g2, %o0
	mov	%g4, %o1
	mov	%g7, %o2
	membar	#Sync
	retry
1:	sethi	%hi(__hypervisor_tlb_xcall_error), %g4
	jmpl	%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
	nop
2:	mov	%o3, %g1
	mov	%o5, %g3
	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	0, %o2		/* ARG2: mmu context == nucleus */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	%g1, %o3
	brz,pt	%o0, 5b
	mov	%g3, %o5
	mov	HV_FAST_MMU_DEMAP_CTX, %g6
	ba,pt	%xcc, 1b
	clr	%g5

	/* These just get rescheduled to PIL vectors. */
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl	xcall_call_function_single
xcall_call_function_single:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl	xcall_kgdb_capture
xcall_kgdb_capture:
	wr	%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */

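	/* Boot-time patching: each generic routine above is sized (and
	 * nop-padded) to hold its largest replacement, and the counts
	 * handed to tlb_patch_one must match the "NN insns" annotation
	 * on the corresponding replacement body.
	 */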
	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	19, %o2

	sethi	%hi(__flush_tlb_page), %o0
	or	%o0, %lo(__flush_tlb_page), %o0
	sethi	%hi(__cheetah_flush_tlb_page), %o1
	or	%o1, %lo(__cheetah_flush_tlb_page), %o1
	call	tlb_patch_one
	mov	22, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	tlb_patch_one
	mov	27, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__cheetah_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__cheetah_flush_dcache_page), %o1
	or	%o1, %lo(__cheetah_flush_dcache_page), %o1
	call	tlb_patch_one
	mov	11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	44, %o2
#endif /* CONFIG_SMP */

	ret
	restore

	.globl	hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	19, %o2

	sethi	%hi(__flush_tlb_page), %o0
	or	%o0, %lo(__flush_tlb_page), %o0
	sethi	%hi(__hypervisor_flush_tlb_page), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call	tlb_patch_one
	mov	22, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call	tlb_patch_one
	mov	27, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	31, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__hypervisor_flush_dcache_page), %o1
	or	%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call	tlb_patch_one
	mov	2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_mm), %o0
	or	%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	24, %o2

	sethi	%hi(xcall_flush_tlb_page), %o0
	or	%o0, %lo(xcall_flush_tlb_page), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call	tlb_patch_one
	mov	20, %o2

	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	44, %o2
#endif /* CONFIG_SMP */

	ret
	restore