/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 *  - handle in assembly and use shadowed registers only
 *  - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/pgtable.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import		pa_tlb_lock,data
	.macro	load_pa_tlb_lock reg
	mfctl		%cr25,\reg
	addil		L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
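
	/* A worked example of the bit shuffling above (illustrative):
	 * with SPACEID_SHIFT == 0, "depd,z \spc,62,31,\prot" deposits
	 * the low 31 bits of the space id ending at bit 62, i.e.
	 * prot = spc << 1, so a space id of 0x1234 yields a protection
	 * id of 0x2468.  With a non-zero SPACEID_SHIFT, the extrd,u
	 * form first drops the low SPACEID_SHIFT bits of the space id
	 * and likewise leaves the result shifted left by one. */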

	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
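
	/* Sketch of what the queue writes above achieve (roughly):
	 * rfir reloads the IA queues from %cr17/%cr18 and the PSW from
	 * %ipsw.  Each mtctl to %cr18 writes the back element of the
	 * IAOQ and pushes the previous value to the front, so after
	 * the rfir execution resumes at 4: (with 4:+4 queued next),
	 * now with KERNEL_PSW in effect and only %r1 clobbered. */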

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *      If sr7 == 0
	 *          Already using a kernel stack, so call the
	 *          get_stack_use_r30 macro to push a pt_regs structure
	 *          on the stack, and store registers there.
	 *      else
	 *          Need to set up a kernel stack, so call the
	 *          get_stack_use_cr30 macro to set up a pointer
	 *          to the pt_regs structure contained within the
	 *          task pointer pointed to by cr30. Set the stack
	 *          pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer
	 * to the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl	%cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys	%r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys	%r1,%r9
	ldo	TASK_REGS(%r9),%r9
	STREG	%r17,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r1,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* In LP64, the space register contains part of the upper 32 bits
	 * of the faulting address.  We have to extract this and place it
	 * in the va, zeroing the corresponding bits in the space
	 * register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
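
	/* Worked example (illustrative): the three instructions above
	 * behave like
	 *	tmp = spc & ((1 << SPACEID_SHIFT) - 1);
	 *	spc = spc & ~((1 << SPACEID_SHIFT) - 1);
	 *	va  = (va & ~(mask << 32)) | (tmp << 32);
	 * i.e. the low SPACEID_SHIFT bits of the space id are really
	 * address bits 32..32+SPACEID_SHIFT-1 and get moved into va. */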

	.import swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd		spc,reg
	ldil		L%PA(swapper_pg_dir),\reg
	ldo		R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=)	%r0,\spc,%r0
	mfctl		%cr25,\reg
	.endm

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=) %r0,\tmp,%r0		/* nullify if executing as kernel */
	cmpb,COND(<>),n \tmp,\spc,\fault
	.endm

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# else
#  if PAGE_SIZE > 4096
	extru	\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
#  else
	extru	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#  endif
# endif
#endif
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	dep	%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
	SHLREG	\pmd,PxD_VALUE_SHIFT,\pmd
	extru	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
	.endm
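
	/* Worked example (illustrative, assuming a 32-bit 2-level setup
	 * with 4k pages, i.e. ASM_PGDIR_SHIFT == 22):  a va is split as
	 * va[31..22] -> pgd index, va[21..12] -> pte index and
	 * va[11..0] -> page offset.  ldw,s scales the pgd index by the
	 * entry size during the add, and shladd does the same for the
	 * pte index, so no separate multiply is needed. */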

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16KB page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm
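
	/* Control-flow sketch for the nullification dance above: each
	 * extrd,u,*= nullifies the following insn when the upper (pgd
	 * index) bits of va are zero, while the final extrd,u,*<>
	 * nullifies it when they are not.  Net effect: a va in the low
	 * 4GB (index 0) skips the pgd load and uses the pmd adjacent
	 * to the pgd (ldo ASM_PGD_PMD_OFFSET), everything else does
	 * the real 3-level lookup. */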

	/* Acquire pa_tlb_lock lock and check page is present. */
	.macro	tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
98:	cmpib,COND(=),n	0,\spc,2f
	load_pa_tlb_lock \tmp
1:	LDCW		0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG		0(\ptp),\pte
	bb,<,n		\pte,_PAGE_PRESENT_BIT,3f
	LDCW		0(\tmp),\tmp1
	b		\fault
	stw		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG		0(\ptp),\pte
	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm
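
	/* Lock idiom sketch (illustrative): ldcw atomically loads a
	 * word and zeroes it, so a zero read means somebody else holds
	 * the lock.  The generic PA-RISC spinlock shape is
	 *
	 *	1:  LDCW  0(addr),t		; t != 0 -> lock was free
	 *	    cmpib,=  0,t,1b		; t == 0 -> spin
	 *	    nop
	 *	    ...critical section...
	 *	    stw  nonzero,0(addr)	; release
	 *
	 * Above, the (non-zero, user) space id doubles as the release
	 * value, and the 98:/99: ALTERNATIVE turns the whole sequence
	 * into NOPs on non-SMP kernels. */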

	/* Release pa_tlb_lock lock without reloading lock address. */
	.macro	tlb_unlock0	spc,tmp,tmp1
#ifdef CONFIG_SMP
98:	or,COND(=)	%r0,\spc,%r0
	LDCW		0(\tmp),\tmp1
	or,COND(=)	%r0,\spc,%r0
	stw		\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release pa_tlb_lock lock. */
	.macro	tlb_unlock1	spc,tmp,tmp1
#ifdef CONFIG_SMP
98:	load_pa_tlb_lock \tmp
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
	tlb_unlock0	\spc,\tmp,\tmp1
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_accessed	ptp,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptp)
	.endm
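
	/* Nullification sketch: "and,COND(<>) \tmp1,\pte,%r0" computes
	 * pte & _PAGE_ACCESSED and nullifies the following STREG when
	 * the result is non-zero.  In C this is roughly (illustrative):
	 *
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptp = pte | _PAGE_ACCESSED;
	 *
	 * so an already-accessed pte never dirties the cache line. */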

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptp,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptp)
	.endm

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)
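
	/* Worked values (illustrative): with 4k kernel pages
	 * PAGE_ADD_SHIFT is 0 (kernel PFNs already match the 4k TLB
	 * granule); with 16k pages it is 2 and with 64k pages 4, i.e. a
	 * kernel PFN is shifted left that many bits to obtain the
	 * 4k-granule PFN the TLB insert instructions expect. */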

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy		\pte,\tmp
	extrd,u		\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi		_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u		\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
				64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi		_PAGE_SIZE_ENCODING_DEFAULT,63,\
				(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot,tmp
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi		7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd		%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi		1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG	\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm
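
	/* Sketch of the check above (illustrative): extrd,s extracts,
	 * as a signed value, the four pte bits that came from physical
	 * address bits 28..31.  For an 0xfXXXXXXX address that value
	 * is -1, addi then produces 0, so the second extrd,s is not
	 * nullified and sign-extends the f's into the upper word.  For
	 * any other top nibble the sign extension is skipped and pte
	 * is left unchanged. */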

	/* The alias region is an 8MB-aligned, 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" TLB entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	depi	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl	%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u	\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi	(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi		(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z	\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z	\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or		%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align		PAGE_SIZE

	.import		handle_interruption,code
	.import		do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page).  Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy		%r16,%r29
	ldo		PT_FR31(%r29),%r1
	rest_fp		%r1
	rest_general	%r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm		PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1	%r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials	%r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPT
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPT */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm	PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPT
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPT */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n	PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl	%isr, %r16
	nop		/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi		0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n		intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG		PT_IASQ0(%r29), %r16
	LDREG		PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG	%r16, PT_IASQ0(%r29)
	STREG	%r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0,t1
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0,t1
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0,t1
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0,t1
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba		pte,(va)
	idtlbp		prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0,t1
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0,t1
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */

	mfctl	%cr19,%r9 /* Get iir */

	/* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
	   Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */

	/* Checks for fdc,fdce,pdc,"fic,4f" only */
	ldi	0x280,%r16
	and	%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_probe_check
	bb,>=,n	%r9,26,nadtlb_nullify	/* m bit not set, just nullify */
	BL	get_register,%r25
	extrw,u	%r9,15,5,%r8		/* Get index register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	copy	%r1,%r24
	BL	get_register,%r25
	extrw,u	%r9,10,5,%r8		/* Get base register # */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL	set_register,%r25
	add,l	%r1,%r24,%r1		/* doesn't affect c/b bits */

nadtlb_nullify:
	mfctl	%ipsw,%r8
	ldil	L%PSW_N,%r9
	or	%r8,%r9,%r8		/* Set PSW_N */
	mtctl	%r8,%ipsw

	rfir
	nop

	/*
		When there is no translation for the probe address then we
		must nullify the insn and return zero in the target register.
		This will indicate to the calling code that it does not have
		write/read privileges to this address.

		This should technically work for prober and probew in PA 1.1,
		and also probe,r and probe,w in PA 2.0

		WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
		THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.

	*/
nadtlb_probe_check:
	ldi	0x80,%r16
	and	%r9,%r16,%r17
	cmpb,<>,n	%r16,%r17,nadtlb_fault	/* Must be probe,[rw]*/
	BL	get_register,%r25	/* Find the target register */
	extrw,u	%r9,31,5,%r8		/* Get target register */
	cmpib,COND(=),n	-1,%r1,nadtlb_fault	/* have to use slow path */
	BL	set_register,%r25
	copy	%r0,%r1			/* Write zero to target register */
	b	nadtlb_nullify		/* Nullify return insn */
	nop


#ifdef CONFIG_64BIT
itlb_miss_20w:

	/*
	 * An itlb miss is a little different, since we allow users to
	 * fault on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,itlb_fault

	L3_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0,t1
	rfir
	nop

naitlb_miss_20w:

	/*
	 * An itlb miss is a little different, since we allow users to
	 * fault on the gateway page which is in the kernel address space.
	 */

	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,naitlb_fault

	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0,t1
	rfir
	nop

naitlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#else

itlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0,t1
	rfir
	nop

naitlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	iitlba		pte,(%sr1,va)
	iitlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0,t1
	rfir
	nop

naitlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,itlb_fault,11

	iitlba		pte,(%sr0, va)
	iitlbp		prot,(%sr0, va)

	rfir
	nop


itlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,itlb_fault

	L2_ptep		ptp,pte,t0,va,itlb_fault

	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0,t1
	rfir
	nop

naitlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,naitlb_fault

	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	iitlbt		pte,prot

	tlb_unlock1	spc,t0,t1
	rfir
	nop

naitlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,naitlb_fault,20

	iitlbt		pte,prot

	rfir
	nop

#endif

#ifdef CONFIG_64BIT

dbit_trap_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dbit_fault

	L3_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0,t1
	rfir
	nop
#else

dbit_trap_11:

	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp		spc,%sr1

	idtlba		pte,(%sr1,va)
	idtlbp		prot,(%sr1,va)

	mtsp		t1, %sr1	/* Restore sr1 */

	tlb_unlock0	spc,t0,t1
	rfir
	nop

dbit_trap_20:
	get_pgd		spc,ptp

	space_check	spc,t0,dbit_fault

	L2_ptep		ptp,pte,t0,va,dbit_fault

	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
	update_dirty	ptp,pte,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock0	spc,t0,t1
	rfir
	nop
#endif

	.import handle_interruption,code

kernel_bad_space:
	b	intr_save
	ldi	31,%r8	/* Use an unused code */

dbit_fault:
	b	intr_save
	ldi	20,%r8

itlb_fault:
	b	intr_save
	ldi	PARISC_ITLB_TRAP,%r8

nadtlb_fault:
	b	intr_save
	ldi	17,%r8

naitlb_fault:
	b	intr_save
	ldi	16,%r8

dtlb_fault:
	b	intr_save
	ldi	15,%r8

	/* Register saving semantics for system calls:

	   %r1		   clobbered by system call macro in userspace
	   %r2		   saved in PT_REGS by gateway page
	   %r3  - %r18	   preserved by C code (saved by signal code)
	   %r19 - %r20	   saved in PT_REGS by gateway page
	   %r21 - %r22	   non-standard syscall args
			   stored in kernel stack by gateway page
	   %r23 - %r26	   arg3-arg0, saved in PT_REGS by gateway page
	   %r27 - %r30	   saved in PT_REGS by gateway page
	   %r31		   syscall return pointer
	 */

	/* Floating point registers (FIXME: what do we do with these?)

	   %fr0  - %fr3	   status/exception, not preserved
	   %fr4  - %fr7	   arguments
	   %fr8	 - %fr11   not preserved by C code
	   %fr12 - %fr21   preserved by C code
	   %fr22 - %fr31   not preserved by C code
	 */

	.macro	reg_save regs
	STREG	%r3, PT_GR3(\regs)
	STREG	%r4, PT_GR4(\regs)
	STREG	%r5, PT_GR5(\regs)
	STREG	%r6, PT_GR6(\regs)
	STREG	%r7, PT_GR7(\regs)
	STREG	%r8, PT_GR8(\regs)
	STREG	%r9, PT_GR9(\regs)
	STREG	%r10,PT_GR10(\regs)
	STREG	%r11,PT_GR11(\regs)
	STREG	%r12,PT_GR12(\regs)
	STREG	%r13,PT_GR13(\regs)
	STREG	%r14,PT_GR14(\regs)
	STREG	%r15,PT_GR15(\regs)
	STREG	%r16,PT_GR16(\regs)
	STREG	%r17,PT_GR17(\regs)
	STREG	%r18,PT_GR18(\regs)
	.endm

	.macro	reg_restore regs
	LDREG	PT_GR3(\regs), %r3
	LDREG	PT_GR4(\regs), %r4
	LDREG	PT_GR5(\regs), %r5
	LDREG	PT_GR6(\regs), %r6
	LDREG	PT_GR7(\regs), %r7
	LDREG	PT_GR8(\regs), %r8
	LDREG	PT_GR9(\regs), %r9
	LDREG	PT_GR10(\regs),%r10
	LDREG	PT_GR11(\regs),%r11
	LDREG	PT_GR12(\regs),%r12
	LDREG	PT_GR13(\regs),%r13
	LDREG	PT_GR14(\regs),%r14
	LDREG	PT_GR15(\regs),%r15
	LDREG	PT_GR16(\regs),%r16
	LDREG	PT_GR17(\regs),%r17
	LDREG	PT_GR18(\regs),%r18
	.endm

	.macro	fork_like name
ENTRY_CFI(sys_\name\()_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1
	reg_save %r1
	mfctl	%cr27, %r28
	ldil	L%sys_\name, %r31
	be	R%sys_\name(%sr4,%r31)
	STREG	%r28, PT_CR27(%r1)
ENDPROC_CFI(sys_\name\()_wrapper)
	.endm

fork_like clone
fork_like clone3
fork_like fork
fork_like vfork

	/* Set the return value for the child */
ENTRY(child_return)
	BL	schedule_tail, %r2
	nop
finish_child_return:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */

	LDREG	PT_CR27(%r1), %r3
	mtctl	%r3, %cr27
	reg_restore %r1
	b	syscall_exit
	copy	%r0,%r28
END(child_return)

ENTRY_CFI(sys_rt_sigreturn_wrapper)
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
	/* Don't save regs, we are going to restore them from sigcontext. */
	STREG	%r2, -RP_OFFSET(%r30)
#ifdef CONFIG_64BIT
	ldo	FRAME_SIZE(%r30), %r30
	BL	sys_rt_sigreturn,%r2
	ldo	-16(%r30),%r29		/* Reference param save area */
#else
	BL	sys_rt_sigreturn,%r2
	ldo	FRAME_SIZE(%r30), %r30
#endif

	ldo	-FRAME_SIZE(%r30), %r30
	LDREG	-RP_OFFSET(%r30), %r2

	/* FIXME: I think we need to restore a few more things here. */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1),%r1	/* get pt regs */
	reg_restore %r1

	/* If the signal was received while the process was blocked on a
	 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
	 * take us to syscall_exit_rfi and on to intr_return.
	 */
	bv	%r0(%r2)
	LDREG	PT_GR28(%r1),%r28  /* reload original r28 for syscall_exit */
ENDPROC_CFI(sys_rt_sigreturn_wrapper)

ENTRY(syscall_exit)
	/* NOTE: Not all syscalls exit this way.  rt_sigreturn will exit
	 * via syscall_exit_rfi if the signal was received while the process
	 * was running.
	 */

	/* save return value now */

	mfctl	%cr30, %r1
	LDREG	TI_TASK(%r1),%r1
	STREG	%r28,TASK_PT_GR28(%r1)

	/* Seems to me that dp could be wrong here, if the syscall involved
	 * calling a module, and nothing got round to restoring dp on return.
	 */
	loadgp

syscall_check_resched:

	/* check for reschedule */

	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19	/* long */
	bb,<,n	%r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

	.import do_signal,code
syscall_check_sig:
	LDREG	TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
	and,COND(<>)	%r19, %r26, %r0
	b,n	syscall_restore	/* skip past if we've nothing to do */

syscall_do_signal:
	/* Save callee-save registers (for sigcontext).
	 * FIXME: After this point the process structure should be
	 * consistent with all the relevant state of the process
	 * before the syscall.  We need to verify this.
	 */
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r26		/* struct pt_regs *regs */
	reg_save %r26

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	ldi	1, %r25				/* long in_syscall = 1 */

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
	ldo	TASK_REGS(%r1), %r20		/* reload pt_regs */
	reg_restore %r20

	b,n	syscall_check_sig

syscall_restore:
	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

	/* Are we being ptraced? */
	ldw	TASK_FLAGS(%r1),%r19
	ldi	_TIF_SYSCALL_TRACE_MASK,%r2
	and,COND(=)	%r19,%r2,%r0
	b,n	syscall_restore_rfi

	ldo	TASK_PT_FR31(%r1),%r19		   /* reload fpregs */
	rest_fp	%r19

	LDREG	TASK_PT_SAR(%r1),%r19		   /* restore SAR */
	mtsar	%r19

	LDREG	TASK_PT_GR2(%r1),%r2		   /* restore user rp */
	LDREG	TASK_PT_GR19(%r1),%r19
	LDREG	TASK_PT_GR20(%r1),%r20
	LDREG	TASK_PT_GR21(%r1),%r21
	LDREG	TASK_PT_GR22(%r1),%r22
	LDREG	TASK_PT_GR23(%r1),%r23
	LDREG	TASK_PT_GR24(%r1),%r24
	LDREG	TASK_PT_GR25(%r1),%r25
	LDREG	TASK_PT_GR26(%r1),%r26
	LDREG	TASK_PT_GR27(%r1),%r27		   /* restore user dp */
	LDREG	TASK_PT_GR28(%r1),%r28		   /* syscall return value */
	LDREG	TASK_PT_GR29(%r1),%r29
	LDREG	TASK_PT_GR31(%r1),%r31		   /* restore syscall rp */

	/* NOTE: We use rsm/ssm pair to make this operation atomic */
	LDREG	TASK_PT_GR30(%r1),%r1		   /* Get user sp */
	rsm	PSW_SM_I, %r0
	copy	%r1,%r30			   /* Restore user sp */
	mfsp	%sr3,%r1			   /* Get user space id */
	mtsp	%r1,%sr7			   /* Restore sr7 */
	ssm	PSW_SM_I, %r0

	/* Set sr2 to zero for userspace syscalls to work. */
	mtsp	%r0,%sr2
	mtsp	%r1,%sr4			   /* Restore sr4 */
	mtsp	%r1,%sr5			   /* Restore sr5 */
	mtsp	%r1,%sr6			   /* Restore sr6 */

	depi	3,31,2,%r31			   /* ensure return to user mode. */

#ifdef CONFIG_64BIT
	/* decide whether to reset the wide mode bit
	 *
	 * For a syscall, the W bit is stored in the lowest bit
	 * of sp.  Extract it and reset W if it is zero */
	extrd,u,*<>	%r30,63,1,%r1
	rsm	PSW_SM_W, %r0
	/* now reset the lowest bit of sp if it was set */
	xor	%r30,%r1,%r30
#endif
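	/* Wide-mode sketch (illustrative): a wide (64-bit) userspace
	 * entered the gateway with bit 0 of %sp set, so %r1 above is 1,
	 * the rsm is nullified (W stays set) and the xor strips the
	 * marker bit from %sp.  A narrow caller leaves bit 0 clear: W
	 * is reset and the xor is a no-op. */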
1893 be,n 0(%sr3,%r31) /* return to user space */
1894
1895 /* We have to return via an RFI, so that PSW T and R bits can be set
1896 * appropriately.
1897 * This sets up pt_regs so we can return via intr_restore, which is not
1898 * the most efficient way of doing things, but it works.
1899 */
1900syscall_restore_rfi:
1901 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1902 mtctl %r2,%cr0 /* for immediate trap */
1903 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1904 ldi 0x0b,%r20 /* Create new PSW */
1905 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1906
1907 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1908 * set in thread_info.h and converted to PA bitmap
1909 * numbers in asm-offsets.c */
1910
1911 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1912 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1913 depi -1,27,1,%r20 /* R bit */
1914
1915 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1916 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1917 depi -1,7,1,%r20 /* T bit */
1918
1919 STREG %r20,TASK_PT_PSW(%r1)
1920
1921 /* Always store space registers, since sr3 can be changed (e.g. fork) */
1922
1923 mfsp %sr3,%r25
1924 STREG %r25,TASK_PT_SR3(%r1)
1925 STREG %r25,TASK_PT_SR4(%r1)
1926 STREG %r25,TASK_PT_SR5(%r1)
1927 STREG %r25,TASK_PT_SR6(%r1)
1928 STREG %r25,TASK_PT_SR7(%r1)
1929 STREG %r25,TASK_PT_IASQ0(%r1)
1930 STREG %r25,TASK_PT_IASQ1(%r1)
1931
1932 /* XXX W bit??? */
1933 /* Now if old D bit is clear, it means we didn't save all registers
1934 * on syscall entry, so do that now. This only happens on TRACEME
1935 * calls, or if someone attached to us while we were on a syscall.
1936 * We could make this more efficient by not saving r3-r18, but
1937 * then we wouldn't be able to use the common intr_restore path.
1938 * It is only for traced processes anyway, so performance is not
1939 * an issue.
1940 */
1941 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1942 ldo TASK_REGS(%r1),%r25
1943 reg_save %r25 /* Save r3 to r18 */
1944
1945 /* Save the current sr */
1946 mfsp %sr0,%r2
1947 STREG %r2,TASK_PT_SR0(%r1)
1948
1949 /* Save the scratch sr */
1950 mfsp %sr1,%r2
1951 STREG %r2,TASK_PT_SR1(%r1)
1952
1953 /* sr2 should be set to zero for userspace syscalls */
1954 STREG %r0,TASK_PT_SR2(%r1)
1955
1956 LDREG TASK_PT_GR31(%r1),%r2
1957 depi 3,31,2,%r2 /* ensure return to user mode. */
1958 STREG %r2,TASK_PT_IAOQ0(%r1)
1959 ldo 4(%r2),%r2
1960 STREG %r2,TASK_PT_IAOQ1(%r1)
1961 b intr_restore
1962 copy %r25,%r16
1963
1964pt_regs_ok:
1965 LDREG TASK_PT_IAOQ0(%r1),%r2
1966 depi 3,31,2,%r2 /* ensure return to user mode. */
1967 STREG %r2,TASK_PT_IAOQ0(%r1)
1968 LDREG TASK_PT_IAOQ1(%r1),%r2
1969 depi 3,31,2,%r2
1970 STREG %r2,TASK_PT_IAOQ1(%r1)
1971 b intr_restore
1972 copy %r25,%r16
1973
1974syscall_do_resched:
1975 load32 syscall_check_resched,%r2 /* if resched, we start over again */
1976 load32 schedule,%r19
1977 bv %r0(%r19) /* jumps to schedule() */
1978#ifdef CONFIG_64BIT
1979 ldo -16(%r30),%r29 /* Reference param save area */
1980#else
1981 nop
1982#endif
1983END(syscall_exit)
1984
1985
1986#ifdef CONFIG_FUNCTION_TRACER
1987
1988 .import ftrace_function_trampoline,code
1989 .align L1_CACHE_BYTES
1990ENTRY_CFI(mcount, caller)
1991_mcount:
1992 .export _mcount,data
1993 /*
1994 * The 64bit mcount() function pointer needs 4 dwords, of which the
1995 * first two are free. We optimize it here and put 2 instructions for
1996 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
1997 * have all on one L1 cacheline.
1998 */
1999 ldi 0, %arg3
2000 b ftrace_function_trampoline
2001 copy %r3, %arg2 /* caller original %sp */
2002ftrace_stub:
2003 .globl ftrace_stub
2004 .type ftrace_stub, @function
2005#ifdef CONFIG_64BIT
2006 bve (%rp)
2007#else
2008 bv %r0(%rp)
2009#endif
2010 nop
2011#ifdef CONFIG_64BIT
2012 .dword mcount
2013 .dword 0 /* code in head.S puts value of global gp here */
2014#endif
2015ENDPROC_CFI(mcount)
2016
2017#ifdef CONFIG_DYNAMIC_FTRACE
2018
2019#ifdef CONFIG_64BIT
2020#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
2021#else
2022#define FTRACE_FRAME_SIZE FRAME_SIZE
2023#endif
2024ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2025ftrace_caller:
2026 .global ftrace_caller
2027
2028 STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
2029 ldo -FTRACE_FRAME_SIZE(%sp), %r3
2030 STREG %rp, -RP_OFFSET(%r3)
2031
2032 /* Offset 0 is already allocated for %r1 */
2033 STREG %r23, 2*REG_SZ(%r3)
2034 STREG %r24, 3*REG_SZ(%r3)
2035 STREG %r25, 4*REG_SZ(%r3)
2036 STREG %r26, 5*REG_SZ(%r3)
2037 STREG %r28, 6*REG_SZ(%r3)
2038 STREG %r29, 7*REG_SZ(%r3)
2039#ifdef CONFIG_64BIT
2040 STREG %r19, 8*REG_SZ(%r3)
2041 STREG %r20, 9*REG_SZ(%r3)
2042 STREG %r21, 10*REG_SZ(%r3)
2043 STREG %r22, 11*REG_SZ(%r3)
2044 STREG %r27, 12*REG_SZ(%r3)
2045 STREG %r31, 13*REG_SZ(%r3)
2046 loadgp
2047 ldo -16(%sp),%r29
2048#endif
2049 LDREG 0(%r3), %r25
2050 copy %rp, %r26
2051 ldo -8(%r25), %r25
2052 ldi 0, %r23 /* no pt_regs */
2053 b,l ftrace_function_trampoline, %rp
2054 copy %r3, %r24
2055
2056 LDREG -RP_OFFSET(%r3), %rp
2057 LDREG 2*REG_SZ(%r3), %r23
2058 LDREG 3*REG_SZ(%r3), %r24
2059 LDREG 4*REG_SZ(%r3), %r25
2060 LDREG 5*REG_SZ(%r3), %r26
2061 LDREG 6*REG_SZ(%r3), %r28
2062 LDREG 7*REG_SZ(%r3), %r29
2063#ifdef CONFIG_64BIT
2064 LDREG 8*REG_SZ(%r3), %r19
2065 LDREG 9*REG_SZ(%r3), %r20
2066 LDREG 10*REG_SZ(%r3), %r21
2067 LDREG 11*REG_SZ(%r3), %r22
2068 LDREG 12*REG_SZ(%r3), %r27
2069 LDREG 13*REG_SZ(%r3), %r31
2070#endif
2071 LDREG 1*REG_SZ(%r3), %r3
2072
2073 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2074 /* Adjust return point to jump back to beginning of traced function */
2075 ldo -4(%r1), %r1
2076 bv,n (%r1)
2077
2078ENDPROC_CFI(ftrace_caller)
2079
2080#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
2081ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
2082 CALLS,SAVE_RP,SAVE_SP)
2083ftrace_regs_caller:
2084 .global ftrace_regs_caller
2085
2086 ldo -FTRACE_FRAME_SIZE(%sp), %r1
2087 STREG %rp, -RP_OFFSET(%r1)
2088
2089 copy %sp, %r1
2090 ldo PT_SZ_ALGN(%sp), %sp
2091
2092 STREG %rp, PT_GR2(%r1)
2093 STREG %r3, PT_GR3(%r1)
2094 STREG %r4, PT_GR4(%r1)
2095 STREG %r5, PT_GR5(%r1)
2096 STREG %r6, PT_GR6(%r1)
2097 STREG %r7, PT_GR7(%r1)
2098 STREG %r8, PT_GR8(%r1)
2099 STREG %r9, PT_GR9(%r1)
2100 STREG %r10, PT_GR10(%r1)
2101 STREG %r11, PT_GR11(%r1)
2102 STREG %r12, PT_GR12(%r1)
2103 STREG %r13, PT_GR13(%r1)
2104 STREG %r14, PT_GR14(%r1)
2105 STREG %r15, PT_GR15(%r1)
2106 STREG %r16, PT_GR16(%r1)
2107 STREG %r17, PT_GR17(%r1)
2108 STREG %r18, PT_GR18(%r1)
2109 STREG %r19, PT_GR19(%r1)
2110 STREG %r20, PT_GR20(%r1)
2111 STREG %r21, PT_GR21(%r1)
2112 STREG %r22, PT_GR22(%r1)
2113 STREG %r23, PT_GR23(%r1)
2114 STREG %r24, PT_GR24(%r1)
2115 STREG %r25, PT_GR25(%r1)
2116 STREG %r26, PT_GR26(%r1)
2117 STREG %r27, PT_GR27(%r1)
2118 STREG %r28, PT_GR28(%r1)
2119 STREG %r29, PT_GR29(%r1)
2120 STREG %r30, PT_GR30(%r1)
2121 STREG %r31, PT_GR31(%r1)
	mfctl %cr11, %r26
	STREG %r26, PT_SAR(%r1)

	copy %rp, %r26
	LDREG -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo -8(%r25), %r25
	ldo -FTRACE_FRAME_SIZE(%r1), %arg2
	b,l ftrace_function_trampoline, %rp
	copy %r1, %arg3 /* struct pt_regs */

	ldo -PT_SZ_ALGN(%sp), %r1

	LDREG PT_SAR(%r1), %rp
	mtctl %rp, %cr11

	LDREG PT_GR2(%r1), %rp
	LDREG PT_GR3(%r1), %r3
	LDREG PT_GR4(%r1), %r4
	LDREG PT_GR5(%r1), %r5
	LDREG PT_GR6(%r1), %r6
	LDREG PT_GR7(%r1), %r7
	LDREG PT_GR8(%r1), %r8
	LDREG PT_GR9(%r1), %r9
	LDREG PT_GR10(%r1),%r10
	LDREG PT_GR11(%r1),%r11
	LDREG PT_GR12(%r1),%r12
	LDREG PT_GR13(%r1),%r13
	LDREG PT_GR14(%r1),%r14
	LDREG PT_GR15(%r1),%r15
	LDREG PT_GR16(%r1),%r16
	LDREG PT_GR17(%r1),%r17
	LDREG PT_GR18(%r1),%r18
	LDREG PT_GR19(%r1),%r19
	LDREG PT_GR20(%r1),%r20
	LDREG PT_GR21(%r1),%r21
	LDREG PT_GR22(%r1),%r22
	LDREG PT_GR23(%r1),%r23
	LDREG PT_GR24(%r1),%r24
	LDREG PT_GR25(%r1),%r25
	LDREG PT_GR26(%r1),%r26
	LDREG PT_GR27(%r1),%r27
	LDREG PT_GR28(%r1),%r28
	LDREG PT_GR29(%r1),%r29
	LDREG PT_GR30(%r1),%r30
	LDREG PT_GR31(%r1),%r31

	ldo -PT_SZ_ALGN(%sp), %sp
	LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo -4(%r1), %r1
	bv,n (%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy %r3,%r1
	STREG %r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy %sp,%r3
	STREGM %r1,FRAME_SIZE(%sp)
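	/* Preserve the traced function's return values (%ret0/%ret1)
	 * across the C call below; they are reloaded before we return. */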
	STREG %ret0,8(%r3)
	STREG %ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32 ftrace_return_to_handler,%ret0
	load32 .Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo -16(%sp),%ret1	/* Reference param save area */
	bve (%ret0)
#else
	bv %r0(%ret0)
#endif
	ldi 0,%r26
.Lftrace_ret:
	copy %ret0,%rp

	/* restore original return values */
	LDREG 8(%r3),%ret0
	LDREG 16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve (%rp)
#else
	bv %r0(%rp)
#endif
	LDREGM -FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
   unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy %sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */
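	/* Background sketch (inferred from the loads below, so treat the
	 * exact offsets as assumptions): on 64-bit, a function pointer
	 * refers to a function descriptor whose entry address sits at
	 * offset 16, hence the "LDREG 16(%arg1), %arg1" below.  On
	 * 32-bit, the pointer may instead be a plabel: bit 30 flags it,
	 * the low two bits are cleared, and the real entry address is
	 * loaded from offset 0 of the descriptor. */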

	/* Switch to new stack.  We allocate two frames.  */
	ldo 2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG 16(%arg1), %arg1
	bve,l (%arg1), %rp
	STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve (%rp)
	LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG %r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG %rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n %arg1, 30, 1f
	depwi 0,31,2, %arg1
	LDREG 0(%arg1), %arg1
1:
	be,l 0(%sr4,%arg1), %sr0, %r31
	copy %r31, %rp
	LDREG -FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv (%rp)
	LDREG -FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non-access TLB miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1.  This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value.  So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
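	/* Dispatch sketch: blr branches to the table base plus (%r8 << 3),
	 * so every case below must be exactly two instructions (8 bytes):
	 * a bv back through %r25 plus its delay-slot copy/ldi.  For
	 * example, %r8 == 2 lands on the "bv ... / copy %r2,%r1" pair. */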
	blr %r8,%r0
	nop
	bv %r0(%r25)	/* r0 */
	copy %r0,%r1
	bv %r0(%r25)	/* r1 - shadowed */
	ldi -1,%r1
	bv %r0(%r25)	/* r2 */
	copy %r2,%r1
	bv %r0(%r25)	/* r3 */
	copy %r3,%r1
	bv %r0(%r25)	/* r4 */
	copy %r4,%r1
	bv %r0(%r25)	/* r5 */
	copy %r5,%r1
	bv %r0(%r25)	/* r6 */
	copy %r6,%r1
	bv %r0(%r25)	/* r7 */
	copy %r7,%r1
	bv %r0(%r25)	/* r8 - shadowed */
	ldi -1,%r1
	bv %r0(%r25)	/* r9 - shadowed */
	ldi -1,%r1
	bv %r0(%r25)	/* r10 */
	copy %r10,%r1
	bv %r0(%r25)	/* r11 */
	copy %r11,%r1
	bv %r0(%r25)	/* r12 */
	copy %r12,%r1
	bv %r0(%r25)	/* r13 */
	copy %r13,%r1
	bv %r0(%r25)	/* r14 */
	copy %r14,%r1
	bv %r0(%r25)	/* r15 */
	copy %r15,%r1
	bv %r0(%r25)	/* r16 - shadowed */
	ldi -1,%r1
	bv %r0(%r25)	/* r17 - shadowed */
	ldi -1,%r1
	bv %r0(%r25)	/* r18 */
	copy %r18,%r1
	bv %r0(%r25)	/* r19 */
	copy %r19,%r1
	bv %r0(%r25)	/* r20 */
	copy %r20,%r1
	bv %r0(%r25)	/* r21 */
	copy %r21,%r1
	bv %r0(%r25)	/* r22 */
	copy %r22,%r1
	bv %r0(%r25)	/* r23 */
	copy %r23,%r1
	bv %r0(%r25)	/* r24 - shadowed */
	ldi -1,%r1
	bv %r0(%r25)	/* r25 - shadowed */
	ldi -1,%r1
	bv %r0(%r25)	/* r26 */
	copy %r26,%r1
	bv %r0(%r25)	/* r27 */
	copy %r27,%r1
	bv %r0(%r25)	/* r28 */
	copy %r28,%r1
	bv %r0(%r25)	/* r29 */
	copy %r29,%r1
	bv %r0(%r25)	/* r30 */
	copy %r30,%r1
	bv %r0(%r25)	/* r31 */
	copy %r31,%r1
ENDPROC_CFI(get_register)

ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non-access TLB miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
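	/* Usage sketch (mirroring get_register; treat the caller pattern
	 * as an assumption drawn from the emulation paths earlier in this
	 * file): the caller branches here with "BL set_register,%r25",
	 * leaving the return address in %r25, the target register number
	 * in %r8 and the new value in %r1.  blr again indexes 8-byte
	 * table entries, each returning via "bv %r0(%r25)". */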
	blr %r8,%r0
	nop
	bv %r0(%r25)	/* r0 (silly, but it is a placeholder) */
	copy %r1,%r0
	bv %r0(%r25)	/* r1 */
	copy %r1,%r1
	bv %r0(%r25)	/* r2 */
	copy %r1,%r2
	bv %r0(%r25)	/* r3 */
	copy %r1,%r3
	bv %r0(%r25)	/* r4 */
	copy %r1,%r4
	bv %r0(%r25)	/* r5 */
	copy %r1,%r5
	bv %r0(%r25)	/* r6 */
	copy %r1,%r6
	bv %r0(%r25)	/* r7 */
	copy %r1,%r7
	bv %r0(%r25)	/* r8 */
	copy %r1,%r8
	bv %r0(%r25)	/* r9 */
	copy %r1,%r9
	bv %r0(%r25)	/* r10 */
	copy %r1,%r10
	bv %r0(%r25)	/* r11 */
	copy %r1,%r11
	bv %r0(%r25)	/* r12 */
	copy %r1,%r12
	bv %r0(%r25)	/* r13 */
	copy %r1,%r13
	bv %r0(%r25)	/* r14 */
	copy %r1,%r14
	bv %r0(%r25)	/* r15 */
	copy %r1,%r15
	bv %r0(%r25)	/* r16 */
	copy %r1,%r16
	bv %r0(%r25)	/* r17 */
	copy %r1,%r17
	bv %r0(%r25)	/* r18 */
	copy %r1,%r18
	bv %r0(%r25)	/* r19 */
	copy %r1,%r19
	bv %r0(%r25)	/* r20 */
	copy %r1,%r20
	bv %r0(%r25)	/* r21 */
	copy %r1,%r21
	bv %r0(%r25)	/* r22 */
	copy %r1,%r22
	bv %r0(%r25)	/* r23 */
	copy %r1,%r23
	bv %r0(%r25)	/* r24 */
	copy %r1,%r24
	bv %r0(%r25)	/* r25 */
	copy %r1,%r25
	bv %r0(%r25)	/* r26 */
	copy %r1,%r26
	bv %r0(%r25)	/* r27 */
	copy %r1,%r27
	bv %r0(%r25)	/* r28 */
	copy %r1,%r28
	bv %r0(%r25)	/* r29 */
	copy %r1,%r29
	bv %r0(%r25)	/* r30 */
	copy %r1,%r30
	bv %r0(%r25)	/* r31 */
	copy %r1,%r31
ENDPROC_CFI(set_register)
1/*
2 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
3 *
4 * kernel entry points (interruptions, system call wrappers)
5 * Copyright (C) 1999,2000 Philipp Rumpf
6 * Copyright (C) 1999 SuSE GmbH Nuernberg
7 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
8 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <asm/asm-offsets.h>
26
27/* we have the following possibilities to act on an interruption:
28 * - handle in assembly and use shadowed registers only
29 * - save registers to kernel stack and handle in assembly or C */
30
31
32#include <asm/psw.h>
33#include <asm/cache.h> /* for L1_CACHE_SHIFT */
34#include <asm/assembly.h> /* for LDREG/STREG defines */
35#include <asm/pgtable.h>
36#include <asm/signal.h>
37#include <asm/unistd.h>
38#include <asm/thread_info.h>
39
40#include <linux/linkage.h>
41
42#ifdef CONFIG_64BIT
43 .level 2.0w
44#else
45 .level 2.0
46#endif
47
48 .import pa_dbit_lock,data
49
50 /* space_to_prot macro creates a prot id from a space id */
51
52#if (SPACEID_SHIFT) == 0
53 .macro space_to_prot spc prot
54 depd,z \spc,62,31,\prot
55 .endm
56#else
57 .macro space_to_prot spc prot
58 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
59 .endm
60#endif
61
62 /* Switch to virtual mapping, trashing only %r1 */
63 .macro virt_map
64 /* pcxt_ssm_bug */
65 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */
66 mtsp %r0, %sr4
67 mtsp %r0, %sr5
68 mfsp %sr7, %r1
69 or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
70 mtsp %r1, %sr3
71 tovirt_r1 %r29
72 load32 KERNEL_PSW, %r1
73
74 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
75 mtsp %r0, %sr6
76 mtsp %r0, %sr7
77 mtctl %r0, %cr17 /* Clear IIASQ tail */
78 mtctl %r0, %cr17 /* Clear IIASQ head */
79 mtctl %r1, %ipsw
80 load32 4f, %r1
81 mtctl %r1, %cr18 /* Set IIAOQ tail */
82 ldo 4(%r1), %r1
83 mtctl %r1, %cr18 /* Set IIAOQ head */
84 rfir
85 nop
864:
87 .endm
88
89 /*
90 * The "get_stack" macros are responsible for determining the
91 * kernel stack value.
92 *
93 * If sr7 == 0
94 * Already using a kernel stack, so call the
95 * get_stack_use_r30 macro to push a pt_regs structure
96 * on the stack, and store registers there.
97 * else
98 * Need to set up a kernel stack, so call the
99 * get_stack_use_cr30 macro to set up a pointer
100 * to the pt_regs structure contained within the
101 * task pointer pointed to by cr30. Set the stack
102 * pointer to point to the end of the task structure.
103 *
104 * Note that we use shadowed registers for temps until
105 * we can save %r26 and %r29. %r26 is used to preserve
106 * %r8 (a shadowed register) which temporarily contained
107 * either the fault type ("code") or the eirr. We need
108 * to use a non-shadowed register to carry the value over
109 * the rfir in virt_map. We use %r26 since this value winds
110 * up being passed as the argument to either do_cpu_irq_mask
111 * or handle_interruption. %r29 is used to hold a pointer
112 * the register save area, and once again, it needs to
113 * be a non-shadowed register so that it survives the rfir.
114 *
115 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
116 */
117
118 .macro get_stack_use_cr30
119
120 /* we save the registers in the task struct */
121
122 mfctl %cr30, %r1
123 tophys %r1,%r9
124 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
125 tophys %r1,%r9
126 ldo TASK_REGS(%r9),%r9
127 STREG %r30, PT_GR30(%r9)
128 STREG %r29,PT_GR29(%r9)
129 STREG %r26,PT_GR26(%r9)
130 copy %r9,%r29
131 mfctl %cr30, %r1
132 ldo THREAD_SZ_ALGN(%r1), %r30
133 .endm
134
135 .macro get_stack_use_r30
136
137 /* we put a struct pt_regs on the stack and save the registers there */
138
139 tophys %r30,%r9
140 STREG %r30,PT_GR30(%r9)
141 ldo PT_SZ_ALGN(%r30),%r30
142 STREG %r29,PT_GR29(%r9)
143 STREG %r26,PT_GR26(%r9)
144 copy %r9,%r29
145 .endm
146
147 .macro rest_stack
148 LDREG PT_GR1(%r29), %r1
149 LDREG PT_GR30(%r29),%r30
150 LDREG PT_GR29(%r29),%r29
151 .endm
152
153 /* default interruption handler
154 * (calls traps.c:handle_interruption) */
155 .macro def code
156 b intr_save
157 ldi \code, %r8
158 .align 32
159 .endm
160
161 /* Interrupt interruption handler
162 * (calls irq.c:do_cpu_irq_mask) */
163 .macro extint code
164 b intr_extint
165 mfsp %sr7,%r16
166 .align 32
167 .endm
168
169 .import os_hpmc, code
170
171 /* HPMC handler */
172 .macro hpmc code
173 nop /* must be a NOP, will be patched later */
174 load32 PA(os_hpmc), %r3
175 bv,n 0(%r3)
176 nop
177 .word 0 /* checksum (will be patched) */
178 .word PA(os_hpmc) /* address of handler */
179 .word 0 /* length of handler */
180 .endm
181
182 /*
183 * Performance Note: Instructions will be moved up into
184 * this part of the code later on, once we are sure
185 * that the tlb miss handlers are close to final form.
186 */
187
188 /* Register definitions for tlb miss handler macros */
189
190 va = r8 /* virtual address for which the trap occurred */
191 spc = r24 /* space for which the trap occurred */
192
193#ifndef CONFIG_64BIT
194
195 /*
196 * itlb miss interruption handler (parisc 1.1 - 32 bit)
197 */
198
199 .macro itlb_11 code
200
201 mfctl %pcsq, spc
202 b itlb_miss_11
203 mfctl %pcoq, va
204
205 .align 32
206 .endm
207#endif
208
209 /*
210 * itlb miss interruption handler (parisc 2.0)
211 */
212
213 .macro itlb_20 code
214 mfctl %pcsq, spc
215#ifdef CONFIG_64BIT
216 b itlb_miss_20w
217#else
218 b itlb_miss_20
219#endif
220 mfctl %pcoq, va
221
222 .align 32
223 .endm
224
225#ifndef CONFIG_64BIT
226 /*
227 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
228 */
229
230 .macro naitlb_11 code
231
232 mfctl %isr,spc
233 b naitlb_miss_11
234 mfctl %ior,va
235
236 .align 32
237 .endm
238#endif
239
240 /*
241 * naitlb miss interruption handler (parisc 2.0)
242 */
243
244 .macro naitlb_20 code
245
246 mfctl %isr,spc
247#ifdef CONFIG_64BIT
248 b naitlb_miss_20w
249#else
250 b naitlb_miss_20
251#endif
252 mfctl %ior,va
253
254 .align 32
255 .endm
256
257#ifndef CONFIG_64BIT
258 /*
259 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
260 */
261
262 .macro dtlb_11 code
263
264 mfctl %isr, spc
265 b dtlb_miss_11
266 mfctl %ior, va
267
268 .align 32
269 .endm
270#endif
271
272 /*
273 * dtlb miss interruption handler (parisc 2.0)
274 */
275
276 .macro dtlb_20 code
277
278 mfctl %isr, spc
279#ifdef CONFIG_64BIT
280 b dtlb_miss_20w
281#else
282 b dtlb_miss_20
283#endif
284 mfctl %ior, va
285
286 .align 32
287 .endm
288
289#ifndef CONFIG_64BIT
290 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
291
292 .macro nadtlb_11 code
293
294 mfctl %isr,spc
295 b nadtlb_miss_11
296 mfctl %ior,va
297
298 .align 32
299 .endm
300#endif
301
302 /* nadtlb miss interruption handler (parisc 2.0) */
303
304 .macro nadtlb_20 code
305
306 mfctl %isr,spc
307#ifdef CONFIG_64BIT
308 b nadtlb_miss_20w
309#else
310 b nadtlb_miss_20
311#endif
312 mfctl %ior,va
313
314 .align 32
315 .endm
316
317#ifndef CONFIG_64BIT
318 /*
319 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
320 */
321
322 .macro dbit_11 code
323
324 mfctl %isr,spc
325 b dbit_trap_11
326 mfctl %ior,va
327
328 .align 32
329 .endm
330#endif
331
332 /*
333 * dirty bit trap interruption handler (parisc 2.0)
334 */
335
336 .macro dbit_20 code
337
338 mfctl %isr,spc
339#ifdef CONFIG_64BIT
340 b dbit_trap_20w
341#else
342 b dbit_trap_20
343#endif
344 mfctl %ior,va
345
346 .align 32
347 .endm
348
349 /* In LP64, the space contains part of the upper 32 bits of the
350 * fault. We have to extract this and place it in the va,
351 * zeroing the corresponding bits in the space register */
352 .macro space_adjust spc,va,tmp
353#ifdef CONFIG_64BIT
354 extrd,u \spc,63,SPACEID_SHIFT,\tmp
355 depd %r0,63,SPACEID_SHIFT,\spc
356 depd \tmp,31,SPACEID_SHIFT,\va
357#endif
358 .endm
359
360 .import swapper_pg_dir,code
361
362 /* Get the pgd. For faults on space zero (kernel space), this
363 * is simply swapper_pg_dir. For user space faults, the
364 * pgd is stored in %cr25 */
365 .macro get_pgd spc,reg
366 ldil L%PA(swapper_pg_dir),\reg
367 ldo R%PA(swapper_pg_dir)(\reg),\reg
368 or,COND(=) %r0,\spc,%r0
369 mfctl %cr25,\reg
370 .endm
371
372 /*
373 space_check(spc,tmp,fault)
374
375 spc - The space we saw the fault with.
376 tmp - The place to store the current space.
377 fault - Function to call on failure.
378
379 Only allow faults on different spaces from the
380 currently active one if we're the kernel
381
382 */
383 .macro space_check spc,tmp,fault
384 mfsp %sr7,\tmp
385 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
386 * as kernel, so defeat the space
387 * check if it is */
388 copy \spc,\tmp
389 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
390 cmpb,COND(<>),n \tmp,\spc,\fault
391 .endm
392
393 /* Look up a PTE in a 2-Level scheme (faulting at each
394 * level if the entry isn't present
395 *
396 * NOTE: we use ldw even for LP64, since the short pointers
397 * can address up to 1TB
398 */
399 .macro L2_ptep pmd,pte,index,va,fault
400#if PT_NLEVELS == 3
401 extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
402#else
403 extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
404#endif
405 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
406 copy %r0,\pte
407 ldw,s \index(\pmd),\pmd
408 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
409 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
410 copy \pmd,%r9
411 SHLREG %r9,PxD_VALUE_SHIFT,\pmd
412 extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
413 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
414 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
415 LDREG %r0(\pmd),\pte /* pmd is now pte */
416 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
417 .endm
418
419 /* Look up PTE in a 3-Level scheme.
420 *
421 * Here we implement a Hybrid L2/L3 scheme: we allocate the
422 * first pmd adjacent to the pgd. This means that we can
423 * subtract a constant offset to get to it. The pmd and pgd
424 * sizes are arranged so that a single pmd covers 4GB (giving
425 * a full LP64 process access to 8TB) so our lookups are
426 * effectively L2 for the first 4GB of the kernel (i.e. for
427 * all ILP32 processes and all the kernel for machines with
428 * under 4GB of memory) */
429 .macro L3_ptep pgd,pte,index,va,fault
430#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
431 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
432 copy %r0,\pte
433 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
434 ldw,s \index(\pgd),\pgd
435 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
436 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
437 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
438 shld \pgd,PxD_VALUE_SHIFT,\index
439 extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
440 copy \index,\pgd
441 extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
442 ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
443#endif
444 L2_ptep \pgd,\pte,\index,\va,\fault
445 .endm
446
447 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
448 * don't needlessly dirty the cache line if it was already set */
449 .macro update_ptep ptep,pte,tmp,tmp1
450 ldi _PAGE_ACCESSED,\tmp1
451 or \tmp1,\pte,\tmp
452 and,COND(<>) \tmp1,\pte,%r0
453 STREG \tmp,0(\ptep)
454 .endm
455
456 /* Set the dirty bit (and accessed bit). No need to be
457 * clever, this is only used from the dirty fault */
458 .macro update_dirty ptep,pte,tmp
459 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
460 or \tmp,\pte,\pte
461 STREG \pte,0(\ptep)
462 .endm
463
464 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
465 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
466 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
467
468 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
469 .macro convert_for_tlb_insert20 pte
470 extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
471 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
472 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
473 (63-58)+PAGE_ADD_SHIFT,\pte
474 .endm
475
476 /* Convert the pte and prot to tlb insertion values. How
477 * this happens is quite subtle, read below */
478 .macro make_insert_tlb spc,pte,prot
479 space_to_prot \spc \prot /* create prot id from space */
480 /* The following is the real subtlety. This is depositing
481 * T <-> _PAGE_REFTRAP
482 * D <-> _PAGE_DIRTY
483 * B <-> _PAGE_DMB (memory break)
484 *
485 * Then incredible subtlety: The access rights are
486 * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
487 * See 3-14 of the parisc 2.0 manual
488 *
489 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
490 * trigger an access rights trap in user space if the user
491 * tries to read an unreadable page */
492 depd \pte,8,7,\prot
493
494 /* PAGE_USER indicates the page can be read with user privileges,
495 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
496 * contains _PAGE_READ */
497 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
498 depdi 7,11,3,\prot
499 /* If we're a gateway page, drop PL2 back to zero for promotion
500 * to kernel privilege (so we can execute the page as kernel).
501 * Any privilege promotion page always denys read and write */
502 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
503 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
504
505 /* Enforce uncacheable pages.
506 * This should ONLY be use for MMIO on PA 2.0 machines.
507 * Memory/DMA is cache coherent on all PA2.0 machines we support
508 * (that means T-class is NOT supported) and the memory controllers
509 * on most of those machines only handles cache transactions.
510 */
511 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
512 depdi 1,12,1,\prot
513
514 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
515 convert_for_tlb_insert20 \pte
516 .endm
517
518 /* Identical macro to make_insert_tlb above, except it
519 * makes the tlb entry for the differently formatted pa11
520 * insertion instructions */
521 .macro make_insert_tlb_11 spc,pte,prot
522 zdep \spc,30,15,\prot
523 dep \pte,8,7,\prot
524 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
525 depi 1,12,1,\prot
526 extru,= \pte,_PAGE_USER_BIT,1,%r0
527 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
528 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
529 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
530
531 /* Get rid of prot bits and convert to page addr for iitlba */
532
533 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
534 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
535 .endm
536
537 /* This is for ILP32 PA2.0 only. The TLB insertion needs
538 * to extend into I/O space if the address is 0xfXXXXXXX
539 * so we extend the f's into the top word of the pte in
540 * this case */
541 .macro f_extend pte,tmp
542 extrd,s \pte,42,4,\tmp
543 addi,<> 1,\tmp,%r0
544 extrd,s \pte,63,25,\pte
545 .endm
546
547 /* The alias region is an 8MB aligned 16MB to do clear and
548 * copy user pages at addresses congruent with the user
549 * virtual address.
550 *
551 * To use the alias page, you set %r26 up with the to TLB
552 * entry (identifying the physical page) and %r23 up with
553 * the from tlb entry (or nothing if only a to entry---for
554 * clear_user_page_asm) */
555 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
556 cmpib,COND(<>),n 0,\spc,\fault
557 ldil L%(TMPALIAS_MAP_START),\tmp
558#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
559 /* on LP64, ldi will sign extend into the upper 32 bits,
560 * which is behaviour we don't want */
561 depdi 0,31,32,\tmp
562#endif
563 copy \va,\tmp1
564 depi 0,31,23,\tmp1
565 cmpb,COND(<>),n \tmp,\tmp1,\fault
566 mfctl %cr19,\tmp /* iir */
567 /* get the opcode (first six bits) into \tmp */
568 extrw,u \tmp,5,6,\tmp
569 /*
570 * Only setting the T bit prevents data cache movein
571 * Setting access rights to zero prevents instruction cache movein
572 *
573 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
574 * to type field and _PAGE_READ goes to top bit of PL1
575 */
576 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
577 /*
578 * so if the opcode is one (i.e. this is a memory management
579 * instruction) nullify the next load so \prot is only T.
580 * Otherwise this is a normal data operation
581 */
582 cmpiclr,= 0x01,\tmp,%r0
583 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
584 depd,z \prot,8,7,\prot
585 /*
586 * OK, it is in the temp alias region, check whether "from" or "to".
587 * Check "subtle" note in pacache.S re: r23/r26.
588 */
589#ifdef CONFIG_64BIT
590 extrd,u,*= \va,41,1,%r0
591#else
592 extrw,u,= \va,9,1,%r0
593#endif
594 or,COND(tr) %r23,%r0,\pte
595 or %r26,%r0,\pte
596 .endm
597
598
599 /*
600 * Align fault_vector_20 on 4K boundary so that both
601 * fault_vector_11 and fault_vector_20 are on the
602 * same page. This is only necessary as long as we
603 * write protect the kernel text, which we may stop
604 * doing once we use large page translations to cover
605 * the static part of the kernel address space.
606 */
607
608 .text
609
610 .align PAGE_SIZE
611
612ENTRY(fault_vector_20)
613 /* First vector is invalid (0) */
614 .ascii "cows can fly"
615 .byte 0
616 .align 32
617
618 hpmc 1
619 def 2
620 def 3
621 extint 4
622 def 5
623 itlb_20 6
624 def 7
625 def 8
626 def 9
627 def 10
628 def 11
629 def 12
630 def 13
631 def 14
632 dtlb_20 15
633 naitlb_20 16
634 nadtlb_20 17
635 def 18
636 def 19
637 dbit_20 20
638 def 21
639 def 22
640 def 23
641 def 24
642 def 25
643 def 26
644 def 27
645 def 28
646 def 29
647 def 30
648 def 31
649END(fault_vector_20)
650
651#ifndef CONFIG_64BIT
652
653 .align 2048
654
655ENTRY(fault_vector_11)
656 /* First vector is invalid (0) */
657 .ascii "cows can fly"
658 .byte 0
659 .align 32
660
661 hpmc 1
662 def 2
663 def 3
664 extint 4
665 def 5
666 itlb_11 6
667 def 7
668 def 8
669 def 9
670 def 10
671 def 11
672 def 12
673 def 13
674 def 14
675 dtlb_11 15
676 naitlb_11 16
677 nadtlb_11 17
678 def 18
679 def 19
680 dbit_11 20
681 def 21
682 def 22
683 def 23
684 def 24
685 def 25
686 def 26
687 def 27
688 def 28
689 def 29
690 def 30
691 def 31
692END(fault_vector_11)
693
694#endif
695 /* Fault vector is separately protected and *must* be on its own page */
696 .align PAGE_SIZE
697ENTRY(end_fault_vector)
698
699 .import handle_interruption,code
700 .import do_cpu_irq_mask,code
701
702 /*
703 * r26 = function to be called
704 * r25 = argument to pass in
705 * r24 = flags for do_fork()
706 *
707 * Kernel threads don't ever return, so they don't need
708 * a true register context. We just save away the arguments
709 * for copy_thread/ret_ to properly set up the child.
710 */
711
712#define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
713#define CLONE_UNTRACED 0x00800000
714
715 .import do_fork
716ENTRY(__kernel_thread)
717 STREG %r2, -RP_OFFSET(%r30)
718
719 copy %r30, %r1
720 ldo PT_SZ_ALGN(%r30),%r30
721#ifdef CONFIG_64BIT
722 /* Yo, function pointers in wide mode are little structs... -PB */
723 ldd 24(%r26), %r2
724 STREG %r2, PT_GR27(%r1) /* Store childs %dp */
725 ldd 16(%r26), %r26
726
727 STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
728 copy %r0, %r22 /* user_tid */
729#endif
730 STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
731 STREG %r25, PT_GR25(%r1)
732 ldil L%CLONE_UNTRACED, %r26
733 ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
734 or %r26, %r24, %r26 /* will have kernel mappings. */
735 ldi 1, %r25 /* stack_start, signals kernel thread */
736 stw %r0, -52(%r30) /* user_tid */
737#ifdef CONFIG_64BIT
738 ldo -16(%r30),%r29 /* Reference param save area */
739#endif
740 BL do_fork, %r2
741 copy %r1, %r24 /* pt_regs */
742
743 /* Parent Returns here */
744
745 LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
746 ldo -PT_SZ_ALGN(%r30), %r30
747 bv %r0(%r2)
748 nop
749ENDPROC(__kernel_thread)
750
751 /*
752 * Child Returns here
753 *
754 * copy_thread moved args from temp save area set up above
755 * into task save area.
756 */
757
758ENTRY(ret_from_kernel_thread)
759
760 /* Call schedule_tail first though */
761 BL schedule_tail, %r2
762 nop
763
764 LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
765 LDREG TASK_PT_GR25(%r1), %r26
766#ifdef CONFIG_64BIT
767 LDREG TASK_PT_GR27(%r1), %r27
768 LDREG TASK_PT_GR22(%r1), %r22
769#endif
770 LDREG TASK_PT_GR26(%r1), %r1
771 ble 0(%sr7, %r1)
772 copy %r31, %r2
773
774#ifdef CONFIG_64BIT
775 ldo -16(%r30),%r29 /* Reference param save area */
776 loadgp /* Thread could have been in a module */
777#endif
778#ifndef CONFIG_64BIT
779 b sys_exit
780#else
781 load32 sys_exit, %r1
782 bv %r0(%r1)
783#endif
784 ldi 0, %r26
785ENDPROC(ret_from_kernel_thread)
786
787 .import sys_execve, code
788ENTRY(__execve)
789 copy %r2, %r15
790 copy %r30, %r16
791 ldo PT_SZ_ALGN(%r30), %r30
792 STREG %r26, PT_GR26(%r16)
793 STREG %r25, PT_GR25(%r16)
794 STREG %r24, PT_GR24(%r16)
795#ifdef CONFIG_64BIT
796 ldo -16(%r30),%r29 /* Reference param save area */
797#endif
798 BL sys_execve, %r2
799 copy %r16, %r26
800
801 cmpib,=,n 0,%r28,intr_return /* forward */
802
803 /* yes, this will trap and die. */
804 copy %r15, %r2
805 copy %r16, %r30
806 bv %r0(%r2)
807 nop
808ENDPROC(__execve)
809
810
811 /*
812 * struct task_struct *_switch_to(struct task_struct *prev,
813 * struct task_struct *next)
814 *
815 * switch kernel stacks and return prev */
816ENTRY(_switch_to)
817 STREG %r2, -RP_OFFSET(%r30)
818
819 callee_save_float
820 callee_save
821
822 load32 _switch_to_ret, %r2
823
824 STREG %r2, TASK_PT_KPC(%r26)
825 LDREG TASK_PT_KPC(%r25), %r2
826
827 STREG %r30, TASK_PT_KSP(%r26)
828 LDREG TASK_PT_KSP(%r25), %r30
829 LDREG TASK_THREAD_INFO(%r25), %r25
830 bv %r0(%r2)
831 mtctl %r25,%cr30
832
833_switch_to_ret:
834 mtctl %r0, %cr0 /* Needed for single stepping */
835 callee_rest
836 callee_rest_float
837
838 LDREG -RP_OFFSET(%r30), %r2
839 bv %r0(%r2)
840 copy %r26, %r28
841ENDPROC(_switch_to)
842
843 /*
844 * Common rfi return path for interruptions, kernel execve, and
845 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
846 * return via this path if the signal was received when the process
847 * was running; if the process was blocked on a syscall then the
848 * normal syscall_exit path is used. All syscalls for traced
849 * proceses exit via intr_restore.
850 *
851 * XXX If any syscalls that change a processes space id ever exit
852 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
853 * adjust IASQ[0..1].
854 *
855 */
856
857 .align PAGE_SIZE
858
859ENTRY(syscall_exit_rfi)
860 mfctl %cr30,%r16
861 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
862 ldo TASK_REGS(%r16),%r16
863 /* Force iaoq to userspace, as the user has had access to our current
864 * context via sigcontext. Also Filter the PSW for the same reason.
865 */
866 LDREG PT_IAOQ0(%r16),%r19
867 depi 3,31,2,%r19
868 STREG %r19,PT_IAOQ0(%r16)
869 LDREG PT_IAOQ1(%r16),%r19
870 depi 3,31,2,%r19
871 STREG %r19,PT_IAOQ1(%r16)
872 LDREG PT_PSW(%r16),%r19
873 load32 USER_PSW_MASK,%r1
874#ifdef CONFIG_64BIT
875 load32 USER_PSW_HI_MASK,%r20
876 depd %r20,31,32,%r1
877#endif
878 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
879 load32 USER_PSW,%r1
880 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
881 STREG %r19,PT_PSW(%r16)
882
883 /*
884 * If we aren't being traced, we never saved space registers
885 * (we don't store them in the sigcontext), so set them
886 * to "proper" values now (otherwise we'll wind up restoring
887 * whatever was last stored in the task structure, which might
888 * be inconsistent if an interrupt occurred while on the gateway
889 * page). Note that we may be "trashing" values the user put in
890 * them, but we don't support the user changing them.
891 */
892
893 STREG %r0,PT_SR2(%r16)
894 mfsp %sr3,%r19
895 STREG %r19,PT_SR0(%r16)
896 STREG %r19,PT_SR1(%r16)
897 STREG %r19,PT_SR3(%r16)
898 STREG %r19,PT_SR4(%r16)
899 STREG %r19,PT_SR5(%r16)
900 STREG %r19,PT_SR6(%r16)
901 STREG %r19,PT_SR7(%r16)
902
903intr_return:
904 /* NOTE: Need to enable interrupts incase we schedule. */
905 ssm PSW_SM_I, %r0
906
907intr_check_resched:
908
909 /* check for reschedule */
910 mfctl %cr30,%r1
911 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
912 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
913
914 .import do_notify_resume,code
915intr_check_sig:
916 /* As above */
917 mfctl %cr30,%r1
918 LDREG TI_FLAGS(%r1),%r19
919 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
920 and,COND(<>) %r19, %r20, %r0
921 b,n intr_restore /* skip past if we've nothing to do */
922
923 /* This check is critical to having LWS
924 * working. The IASQ is zero on the gateway
925 * page and we cannot deliver any signals until
926 * we get off the gateway page.
927 *
928 * Only do signals if we are returning to user space
929 */
930 LDREG PT_IASQ0(%r16), %r20
931 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
932 LDREG PT_IASQ1(%r16), %r20
933 cmpib,COND(=),n 0,%r20,intr_restore /* backward */
934
935 copy %r0, %r25 /* long in_syscall = 0 */
936#ifdef CONFIG_64BIT
937 ldo -16(%r30),%r29 /* Reference param save area */
938#endif
939
940 BL do_notify_resume,%r2
941 copy %r16, %r26 /* struct pt_regs *regs */
942
943 b,n intr_check_sig
944
945intr_restore:
946 copy %r16,%r29
947 ldo PT_FR31(%r29),%r1
948 rest_fp %r1
949 rest_general %r29
950
951 /* inverse of virt_map */
952 pcxt_ssm_bug
953 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
954 tophys_r1 %r29
955
956 /* Restore space id's and special cr's from PT_REGS
957 * structure pointed to by r29
958 */
959 rest_specials %r29
960
961 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
962 * It also restores r1 and r30.
963 */
964 rest_stack
965
966 rfi
967 nop
968
969#ifndef CONFIG_PREEMPT
970# define intr_do_preempt intr_restore
971#endif /* !CONFIG_PREEMPT */
972
973 .import schedule,code
974intr_do_resched:
975 /* Only call schedule on return to userspace. If we're returning
976 * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
977 * we jump back to intr_restore.
978 */
979 LDREG PT_IASQ0(%r16), %r20
980 cmpib,COND(=) 0, %r20, intr_do_preempt
981 nop
982 LDREG PT_IASQ1(%r16), %r20
983 cmpib,COND(=) 0, %r20, intr_do_preempt
984 nop
985
986#ifdef CONFIG_64BIT
987 ldo -16(%r30),%r29 /* Reference param save area */
988#endif
989
990 ldil L%intr_check_sig, %r2
991#ifndef CONFIG_64BIT
992 b schedule
993#else
994 load32 schedule, %r20
995 bv %r0(%r20)
996#endif
997 ldo R%intr_check_sig(%r2), %r2
998
999 /* preempt the current task on returning to kernel
1000 * mode from an interrupt, iff need_resched is set,
1001 * and preempt_count is 0. otherwise, we continue on
1002 * our merry way back to the current running task.
1003 */
1004#ifdef CONFIG_PREEMPT
1005 .import preempt_schedule_irq,code
1006intr_do_preempt:
1007 rsm PSW_SM_I, %r0 /* disable interrupts */
1008
1009 /* current_thread_info()->preempt_count */
1010 mfctl %cr30, %r1
1011 LDREG TI_PRE_COUNT(%r1), %r19
1012 cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
1013 nop /* prev insn branched backwards */
1014
1015 /* check if we interrupted a critical path */
1016 LDREG PT_PSW(%r16), %r20
1017 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
1018 nop
1019
1020 BL preempt_schedule_irq, %r2
1021 nop
1022
1023 b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
1024#endif /* CONFIG_PREEMPT */
1025
1026 /*
1027 * External interrupts.
1028 */
1029
1030intr_extint:
1031 cmpib,COND(=),n 0,%r16,1f
1032
1033 get_stack_use_cr30
1034 b,n 2f
1035
10361:
1037 get_stack_use_r30
10382:
1039 save_specials %r29
1040 virt_map
1041 save_general %r29
1042
1043 ldo PT_FR0(%r29), %r24
1044 save_fp %r24
1045
1046 loadgp
1047
1048 copy %r29, %r26 /* arg0 is pt_regs */
1049 copy %r29, %r16 /* save pt_regs */
1050
1051 ldil L%intr_return, %r2
1052
1053#ifdef CONFIG_64BIT
1054 ldo -16(%r30),%r29 /* Reference param save area */
1055#endif
1056
1057 b do_cpu_irq_mask
1058 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1059ENDPROC(syscall_exit_rfi)
1060
1061
1062 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1063
1064ENTRY(intr_save) /* for os_hpmc */
1065 mfsp %sr7,%r16
1066 cmpib,COND(=),n 0,%r16,1f
1067 get_stack_use_cr30
1068 b 2f
1069 copy %r8,%r26
1070
10711:
1072 get_stack_use_r30
1073 copy %r8,%r26
1074
10752:
1076 save_specials %r29
1077
1078 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1079
1080 /*
1081 * FIXME: 1) Use a #define for the hardwired "6" below (and in
1082 * traps.c.
1083 * 2) Once we start executing code above 4 Gb, we need
1084 * to adjust iasq/iaoq here in the same way we
1085 * adjust isr/ior below.
1086 */
1087
1088 cmpib,COND(=),n 6,%r26,skip_save_ior
1089
1090
1091 mfctl %cr20, %r16 /* isr */
1092 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1093 mfctl %cr21, %r17 /* ior */
1094
1095
1096#ifdef CONFIG_64BIT
1097 /*
1098 * If the interrupted code was running with W bit off (32 bit),
1099 * clear the b bits (bits 0 & 1) in the ior.
1100 * save_specials left ipsw value in r8 for us to test.
1101 */
1102 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1103 depdi 0,1,2,%r17
1104
1105 /*
1106 * FIXME: This code has hardwired assumptions about the split
1107 * between space bits and offset bits. This will change
1108 * when we allow alternate page sizes.
1109 */
1110
1111 /* adjust isr/ior. */
1112 extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
1113 depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
1114 depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
1115#endif
1116 STREG %r16, PT_ISR(%r29)
1117 STREG %r17, PT_IOR(%r29)
1118
1119
1120skip_save_ior:
1121 virt_map
1122 save_general %r29
1123
1124 ldo PT_FR0(%r29), %r25
1125 save_fp %r25
1126
1127 loadgp
1128
1129 copy %r29, %r25 /* arg1 is pt_regs */
1130#ifdef CONFIG_64BIT
1131 ldo -16(%r30),%r29 /* Reference param save area */
1132#endif
1133
1134 ldil L%intr_check_sig, %r2
1135 copy %r25, %r16 /* save pt_regs */
1136
1137 b handle_interruption
1138 ldo R%intr_check_sig(%r2), %r2
1139ENDPROC(intr_save)
1140
1141
1142 /*
1143 * Note for all tlb miss handlers:
1144 *
1145 * cr24 contains a pointer to the kernel address space
1146 * page directory.
1147 *
1148 * cr25 contains a pointer to the current user address
1149 * space page directory.
1150 *
1151 * sr3 will contain the space id of the user address space
1152 * of the current running thread while that thread is
1153 * running in the kernel.
1154 */
1155
1156 /*
1157 * register number allocations. Note that these are all
1158 * in the shadowed registers
1159 */
1160
1161 t0 = r1 /* temporary register 0 */
1162 va = r8 /* virtual address for which the trap occurred */
1163 t1 = r9 /* temporary register 1 */
1164 pte = r16 /* pte/phys page # */
1165 prot = r17 /* prot bits */
1166 spc = r24 /* space for which the trap occurred */
1167 ptp = r25 /* page directory/page table pointer */
1168
1169#ifdef CONFIG_64BIT
1170
1171dtlb_miss_20w:
1172 space_adjust spc,va,t0
1173 get_pgd spc,ptp
1174 space_check spc,t0,dtlb_fault
1175
1176 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1177
1178 update_ptep ptp,pte,t0,t1
1179
1180 make_insert_tlb spc,pte,prot
1181
1182 idtlbt pte,prot
1183
1184 rfir
1185 nop
1186
1187dtlb_check_alias_20w:
1188 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1189
1190 idtlbt pte,prot
1191
1192 rfir
1193 nop
1194
1195nadtlb_miss_20w:
1196 space_adjust spc,va,t0
1197 get_pgd spc,ptp
1198 space_check spc,t0,nadtlb_fault
1199
1200 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1201
1202 update_ptep ptp,pte,t0,t1
1203
1204 make_insert_tlb spc,pte,prot
1205
1206 idtlbt pte,prot
1207
1208 rfir
1209 nop
1210
1211nadtlb_check_alias_20w:
1212 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
1213
1214 idtlbt pte,prot
1215
1216 rfir
1217 nop
1218
1219#else
1220
1221dtlb_miss_11:
1222 get_pgd spc,ptp
1223
1224 space_check spc,t0,dtlb_fault
1225
1226 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1227
1228 update_ptep ptp,pte,t0,t1
1229
1230 make_insert_tlb_11 spc,pte,prot
1231
1232 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1233 mtsp spc,%sr1
1234
1235 idtlba pte,(%sr1,va)
1236 idtlbp prot,(%sr1,va)
1237
1238 mtsp t0, %sr1 /* Restore sr1 */
1239
1240 rfir
1241 nop
1242
1243dtlb_check_alias_11:
1244 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1245
1246 idtlba pte,(va)
1247 idtlbp prot,(va)
1248
1249 rfir
1250 nop
1251
1252nadtlb_miss_11:
1253 get_pgd spc,ptp
1254
1255 space_check spc,t0,nadtlb_fault
1256
1257 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1258
1259 update_ptep ptp,pte,t0,t1
1260
1261 make_insert_tlb_11 spc,pte,prot
1262
1263
1264 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1265 mtsp spc,%sr1
1266
1267 idtlba pte,(%sr1,va)
1268 idtlbp prot,(%sr1,va)
1269
1270 mtsp t0, %sr1 /* Restore sr1 */
1271
1272 rfir
1273 nop
1274
1275nadtlb_check_alias_11:
1276 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
1277
1278 idtlba pte,(va)
1279 idtlbp prot,(va)
1280
1281 rfir
1282 nop
1283
1284dtlb_miss_20:
1285 space_adjust spc,va,t0
1286 get_pgd spc,ptp
1287 space_check spc,t0,dtlb_fault
1288
1289 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1290
1291 update_ptep ptp,pte,t0,t1
1292
1293 make_insert_tlb spc,pte,prot
1294
1295 f_extend pte,t0
1296
1297 idtlbt pte,prot
1298
1299 rfir
1300 nop
1301
1302dtlb_check_alias_20:
1303 do_alias spc,t0,t1,va,pte,prot,dtlb_fault
1304
1305 idtlbt pte,prot
1306
1307 rfir
1308 nop
1309
1310nadtlb_miss_20:
1311 get_pgd spc,ptp
1312
1313 space_check spc,t0,nadtlb_fault
1314
1315 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1316
1317 update_ptep ptp,pte,t0,t1
1318
1319 make_insert_tlb spc,pte,prot
1320
1321 f_extend pte,t0
1322
1323 idtlbt pte,prot
1324
1325 rfir
1326 nop
1327
1328nadtlb_check_alias_20:
1329 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
1330
1331 idtlbt pte,prot
1332
1333 rfir
1334 nop
1335
1336#endif
1337
1338nadtlb_emulate:
1339
1340 /*
1341 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
1342 * probei instructions. We don't want to fault for these
1343 * instructions (not only does it not make sense, it can cause
1344 * deadlocks, since some flushes are done with the mmap
1345 * semaphore held). If the translation doesn't exist, we can't
1346 * insert a translation, so have to emulate the side effects
1347 * of the instruction. Since we don't insert a translation
1348 * we can get a lot of faults during a flush loop, so it makes
1349 * sense to try to do it here with minimum overhead. We only
1350 * emulate fdc,fic,pdc,probew,prober instructions whose base
1351 * and index registers are not shadowed. We defer everything
1352 * else to the "slow" path.
1353 */
1354
1355 mfctl %cr19,%r9 /* Get iir */
1356
1357 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1358 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1359
1360 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1361 ldi 0x280,%r16
1362 and %r9,%r16,%r17
1363 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1364 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1365 BL get_register,%r25
1366 extrw,u %r9,15,5,%r8 /* Get index register # */
1367 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1368 copy %r1,%r24
1369 BL get_register,%r25
1370 extrw,u %r9,10,5,%r8 /* Get base register # */
1371 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1372 BL set_register,%r25
1373 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1374
1375nadtlb_nullify:
1376 mfctl %ipsw,%r8
1377 ldil L%PSW_N,%r9
1378 or %r8,%r9,%r8 /* Set PSW_N */
1379 mtctl %r8,%ipsw
1380
1381 rfir
1382 nop
1383
1384 /*
1385 When there is no translation for the probe address then we
1386 must nullify the insn and return zero in the target regsiter.
1387 This will indicate to the calling code that it does not have
1388 write/read privileges to this address.
1389
1390 This should technically work for prober and probew in PA 1.1,
1391 and also probe,r and probe,w in PA 2.0
1392
1393 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1394 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1395
1396 */
1397nadtlb_probe_check:
1398 ldi 0x80,%r16
1399 and %r9,%r16,%r17
1400 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1401 BL get_register,%r25 /* Find the target register */
1402 extrw,u %r9,31,5,%r8 /* Get target register */
1403 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1404 BL set_register,%r25
1405 copy %r0,%r1 /* Write zero to target register */
1406 b nadtlb_nullify /* Nullify return insn */
1407 nop
1408
1409
1410#ifdef CONFIG_64BIT
1411itlb_miss_20w:
1412
1413 /*
1414 * I miss is a little different, since we allow users to fault
1415 * on the gateway page which is in the kernel address space.
1416 */
1417
1418 space_adjust spc,va,t0
1419 get_pgd spc,ptp
1420 space_check spc,t0,itlb_fault
1421
1422 L3_ptep ptp,pte,t0,va,itlb_fault
1423
1424 update_ptep ptp,pte,t0,t1
1425
1426 make_insert_tlb spc,pte,prot
1427
1428 iitlbt pte,prot
1429
1430 rfir
1431 nop
1432
1433naitlb_miss_20w:
1434
1435 /*
1436 * I miss is a little different, since we allow users to fault
1437 * on the gateway page which is in the kernel address space.
1438 */
1439
1440 space_adjust spc,va,t0
1441 get_pgd spc,ptp
1442 space_check spc,t0,naitlb_fault
1443
1444 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1445
1446 update_ptep ptp,pte,t0,t1
1447
1448 make_insert_tlb spc,pte,prot
1449
1450 iitlbt pte,prot
1451
1452 rfir
1453 nop
1454
1455naitlb_check_alias_20w:
1456 do_alias spc,t0,t1,va,pte,prot,naitlb_fault
1457
1458 iitlbt pte,prot
1459
1460 rfir
1461 nop
1462
1463#else
1464
1465itlb_miss_11:
1466 get_pgd spc,ptp
1467
1468 space_check spc,t0,itlb_fault
1469
1470 L2_ptep ptp,pte,t0,va,itlb_fault
1471
1472 update_ptep ptp,pte,t0,t1
1473
1474 make_insert_tlb_11 spc,pte,prot
1475
1476 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1477 mtsp spc,%sr1
1478
1479 iitlba pte,(%sr1,va)
1480 iitlbp prot,(%sr1,va)
1481
1482 mtsp t0, %sr1 /* Restore sr1 */
1483
1484 rfir
1485 nop
1486
1487naitlb_miss_11:
1488 get_pgd spc,ptp
1489
1490 space_check spc,t0,naitlb_fault
1491
1492 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1493
1494 update_ptep ptp,pte,t0,t1
1495
1496 make_insert_tlb_11 spc,pte,prot
1497
1498 mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
1499 mtsp spc,%sr1
1500
1501 iitlba pte,(%sr1,va)
1502 iitlbp prot,(%sr1,va)
1503
1504 mtsp t0, %sr1 /* Restore sr1 */
1505
1506 rfir
1507 nop
1508
1509naitlb_check_alias_11:
1510 do_alias spc,t0,t1,va,pte,prot,itlb_fault
1511
1512 iitlba pte,(%sr0, va)
1513 iitlbp prot,(%sr0, va)
1514
1515 rfir
1516 nop
1517
1518
1519itlb_miss_20:
1520 get_pgd spc,ptp
1521
1522 space_check spc,t0,itlb_fault
1523
1524 L2_ptep ptp,pte,t0,va,itlb_fault
1525
1526 update_ptep ptp,pte,t0,t1
1527
1528 make_insert_tlb spc,pte,prot
1529
1530 f_extend pte,t0
1531
1532 iitlbt pte,prot
1533
1534 rfir
1535 nop
1536
1537naitlb_miss_20:
1538 get_pgd spc,ptp
1539
1540 space_check spc,t0,naitlb_fault
1541
1542 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1543
1544 update_ptep ptp,pte,t0,t1
1545
1546 make_insert_tlb spc,pte,prot
1547
1548 f_extend pte,t0
1549
1550 iitlbt pte,prot
1551
1552 rfir
1553 nop
1554
1555naitlb_check_alias_20:
1556 do_alias spc,t0,t1,va,pte,prot,naitlb_fault
1557
1558 iitlbt pte,prot
1559
1560 rfir
1561 nop
1562
1563#endif
1564
1565#ifdef CONFIG_64BIT
1566
1567dbit_trap_20w:
1568 space_adjust spc,va,t0
1569 get_pgd spc,ptp
1570 space_check spc,t0,dbit_fault
1571
1572 L3_ptep ptp,pte,t0,va,dbit_fault
1573
1574#ifdef CONFIG_SMP
1575 cmpib,COND(=),n 0,spc,dbit_nolock_20w
1576 load32 PA(pa_dbit_lock),t0
1577
1578dbit_spin_20w:
1579 LDCW 0(t0),t1
1580 cmpib,COND(=) 0,t1,dbit_spin_20w
1581 nop
1582
1583dbit_nolock_20w:
1584#endif
1585 update_dirty ptp,pte,t1
1586
1587 make_insert_tlb spc,pte,prot
1588
1589 idtlbt pte,prot
1590#ifdef CONFIG_SMP
1591 cmpib,COND(=),n 0,spc,dbit_nounlock_20w
1592 ldi 1,t1
1593 stw t1,0(t0)
1594
1595dbit_nounlock_20w:
1596#endif
1597
1598 rfir
1599 nop
1600#else
1601
1602dbit_trap_11:
1603
1604 get_pgd spc,ptp
1605
1606 space_check spc,t0,dbit_fault
1607
1608 L2_ptep ptp,pte,t0,va,dbit_fault
1609
1610#ifdef CONFIG_SMP
1611 cmpib,COND(=),n 0,spc,dbit_nolock_11
1612 load32 PA(pa_dbit_lock),t0
1613
1614dbit_spin_11:
1615 LDCW 0(t0),t1
1616 cmpib,= 0,t1,dbit_spin_11
1617 nop
1618
1619dbit_nolock_11:
1620#endif
1621 update_dirty ptp,pte,t1
1622
1623 make_insert_tlb_11 spc,pte,prot
1624
1625 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1626 mtsp spc,%sr1
1627
1628 idtlba pte,(%sr1,va)
1629 idtlbp prot,(%sr1,va)
1630
1631 mtsp t1, %sr1 /* Restore sr1 */
1632#ifdef CONFIG_SMP
1633 cmpib,COND(=),n 0,spc,dbit_nounlock_11
1634 ldi 1,t1
1635 stw t1,0(t0)
1636
1637dbit_nounlock_11:
1638#endif
1639
1640 rfir
1641 nop
1642
1643dbit_trap_20:
1644 get_pgd spc,ptp
1645
1646 space_check spc,t0,dbit_fault
1647
1648 L2_ptep ptp,pte,t0,va,dbit_fault
1649
1650#ifdef CONFIG_SMP
1651 cmpib,COND(=),n 0,spc,dbit_nolock_20
1652 load32 PA(pa_dbit_lock),t0
1653
1654dbit_spin_20:
1655 LDCW 0(t0),t1
1656 cmpib,= 0,t1,dbit_spin_20
1657 nop
1658
1659dbit_nolock_20:
1660#endif
1661 update_dirty ptp,pte,t1
1662
1663 make_insert_tlb spc,pte,prot
1664
1665 f_extend pte,t1
1666
1667 idtlbt pte,prot
1668
1669#ifdef CONFIG_SMP
1670 cmpib,COND(=),n 0,spc,dbit_nounlock_20
1671 ldi 1,t1
1672 stw t1,0(t0)
1673
1674dbit_nounlock_20:
1675#endif
1676
1677 rfir
1678 nop
1679#endif
1680
1681 .import handle_interruption,code
1682
1683kernel_bad_space:
1684 b intr_save
1685 ldi 31,%r8 /* Use an unused code */
1686
1687dbit_fault:
1688 b intr_save
1689 ldi 20,%r8
1690
1691itlb_fault:
1692 b intr_save
1693 ldi 6,%r8
1694
1695nadtlb_fault:
1696 b intr_save
1697 ldi 17,%r8
1698
1699naitlb_fault:
1700 b intr_save
1701 ldi 16,%r8
1702
1703dtlb_fault:
1704 b intr_save
1705 ldi 15,%r8
1706
1707 /* Register saving semantics for system calls:
1708
1709 %r1 clobbered by system call macro in userspace
1710 %r2 saved in PT_REGS by gateway page
1711 %r3 - %r18 preserved by C code (saved by signal code)
1712 %r19 - %r20 saved in PT_REGS by gateway page
1713 %r21 - %r22 non-standard syscall args
1714 stored in kernel stack by gateway page
1715 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1716 %r27 - %r30 saved in PT_REGS by gateway page
1717 %r31 syscall return pointer
1718 */
1719
1720 /* Floating point registers (FIXME: what do we do with these?)
1721
1722 %fr0 - %fr3 status/exception, not preserved
1723 %fr4 - %fr7 arguments
1724 %fr8 - %fr11 not preserved by C code
1725 %fr12 - %fr21 preserved by C code
1726 %fr22 - %fr31 not preserved by C code
1727 */
1728
1729 .macro reg_save regs
1730 STREG %r3, PT_GR3(\regs)
1731 STREG %r4, PT_GR4(\regs)
1732 STREG %r5, PT_GR5(\regs)
1733 STREG %r6, PT_GR6(\regs)
1734 STREG %r7, PT_GR7(\regs)
1735 STREG %r8, PT_GR8(\regs)
1736 STREG %r9, PT_GR9(\regs)
1737 STREG %r10,PT_GR10(\regs)
1738 STREG %r11,PT_GR11(\regs)
1739 STREG %r12,PT_GR12(\regs)
1740 STREG %r13,PT_GR13(\regs)
1741 STREG %r14,PT_GR14(\regs)
1742 STREG %r15,PT_GR15(\regs)
1743 STREG %r16,PT_GR16(\regs)
1744 STREG %r17,PT_GR17(\regs)
1745 STREG %r18,PT_GR18(\regs)
1746 .endm
1747
1748 .macro reg_restore regs
1749 LDREG PT_GR3(\regs), %r3
1750 LDREG PT_GR4(\regs), %r4
1751 LDREG PT_GR5(\regs), %r5
1752 LDREG PT_GR6(\regs), %r6
1753 LDREG PT_GR7(\regs), %r7
1754 LDREG PT_GR8(\regs), %r8
1755 LDREG PT_GR9(\regs), %r9
1756 LDREG PT_GR10(\regs),%r10
1757 LDREG PT_GR11(\regs),%r11
1758 LDREG PT_GR12(\regs),%r12
1759 LDREG PT_GR13(\regs),%r13
1760 LDREG PT_GR14(\regs),%r14
1761 LDREG PT_GR15(\regs),%r15
1762 LDREG PT_GR16(\regs),%r16
1763 LDREG PT_GR17(\regs),%r17
1764 LDREG PT_GR18(\regs),%r18
1765 .endm
1766
1767ENTRY(sys_fork_wrapper)
1768 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1769 ldo TASK_REGS(%r1),%r1
1770 reg_save %r1
1771 mfctl %cr27, %r3
1772 STREG %r3, PT_CR27(%r1)
1773
1774 STREG %r2,-RP_OFFSET(%r30)
1775 ldo FRAME_SIZE(%r30),%r30
1776#ifdef CONFIG_64BIT
1777 ldo -16(%r30),%r29 /* Reference param save area */
1778#endif
1779
1780 /* These are call-clobbered registers and therefore
1781 also syscall-clobbered (we hope). */
1782 STREG %r2,PT_GR19(%r1) /* save for child */
1783 STREG %r30,PT_GR21(%r1)
1784
1785 LDREG PT_GR30(%r1),%r25
1786 copy %r1,%r24
1787 BL sys_clone,%r2
1788 ldi SIGCHLD,%r26
1789
1790 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1791wrapper_exit:
1792 ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
1793 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1794 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1795
1796 LDREG PT_CR27(%r1), %r3
1797 mtctl %r3, %cr27
1798 reg_restore %r1
1799
1800 /* strace expects syscall # to be preserved in r20 */
1801 ldi __NR_fork,%r20
1802 bv %r0(%r2)
1803 STREG %r20,PT_GR20(%r1)
1804ENDPROC(sys_fork_wrapper)
1805
1806 /* Set the return value for the child */
1807ENTRY(child_return)
1808 BL schedule_tail, %r2
1809 nop
1810
1811 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
1812 LDREG TASK_PT_GR19(%r1),%r2
1813 b wrapper_exit
1814 copy %r0,%r28
1815ENDPROC(child_return)
1816
1817
1818ENTRY(sys_clone_wrapper)
1819 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1820 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1821 reg_save %r1
1822 mfctl %cr27, %r3
1823 STREG %r3, PT_CR27(%r1)
1824
1825 STREG %r2,-RP_OFFSET(%r30)
1826 ldo FRAME_SIZE(%r30),%r30
1827#ifdef CONFIG_64BIT
1828 ldo -16(%r30),%r29 /* Reference param save area */
1829#endif
1830
1831 /* WARNING - Clobbers r19 and r21, userspace must save these! */
1832 STREG %r2,PT_GR19(%r1) /* save for child */
1833 STREG %r30,PT_GR21(%r1)
1834 BL sys_clone,%r2
1835 copy %r1,%r24
1836
1837 b wrapper_exit
1838 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1839ENDPROC(sys_clone_wrapper)
1840
1841
1842ENTRY(sys_vfork_wrapper)
1843 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1844 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1845 reg_save %r1
1846 mfctl %cr27, %r3
1847 STREG %r3, PT_CR27(%r1)
1848
1849 STREG %r2,-RP_OFFSET(%r30)
1850 ldo FRAME_SIZE(%r30),%r30
1851#ifdef CONFIG_64BIT
1852 ldo -16(%r30),%r29 /* Reference param save area */
1853#endif
1854
1855 STREG %r2,PT_GR19(%r1) /* save for child */
1856 STREG %r30,PT_GR21(%r1)
1857
1858 BL sys_vfork,%r2
1859 copy %r1,%r26
1860
1861 b wrapper_exit
1862 LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
1863ENDPROC(sys_vfork_wrapper)
1864
1865
1866 .macro execve_wrapper execve
1867 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1868 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1869
1870 /*
1871 * Do we need to save/restore r3-r18 here?
1872 * I don't think so. why would new thread need old
1873 * threads registers?
1874 */
1875
1876 /* %arg0 - %arg3 are already saved for us. */
1877
1878 STREG %r2,-RP_OFFSET(%r30)
1879 ldo FRAME_SIZE(%r30),%r30
1880#ifdef CONFIG_64BIT
1881 ldo -16(%r30),%r29 /* Reference param save area */
1882#endif
1883 BL \execve,%r2
1884 copy %r1,%arg0
1885
1886 ldo -FRAME_SIZE(%r30),%r30
1887 LDREG -RP_OFFSET(%r30),%r2
1888
1889 /* If exec succeeded we need to load the args */
1890
1891 ldo -1024(%r0),%r1
1892 cmpb,>>= %r28,%r1,error_\execve
1893 copy %r2,%r19
1894
1895error_\execve:
1896 bv %r0(%r19)
1897 nop
1898 .endm
1899
1900 .import sys_execve
1901ENTRY(sys_execve_wrapper)
1902 execve_wrapper sys_execve
1903ENDPROC(sys_execve_wrapper)
1904
1905#ifdef CONFIG_64BIT
1906 .import sys32_execve
1907ENTRY(sys32_execve_wrapper)
1908 execve_wrapper sys32_execve
1909ENDPROC(sys32_execve_wrapper)
1910#endif
1911
1912ENTRY(sys_rt_sigreturn_wrapper)
1913 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1914 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1915 /* Don't save regs, we are going to restore them from sigcontext. */
1916 STREG %r2, -RP_OFFSET(%r30)
1917#ifdef CONFIG_64BIT
1918 ldo FRAME_SIZE(%r30), %r30
1919 BL sys_rt_sigreturn,%r2
1920 ldo -16(%r30),%r29 /* Reference param save area */
1921#else
1922 BL sys_rt_sigreturn,%r2
1923 ldo FRAME_SIZE(%r30), %r30
1924#endif
1925
1926 ldo -FRAME_SIZE(%r30), %r30
1927 LDREG -RP_OFFSET(%r30), %r2
1928
1929 /* FIXME: I think we need to restore a few more things here. */
1930 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1931 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1932 reg_restore %r1
1933
1934 /* If the signal was received while the process was blocked on a
1935 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1936 * take us to syscall_exit_rfi and on to intr_return.
1937 */
1938 bv %r0(%r2)
1939 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1940ENDPROC(sys_rt_sigreturn_wrapper)
1941
1942ENTRY(sys_sigaltstack_wrapper)
1943 /* Get the user stack pointer */
1944 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1945 ldo TASK_REGS(%r1),%r24 /* get pt regs */
1946 LDREG TASK_PT_GR30(%r24),%r24
1947 STREG %r2, -RP_OFFSET(%r30)
1948#ifdef CONFIG_64BIT
1949 ldo FRAME_SIZE(%r30), %r30
1950 BL do_sigaltstack,%r2
1951 ldo -16(%r30),%r29 /* Reference param save area */
1952#else
1953 BL do_sigaltstack,%r2
1954 ldo FRAME_SIZE(%r30), %r30
1955#endif
1956
1957 ldo -FRAME_SIZE(%r30), %r30
1958 LDREG -RP_OFFSET(%r30), %r2
1959 bv %r0(%r2)
1960 nop
1961ENDPROC(sys_sigaltstack_wrapper)

#ifdef CONFIG_64BIT
ENTRY(sys32_sigaltstack_wrapper)
 /* Get the user stack pointer */
 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
 LDREG TASK_PT_GR30(%r24),%r24
 STREG %r2, -RP_OFFSET(%r30)
 ldo FRAME_SIZE(%r30), %r30
 BL do_sigaltstack32,%r2
 ldo -16(%r30),%r29 /* Reference param save area */

 ldo -FRAME_SIZE(%r30), %r30
 LDREG -RP_OFFSET(%r30), %r2
 bv %r0(%r2)
 nop
ENDPROC(sys32_sigaltstack_wrapper)
#endif

ENTRY(syscall_exit)
 /* NOTE: HP-UX syscalls also come through here
  * after hpux_syscall_exit fixes up return
  * values. */

 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
  * via syscall_exit_rfi if the signal was received while the process
  * was running.
  */

 /* save return value now */

 mfctl %cr30, %r1
 LDREG TI_TASK(%r1),%r1
 STREG %r28,TASK_PT_GR28(%r1)

#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
 ldw TASK_PERSONALITY(%r1),%r19

 /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
 ldo -PER_HPUX(%r19), %r19
 cmpib,COND(<>),n 0,%r19,1f
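 /* im5, the cmpib immediate, is only 5 bits and sign-extended, so
  * PER_HPUX (0x10) won't fit; subtracting first turns the test into
  * a compare with zero: if ((personality - PER_HPUX) != 0) goto 1f */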

 /* Save other hpux returns if personality is PER_HPUX */
 STREG %r22,TASK_PT_GR22(%r1)
 STREG %r29,TASK_PT_GR29(%r1)
1:

#endif /* CONFIG_HPUX */

 /* Seems to me that dp could be wrong here, if the syscall involved
  * calling a module, and nothing got round to restoring dp on return.
  */
 loadgp

syscall_check_resched:

 /* check for reschedule */

 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */

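 /* bb numbers bits from the MSB (PA convention), while the TIF_*
  * flags are LSB-relative, hence the 31-TIF_NEED_RESCHED conversion
  * in the bit-branch above. */
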
 .import do_signal,code
syscall_check_sig:
 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
 and,COND(<>) %r19, %r26, %r0
 b,n syscall_restore /* nothing pending: skip the signal code */

syscall_do_signal:
 /* Save callee-save registers (for sigcontext).
  * FIXME: After this point the process structure should be
  * consistent with all the relevant state of the process
  * before the syscall. We need to verify this.
  */
 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
 reg_save %r26

#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
#endif

 BL do_notify_resume,%r2
 ldi 1, %r25 /* long in_syscall = 1 */

 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
 reg_restore %r20

 b,n syscall_check_sig
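
 /* In effect this is a loop: delivering one signal can leave more
  * work pending, so the flags are re-tested each time around.
  * C sketch of the control flow:
  *
  *	while (ti_flags & (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK))
  *		do_notify_resume(regs, 1);
  */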

syscall_restore:
 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1

 /* Are we being ptraced? */
 ldw TASK_FLAGS(%r1),%r19
 ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
 and,COND(=) %r19,%r2,%r0
 b,n syscall_restore_rfi
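 /* and,COND(=) nullifies the branch when neither trace flag is set,
  * so the common case falls straight through to the fast restore. */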

 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
 rest_fp %r19

 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
 mtsar %r19

 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
 LDREG TASK_PT_GR19(%r1),%r19
 LDREG TASK_PT_GR20(%r1),%r20
 LDREG TASK_PT_GR21(%r1),%r21
 LDREG TASK_PT_GR22(%r1),%r22
 LDREG TASK_PT_GR23(%r1),%r23
 LDREG TASK_PT_GR24(%r1),%r24
 LDREG TASK_PT_GR25(%r1),%r25
 LDREG TASK_PT_GR26(%r1),%r26
 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
 LDREG TASK_PT_GR29(%r1),%r29
 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */

 /* NOTE: We use rsm/ssm pair to make this operation atomic */
 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
 rsm PSW_SM_I, %r0
 copy %r1,%r30 /* Restore user sp */
 mfsp %sr3,%r1 /* Get user space id */
 mtsp %r1,%sr7 /* Restore sr7 */
 ssm PSW_SM_I, %r0
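 /* Why the interrupt-off window: an interruption landing between the
  * %r30 and %sr7 updates would observe a user stack pointer paired
  * with a kernel space id (or vice versa), confusing the interruption
  * entry path's choice of stack. */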

 /* Set sr2 to zero for userspace syscalls to work. */
 mtsp %r0,%sr2
 mtsp %r1,%sr4 /* Restore sr4 */
 mtsp %r1,%sr5 /* Restore sr5 */
 mtsp %r1,%sr6 /* Restore sr6 */

 depi 3,31,2,%r31 /* ensure return to user mode. */

#ifdef CONFIG_64BIT
 /* decide whether to reset the wide mode bit
  *
  * For a syscall, the W bit is stored in the lowest bit
  * of sp. Extract it and reset W if it is zero */
 extrd,u,*<> %r30,63,1,%r1
 rsm PSW_SM_W, %r0
 /* now reset the lowest bit of sp if it was set */
 xor %r30,%r1,%r30
#endif
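 /* C sketch of the W-bit handling above: the low bit of sp carries
  * the saved W (wide-mode) bit across the syscall:
  *
  *	if (!(sp & 1))
  *		psw &= ~PSW_W;	 (narrow userspace)
  *	sp ^= (sp & 1);		 (clear the marker bit)
  */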
 be,n 0(%sr3,%r31) /* return to user space */

 /* We have to return via an RFI, so that PSW T and R bits can be set
  * appropriately.
  * This sets up pt_regs so we can return via intr_restore, which is not
  * the most efficient way of doing things, but it works.
  */
syscall_restore_rfi:
 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
 mtctl %r2,%cr0 /* for immediate trap */
 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
 ldi 0x0b,%r20 /* Create new PSW */
 depi -1,13,1,%r20 /* C, Q, D, and I bits */
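 /* 0x0b supplies the I, D and Q bits in the PSW's rightmost nibble
  * (PA big-endian bit numbering); the depi then adds the C bit at
  * position 13, matching the C, Q, D, I set named above. */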

 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
  * set in thread_info.h and converted to PA bitmap
  * numbers in asm-offsets.c */

 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
 depi -1,27,1,%r20 /* R bit */

 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
 depi -1,7,1,%r20 /* T bit */

 STREG %r20,TASK_PT_PSW(%r1)

 /* Always store space registers, since sr3 can be changed (e.g. fork) */

 mfsp %sr3,%r25
 STREG %r25,TASK_PT_SR3(%r1)
 STREG %r25,TASK_PT_SR4(%r1)
 STREG %r25,TASK_PT_SR5(%r1)
 STREG %r25,TASK_PT_SR6(%r1)
 STREG %r25,TASK_PT_SR7(%r1)
 STREG %r25,TASK_PT_IASQ0(%r1)
 STREG %r25,TASK_PT_IASQ1(%r1)

 /* XXX W bit??? */
 /* Now if old D bit is clear, it means we didn't save all registers
  * on syscall entry, so do that now. This only happens on TRACEME
  * calls, or if someone attached to us while we were on a syscall.
  * We could make this more efficient by not saving r3-r18, but
  * then we wouldn't be able to use the common intr_restore path.
  * It is only for traced processes anyway, so performance is not
  * an issue.
  */
 bb,< %r2,30,pt_regs_ok /* Branch if D set */
 ldo TASK_REGS(%r1),%r25
 reg_save %r25 /* Save r3 to r18 */

 /* Save the current sr */
 mfsp %sr0,%r2
 STREG %r2,TASK_PT_SR0(%r1)

 /* Save the scratch sr */
 mfsp %sr1,%r2
 STREG %r2,TASK_PT_SR1(%r1)

 /* sr2 should be set to zero for userspace syscalls */
 STREG %r0,TASK_PT_SR2(%r1)

pt_regs_ok:
 LDREG TASK_PT_GR31(%r1),%r2
 depi 3,31,2,%r2 /* ensure return to user mode. */
 STREG %r2,TASK_PT_IAOQ0(%r1)
 ldo 4(%r2),%r2
 STREG %r2,TASK_PT_IAOQ1(%r1)
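 /* The IA offset queue holds the current and next instruction, so
  * the return address is stored twice, 4 bytes apart, with the low
  * two bits forced to 3 (privilege level 3 = user). In C terms:
  *
  *	regs->iaoq[0] = rp | 3;
  *	regs->iaoq[1] = regs->iaoq[0] + 4;
  */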
 copy %r25,%r16
 b intr_restore
 nop

 .import schedule,code
syscall_do_resched:
 BL schedule,%r2
#ifdef CONFIG_64BIT
 ldo -16(%r30),%r29 /* Reference param save area */
#else
 nop
#endif
 b syscall_check_resched /* if resched, we start over again */
 nop
ENDPROC(syscall_exit)


#ifdef CONFIG_FUNCTION_TRACER
 .import ftrace_function_trampoline,code
ENTRY(_mcount)
 copy %r3, %arg2
 b ftrace_function_trampoline
 nop
ENDPROC(_mcount)

ENTRY(return_to_handler)
 load32 return_trampoline, %rp
 copy %ret0, %arg0
 copy %ret1, %arg1
 b ftrace_return_to_handler
 nop
return_trampoline:
 copy %ret0, %rp
 copy %r23, %ret0
 copy %r24, %ret1

.globl ftrace_stub
ftrace_stub:
 bv %r0(%rp)
 nop
ENDPROC(return_to_handler)
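
 /* Rough flow above: ftrace_return_to_handler hands back the
  * original return address in %ret0; return_trampoline moves it
  * into %rp and reloads the traced function's return values from
  * %r23/%r24 (which the surrounding trampoline code is assumed to
  * have parked there). */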
#endif /* CONFIG_FUNCTION_TRACER */


get_register:
 /*
  * get_register is used by the non-access TLB miss handlers to
  * copy the value of the general register specified in r8 into
  * r1. This routine can't be used for shadowed registers, since
  * the rfir will restore the original value. So, for the shadowed
  * registers we put a -1 into r1 to indicate that the register
  * should not be used (the register being copied could also have
  * a -1 in it, but that is OK, it just means that we will have
  * to use the slow path instead).
  */
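 /* blr %r8,%r0 is a computed branch into the table below: each
  * register number selects one two-instruction (bv + delay slot)
  * entry. As a C sketch:
  *
  *	switch (regnum) {
  *	case 0: r1 = reg0; break;
  *	case 1: r1 = -1;   break;	(shadowed)
  *	...
  *	}
  */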
 blr %r8,%r0
 nop
 bv %r0(%r25) /* r0 */
 copy %r0,%r1
 bv %r0(%r25) /* r1 - shadowed */
 ldi -1,%r1
 bv %r0(%r25) /* r2 */
 copy %r2,%r1
 bv %r0(%r25) /* r3 */
 copy %r3,%r1
 bv %r0(%r25) /* r4 */
 copy %r4,%r1
 bv %r0(%r25) /* r5 */
 copy %r5,%r1
 bv %r0(%r25) /* r6 */
 copy %r6,%r1
 bv %r0(%r25) /* r7 */
 copy %r7,%r1
 bv %r0(%r25) /* r8 - shadowed */
 ldi -1,%r1
 bv %r0(%r25) /* r9 - shadowed */
 ldi -1,%r1
 bv %r0(%r25) /* r10 */
 copy %r10,%r1
 bv %r0(%r25) /* r11 */
 copy %r11,%r1
 bv %r0(%r25) /* r12 */
 copy %r12,%r1
 bv %r0(%r25) /* r13 */
 copy %r13,%r1
 bv %r0(%r25) /* r14 */
 copy %r14,%r1
 bv %r0(%r25) /* r15 */
 copy %r15,%r1
 bv %r0(%r25) /* r16 - shadowed */
 ldi -1,%r1
 bv %r0(%r25) /* r17 - shadowed */
 ldi -1,%r1
 bv %r0(%r25) /* r18 */
 copy %r18,%r1
 bv %r0(%r25) /* r19 */
 copy %r19,%r1
 bv %r0(%r25) /* r20 */
 copy %r20,%r1
 bv %r0(%r25) /* r21 */
 copy %r21,%r1
 bv %r0(%r25) /* r22 */
 copy %r22,%r1
 bv %r0(%r25) /* r23 */
 copy %r23,%r1
 bv %r0(%r25) /* r24 - shadowed */
 ldi -1,%r1
 bv %r0(%r25) /* r25 - shadowed */
 ldi -1,%r1
 bv %r0(%r25) /* r26 */
 copy %r26,%r1
 bv %r0(%r25) /* r27 */
 copy %r27,%r1
 bv %r0(%r25) /* r28 */
 copy %r28,%r1
 bv %r0(%r25) /* r29 */
 copy %r29,%r1
 bv %r0(%r25) /* r30 */
 copy %r30,%r1
 bv %r0(%r25) /* r31 */
 copy %r31,%r1


set_register:
 /*
  * set_register is used by the non-access TLB miss handlers to
  * copy the value of r1 into the general register specified in
  * r8.
  */
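 /* Same blr-indexed, two-instruction-per-entry dispatch as
  * get_register above; the r0 entry just writes to %r0, which the
  * hardware discards. */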
 blr %r8,%r0
 nop
 bv %r0(%r25) /* r0 (silly, but it is a place holder) */
 copy %r1,%r0
 bv %r0(%r25) /* r1 */
 copy %r1,%r1
 bv %r0(%r25) /* r2 */
 copy %r1,%r2
 bv %r0(%r25) /* r3 */
 copy %r1,%r3
 bv %r0(%r25) /* r4 */
 copy %r1,%r4
 bv %r0(%r25) /* r5 */
 copy %r1,%r5
 bv %r0(%r25) /* r6 */
 copy %r1,%r6
 bv %r0(%r25) /* r7 */
 copy %r1,%r7
 bv %r0(%r25) /* r8 */
 copy %r1,%r8
 bv %r0(%r25) /* r9 */
 copy %r1,%r9
 bv %r0(%r25) /* r10 */
 copy %r1,%r10
 bv %r0(%r25) /* r11 */
 copy %r1,%r11
 bv %r0(%r25) /* r12 */
 copy %r1,%r12
 bv %r0(%r25) /* r13 */
 copy %r1,%r13
 bv %r0(%r25) /* r14 */
 copy %r1,%r14
 bv %r0(%r25) /* r15 */
 copy %r1,%r15
 bv %r0(%r25) /* r16 */
 copy %r1,%r16
 bv %r0(%r25) /* r17 */
 copy %r1,%r17
 bv %r0(%r25) /* r18 */
 copy %r1,%r18
 bv %r0(%r25) /* r19 */
 copy %r1,%r19
 bv %r0(%r25) /* r20 */
 copy %r1,%r20
 bv %r0(%r25) /* r21 */
 copy %r1,%r21
 bv %r0(%r25) /* r22 */
 copy %r1,%r22
 bv %r0(%r25) /* r23 */
 copy %r1,%r23
 bv %r0(%r25) /* r24 */
 copy %r1,%r24
 bv %r0(%r25) /* r25 */
 copy %r1,%r25
 bv %r0(%r25) /* r26 */
 copy %r1,%r26
 bv %r0(%r25) /* r27 */
 copy %r1,%r27
 bv %r0(%r25) /* r28 */
 copy %r1,%r28
 bv %r0(%r25) /* r29 */
 copy %r1,%r29
 bv %r0(%r25) /* r30 */
 copy %r1,%r30
 bv %r0(%r25) /* r31 */
 copy %r1,%r31
