/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 *
 * kernel entry points (interruptions, system call wrappers)
 * Copyright (C) 1999,2000 Philipp Rumpf
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
 */

#include <asm/asm-offsets.h>

/* we have the following possibilities to act on an interruption:
 * - handle in assembly and use shadowed registers only
 * - save registers to kernel stack and handle in assembly or C */


#include <asm/psw.h>
#include <asm/cache.h>		/* for L1_CACHE_SHIFT */
#include <asm/assembly.h>	/* for LDREG/STREG defines */
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/ldcw.h>
#include <asm/traps.h>
#include <asm/thread_info.h>
#include <asm/alternative.h>

#include <linux/linkage.h>
#include <linux/pgtable.h>

#ifdef CONFIG_64BIT
	.level 2.0w
#else
	.level 2.0
#endif

	.import	pa_tlb_lock,data
	.macro	load_pa_tlb_lock reg
	mfctl	%cr25,\reg
	addil	L%(PAGE_SIZE << (PGD_ALLOC_ORDER - 1)),\reg
	.endm

	/* space_to_prot macro creates a prot id from a space id */

#if (SPACEID_SHIFT) == 0
	.macro	space_to_prot spc prot
	depd,z	\spc,62,31,\prot
	.endm
#else
	.macro	space_to_prot spc prot
	extrd,u	\spc,(64 - (SPACEID_SHIFT)),32,\prot
	.endm
#endif
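
	/*
	 * Roughly, in C (illustrative sketch only, not generated code):
	 *
	 *	prot = spc << 1;			// SPACEID_SHIFT == 0
	 *	prot = spc >> (SPACEID_SHIFT - 1);	// otherwise
	 *
	 * i.e. the protection id is just the space id shifted into the
	 * position the TLB insert instructions expect.
	 */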

	/* Switch to virtual mapping, trashing only %r1 */
	.macro	virt_map
	/* pcxt_ssm_bug */
	rsm	PSW_SM_I, %r0	/* barrier for "Relied upon Translation" */
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	tovirt_r1 %r29
	load32	KERNEL_PSW, %r1

	rsm	PSW_SM_QUIET,%r0	/* second "heavy weight" ctl op */
	mtctl	%r0, %cr17	/* Clear IIASQ tail */
	mtctl	%r0, %cr17	/* Clear IIASQ head */
	mtctl	%r1, %ipsw
	load32	4f, %r1
	mtctl	%r1, %cr18	/* Set IIAOQ tail */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18	/* Set IIAOQ head */
	rfir
	nop
4:
	.endm
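
	/*
	 * Descriptive outline (illustrative only): with interruptions
	 * masked, the macro primes the interruption instruction queues
	 * so that IIAOQ points at the virtual address of local label 4:
	 * above, loads KERNEL_PSW into %ipsw, and lets rfir "return"
	 * there with translations enabled.
	 */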

	/*
	 * The "get_stack" macros are responsible for determining the
	 * kernel stack value.
	 *
	 *	If sr7 == 0
	 *	    Already using a kernel stack, so call the
	 *	    get_stack_use_r30 macro to push a pt_regs structure
	 *	    on the stack, and store registers there.
	 *	else
	 *	    Need to set up a kernel stack, so call the
	 *	    get_stack_use_cr30 macro to set up a pointer
	 *	    to the pt_regs structure contained within the
	 *	    task pointer pointed to by cr30. Set the stack
	 *	    pointer to point to the end of the task structure.
	 *
	 * Note that we use shadowed registers for temps until
	 * we can save %r26 and %r29. %r26 is used to preserve
	 * %r8 (a shadowed register) which temporarily contained
	 * either the fault type ("code") or the eirr. We need
	 * to use a non-shadowed register to carry the value over
	 * the rfir in virt_map. We use %r26 since this value winds
	 * up being passed as the argument to either do_cpu_irq_mask
	 * or handle_interruption. %r29 is used to hold a pointer to
	 * the register save area, and once again, it needs to
	 * be a non-shadowed register so that it survives the rfir.
	 *
	 * N.B. TASK_SZ_ALGN and PT_SZ_ALGN include space for a stack frame.
	 */

	.macro	get_stack_use_cr30

	/* we save the registers in the task struct */

	copy	%r30, %r17
	mfctl	%cr30, %r1
	ldo	THREAD_SZ_ALGN(%r1), %r30
	mtsp	%r0,%sr7
	mtsp	%r16,%sr3
	tophys	%r1,%r9
	LDREG	TI_TASK(%r9), %r1	/* thread_info -> task_struct */
	tophys	%r1,%r9
	ldo	TASK_REGS(%r9),%r9
	STREG	%r17,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	get_stack_use_r30

	/* we put a struct pt_regs on the stack and save the registers there */

	tophys	%r30,%r9
	copy	%r30,%r1
	ldo	PT_SZ_ALGN(%r30),%r30
	STREG	%r1,PT_GR30(%r9)
	STREG	%r29,PT_GR29(%r9)
	STREG	%r26,PT_GR26(%r9)
	STREG	%r16,PT_SR7(%r9)
	copy	%r9,%r29
	.endm

	.macro	rest_stack
	LDREG	PT_GR1(%r29), %r1
	LDREG	PT_GR30(%r29),%r30
	LDREG	PT_GR29(%r29),%r29
	.endm

	/* default interruption handler
	 * (calls traps.c:handle_interruption) */
	.macro	def code
	b	intr_save
	ldi	\code, %r8
	.align	32
	.endm

	/* Interrupt interruption handler
	 * (calls irq.c:do_cpu_irq_mask) */
	.macro	extint code
	b	intr_extint
	mfsp	%sr7,%r16
	.align	32
	.endm

	.import	os_hpmc, code

	/* HPMC handler */
	.macro	hpmc code
	nop			/* must be a NOP, will be patched later */
	load32	PA(os_hpmc), %r3
	bv,n	0(%r3)
	nop
	.word	0		/* checksum (will be patched) */
	.word	0		/* address of handler */
	.word	0		/* length of handler */
	.endm

	/*
	 * Performance Note: Instructions will be moved up into
	 * this part of the code later on, once we are sure
	 * that the tlb miss handlers are close to final form.
	 */

	/* Register definitions for tlb miss handler macros */

	va  = r8	/* virtual address for which the trap occurred */
	spc = r24	/* space for which the trap occurred */

#ifndef CONFIG_64BIT

	/*
	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	itlb_11 code

	mfctl	%pcsq, spc
	b	itlb_miss_11
	mfctl	%pcoq, va

	.align	32
	.endm
#endif

	/*
	 * itlb miss interruption handler (parisc 2.0)
	 */

	.macro	itlb_20 code
	mfctl	%pcsq, spc
#ifdef CONFIG_64BIT
	b	itlb_miss_20w
#else
	b	itlb_miss_20
#endif
	mfctl	%pcoq, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	naitlb_11 code

	mfctl	%isr,spc
	b	naitlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * naitlb miss interruption handler (parisc 2.0)
	 */

	.macro	naitlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	naitlb_miss_20w
#else
	b	naitlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dtlb_11 code

	mfctl	%isr, spc
	b	dtlb_miss_11
	mfctl	%ior, va

	.align	32
	.endm
#endif

	/*
	 * dtlb miss interruption handler (parisc 2.0)
	 */

	.macro	dtlb_20 code

	mfctl	%isr, spc
#ifdef CONFIG_64BIT
	b	dtlb_miss_20w
#else
	b	dtlb_miss_20
#endif
	mfctl	%ior, va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */

	.macro	nadtlb_11 code

	mfctl	%isr,spc
	b	nadtlb_miss_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/* nadtlb miss interruption handler (parisc 2.0) */

	.macro	nadtlb_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	nadtlb_miss_20w
#else
	b	nadtlb_miss_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

#ifndef CONFIG_64BIT
	/*
	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
	 */

	.macro	dbit_11 code

	mfctl	%isr,spc
	b	dbit_trap_11
	mfctl	%ior,va

	.align	32
	.endm
#endif

	/*
	 * dirty bit trap interruption handler (parisc 2.0)
	 */

	.macro	dbit_20 code

	mfctl	%isr,spc
#ifdef CONFIG_64BIT
	b	dbit_trap_20w
#else
	b	dbit_trap_20
#endif
	mfctl	%ior,va

	.align	32
	.endm

	/* In LP64, the space contains part of the upper 32 bits of the
	 * fault address. We have to extract this and place it in the va,
	 * zeroing the corresponding bits in the space register */
	.macro	space_adjust	spc,va,tmp
#ifdef CONFIG_64BIT
	extrd,u	\spc,63,SPACEID_SHIFT,\tmp
	depd	%r0,63,SPACEID_SHIFT,\spc
	depd	\tmp,31,SPACEID_SHIFT,\va
#endif
	.endm
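
	/*
	 * Illustrative C sketch of space_adjust (not generated code):
	 *
	 *	tmp  = spc & ((1UL << SPACEID_SHIFT) - 1);
	 *	spc &= ~((1UL << SPACEID_SHIFT) - 1);
	 *	va  |= tmp << 32;
	 *
	 * i.e. the low SPACEID_SHIFT bits of the space id are really
	 * the top bits of the 64-bit fault address.
	 */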

	.import	swapper_pg_dir,code

	/* Get the pgd.  For faults on space zero (kernel space), this
	 * is simply swapper_pg_dir.  For user space faults, the
	 * pgd is stored in %cr25 */
	.macro	get_pgd	spc,reg
	ldil	L%PA(swapper_pg_dir),\reg
	ldo	R%PA(swapper_pg_dir)(\reg),\reg
	or,COND(=) %r0,\spc,%r0
	mfctl	%cr25,\reg
	.endm
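
	/*
	 * Roughly, in C (sketch only):
	 *
	 *	reg = __pa(swapper_pg_dir);
	 *	if (spc != 0)		// or,COND(=) nullifies the mfctl
	 *		reg = mfctl(25);	// user pgd
	 */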

	/*
		space_check(spc,tmp,fault)

		spc - The space we saw the fault with.
		tmp - The place to store the current space.
		fault - Function to call on failure.

		Only allow faults on different spaces from the
		currently active one if we're the kernel

	*/
	.macro	space_check	spc,tmp,fault
	mfsp	%sr7,\tmp
	/* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
	or,COND(<>) %r0,\spc,%r0	/* user may execute gateway page
					 * as kernel, so defeat the space
					 * check if it is */
	copy	\spc,\tmp
	or,COND(=) %r0,\tmp,%r0		/* nullify if executing as kernel */
	cmpb,COND(<>),n \tmp,\spc,\fault
	.endm
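
	/*
	 * Illustrative C equivalent (sketch only):
	 *
	 *	tmp = mfsp(7);
	 *	if (spc != 0 && tmp != 0 && tmp != spc)
	 *		goto fault;
	 *
	 * Faults on space 0 (kernel/gateway) and faults taken while the
	 * kernel is running (sr7 == 0) are always allowed.
	 */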

	/* Look up a PTE in a 2-Level scheme (faulting at each
	 * level if the entry isn't present)
	 *
	 * NOTE: we use ldw even for LP64, since the short pointers
	 * can address up to 1TB
	 */
	.macro	L2_ptep	pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
	extru	\va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 #else
 # if PAGE_SIZE > 4096
	extru	\va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
 # else
	extru	\va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
 # endif
# endif
#endif
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	copy	%r0,\pte
	ldw,s	\index(\pmd),\pmd
	bb,>=,n	\pmd,_PxD_PRESENT_BIT,\fault
	dep	%r0,31,PxD_FLAG_SHIFT,\pmd	/* clear flags */
	SHLREG	\pmd,PxD_VALUE_SHIFT,\pmd
	extru	\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
	dep	%r0,31,PAGE_SHIFT,\pmd	/* clear offset */
	shladd	\index,BITS_PER_PTE_ENTRY,\pmd,\pmd	/* pmd is now pte */
	.endm
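
	/*
	 * Rough C rendering of the 2-level walk above (sketch only;
	 * helper names are illustrative, not the real pgtable API):
	 *
	 *	entry = ((u32 *)(pmd & PAGE_MASK))[pmd_idx(va)];  // ldw,s
	 *	if (!(entry & PxD_FLAG_PRESENT))
	 *		goto fault;
	 *	pte_page = (entry & ~flag_bits) << PxD_VALUE_SHIFT;
	 *	ptep = pte_page + pte_idx(va) * sizeof(pte_t);
	 */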

	/* Look up PTE in a 3-Level scheme.
	 *
	 * Here we implement a Hybrid L2/L3 scheme: we allocate the
	 * first pmd adjacent to the pgd.  This means that we can
	 * subtract a constant offset to get to it.  The pmd and pgd
	 * sizes are arranged so that a single pmd covers 4GB (giving
	 * a full LP64 process access to 8TB) so our lookups are
	 * effectively L2 for the first 4GB of the kernel (i.e. for
	 * all ILP32 processes and all the kernel for machines with
	 * under 4GB of memory) */
	.macro	L3_ptep pgd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
	extrd,u	\va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldw,s	\index(\pgd),\pgd
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	bb,>=,n	\pgd,_PxD_PRESENT_BIT,\fault
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	shld	\pgd,PxD_VALUE_SHIFT,\index
	extrd,u,*=	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	copy	\index,\pgd
	extrd,u,*<>	\va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
	ldo	ASM_PGD_PMD_OFFSET(\pgd),\pgd
#endif
	L2_ptep	\pgd,\pte,\index,\va,\fault
	.endm
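
	/*
	 * Descriptive note: each extrd,u,*= above re-tests "are the
	 * upper va bits (the pgd index) all zero?" and, if so, nullifies
	 * the following instruction.  For va < 4GB the pgd level is thus
	 * skipped entirely and the pmd adjacent to the pgd is used
	 * (pgd += ASM_PGD_PMD_OFFSET); for higher addresses a real
	 * three-level walk is performed.
	 */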

	/* Acquire pa_tlb_lock lock and check page is present. */
	.macro	tlb_lock	spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
98:	cmpib,COND(=),n	0,\spc,2f
	load_pa_tlb_lock \tmp
1:	LDCW	0(\tmp),\tmp1
	cmpib,COND(=)	0,\tmp1,1b
	nop
	LDREG	0(\ptp),\pte
	bb,<,n	\pte,_PAGE_PRESENT_BIT,3f
	b	\fault
	stw	\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
2:	LDREG	0(\ptp),\pte
	bb,>=,n	\pte,_PAGE_PRESENT_BIT,\fault
3:
	.endm
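
	/*
	 * Roughly, in C (sketch; __ldcw returns 0 while another CPU
	 * holds the lock):
	 *
	 *	if (spc != 0) {			// user fault: take the lock
	 *		while (__ldcw(&pa_tlb_lock) == 0)
	 *			;		// spin
	 *		pte = *ptp;
	 *		if (!(pte & _PAGE_PRESENT)) {
	 *			pa_tlb_lock = spc;	// release (spc != 0)
	 *			goto fault;
	 *		}
	 *	} else {			// kernel fault: lock-free
	 *		pte = *ptp;
	 *		if (!(pte & _PAGE_PRESENT))
	 *			goto fault;
	 *	}
	 */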

	/* Release pa_tlb_lock lock without reloading lock address.
	   Note that the values in the register spc are limited to
	   NR_SPACE_IDS (262144). Thus, the stw instruction always
	   stores a nonzero value even when register spc is 64 bits.
	   We use an ordered store to ensure all prior accesses are
	   performed prior to releasing the lock. */
	.macro	tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
98:	or,COND(=)	%r0,\spc,%r0
	stw,ma	\spc,0(\tmp)
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
#endif
	.endm

	/* Release pa_tlb_lock lock. */
	.macro	tlb_unlock1	spc,tmp
#ifdef CONFIG_SMP
98:	load_pa_tlb_lock \tmp
99:	ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
	tlb_unlock0	\spc,\tmp
#endif
	.endm

	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
	 * don't needlessly dirty the cache line if it was already set */
	.macro	update_accessed	ptp,pte,tmp,tmp1
	ldi	_PAGE_ACCESSED,\tmp1
	or	\tmp1,\pte,\tmp
	and,COND(<>)	\tmp1,\pte,%r0
	STREG	\tmp,0(\ptp)
	.endm
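
	/*
	 * C sketch (illustrative): write back only when the bit was
	 * previously clear, so a hot PTE's cache line stays clean:
	 *
	 *	if (!(pte & _PAGE_ACCESSED))
	 *		*ptp = pte | _PAGE_ACCESSED;
	 */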

	/* Set the dirty bit (and accessed bit).  No need to be
	 * clever, this is only used from the dirty fault */
	.macro	update_dirty	ptp,pte,tmp
	ldi	_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
	or	\tmp,\pte,\pte
	STREG	\pte,0(\ptp)
	.endm
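
	/* i.e., roughly: *ptp = pte |= _PAGE_ACCESSED | _PAGE_DIRTY; */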

	/* We have (depending on the page size):
	 * - 38 to 52-bit Physical Page Number
	 * - 12 to 26-bit page offset
	 */
	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
	 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
	#define PAGE_ADD_SHIFT		(PAGE_SHIFT-12)
	#define PAGE_ADD_HUGE_SHIFT	(REAL_HPAGE_SHIFT-12)

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	.macro	convert_for_tlb_insert20 pte,tmp
#ifdef CONFIG_HUGETLB_PAGE
	copy	\pte,\tmp
	extrd,u	\tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
			64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte

	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,\
			(63-58)+PAGE_ADD_SHIFT,\pte
	extrd,u,*=	\tmp,_PAGE_HPAGE_BIT+32,1,%r0
	depdi	_HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
			(63-58)+PAGE_ADD_HUGE_SHIFT,\pte
#else /* Huge pages disabled */
	extrd,u	\pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
			64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
	depdi	_PAGE_SIZE_ENCODING_DEFAULT,63,\
			(63-58)+PAGE_ADD_SHIFT,\pte
#endif
	.endm

	/* Convert the pte and prot to tlb insertion values.  How
	 * this happens is quite subtle, read below */
	.macro	make_insert_tlb	spc,pte,prot,tmp
	space_to_prot	\spc \prot	/* create prot id from space */
	/* The following is the real subtlety.  This is depositing
	 * T <-> _PAGE_REFTRAP
	 * D <-> _PAGE_DIRTY
	 * B <-> _PAGE_DMB (memory break)
	 *
	 * Then incredible subtlety: The access rights are
	 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
	 * See 3-14 of the parisc 2.0 manual
	 *
	 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
	 * trigger an access rights trap in user space if the user
	 * tries to read an unreadable page */
	depd	\pte,8,7,\prot

	/* PAGE_USER indicates the page can be read with user privileges,
	 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
	 * contains _PAGE_READ) */
	extrd,u,*=	\pte,_PAGE_USER_BIT+32,1,%r0
	depdi	7,11,3,\prot
	/* If we're a gateway page, drop PL2 back to zero for promotion
	 * to kernel privilege (so we can execute the page as kernel).
	 * Any privilege promotion page always denies read and write */
	extrd,u,*=	\pte,_PAGE_GATEWAY_BIT+32,1,%r0
	depd	%r0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Enforce uncacheable pages.
	 * This should ONLY be used for MMIO on PA 2.0 machines.
	 * Memory/DMA is cache coherent on all PA2.0 machines we support
	 * (that means T-class is NOT supported) and the memory controllers
	 * on most of those machines only handle cache transactions.
	 */
	extrd,u,*=	\pte,_PAGE_NO_CACHE_BIT+32,1,%r0
	depdi	1,12,1,\prot

	/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
	convert_for_tlb_insert20 \pte \tmp
	.endm

	/* Identical macro to make_insert_tlb above, except it
	 * makes the tlb entry for the differently formatted pa11
	 * insertion instructions */
	.macro	make_insert_tlb_11	spc,pte,prot
	zdep	\spc,30,15,\prot
	dep	\pte,8,7,\prot
	extru,=	\pte,_PAGE_NO_CACHE_BIT,1,%r0
	depi	1,12,1,\prot
	extru,=	\pte,_PAGE_USER_BIT,1,%r0
	depi	7,11,3,\prot	/* Set for user space (1 rsvd for read) */
	extru,=	\pte,_PAGE_GATEWAY_BIT,1,%r0
	depi	0,11,2,\prot	/* If Gateway, Set PL2 to 0 */

	/* Get rid of prot bits and convert to page addr for iitlba */

	depi	0,31,ASM_PFN_PTE_SHIFT,\pte
	SHRREG	\pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
	.endm

	/* This is for ILP32 PA2.0 only.  The TLB insertion needs
	 * to extend into I/O space if the address is 0xfXXXXXXX
	 * so we extend the f's into the top word of the pte in
	 * this case */
	.macro	f_extend	pte,tmp
	extrd,s	\pte,42,4,\tmp
	addi,<>	1,\tmp,%r0
	extrd,s	\pte,63,25,\pte
	.endm
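
	/*
	 * Rough C rendering (illustrative only; the shifted pte here
	 * already holds a page-number-style value):
	 *
	 *	if (((pte >> 21) & 0xf) == 0xf)		// 0xfXXXXXXX I/O range
	 *		pte = ((s64)pte << 39) >> 39;	// sign-extend low 25 bits
	 */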

	/* The alias region is an 8MB aligned 16MB region used to clear
	 * and copy user pages at addresses congruent with the user
	 * virtual address.
	 *
	 * To use the alias page, you set %r26 up with the "to" TLB
	 * entry (identifying the physical page) and %r23 up with
	 * the "from" tlb entry (or nothing if only a "to" entry---for
	 * clear_user_page_asm) */
	.macro	do_alias	spc,tmp,tmp1,va,pte,prot,fault,patype
	cmpib,COND(<>),n 0,\spc,\fault
	ldil	L%(TMPALIAS_MAP_START),\tmp
#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
	/* on LP64, ldi will sign extend into the upper 32 bits,
	 * which is behaviour we don't want */
	depdi	0,31,32,\tmp
#endif
	copy	\va,\tmp1
	depi	0,31,23,\tmp1
	cmpb,COND(<>),n	\tmp,\tmp1,\fault
	mfctl	%cr19,\tmp	/* iir */
	/* get the opcode (first six bits) into \tmp */
	extrw,u	\tmp,5,6,\tmp
	/*
	 * Only setting the T bit prevents data cache movein
	 * Setting access rights to zero prevents instruction cache movein
	 *
	 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
	 * to type field and _PAGE_READ goes to top bit of PL1
	 */
	ldi	(_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
	/*
	 * so if the opcode is one (i.e. this is a memory management
	 * instruction) nullify the next load so \prot is only T.
	 * Otherwise this is a normal data operation
	 */
	cmpiclr,=	0x01,\tmp,%r0
	ldi	(_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
.ifc \patype,20
	depd,z	\prot,8,7,\prot
.else
.ifc \patype,11
	depw,z	\prot,8,7,\prot
.else
	.error "undefined PA type to do_alias"
.endif
.endif
	/*
	 * OK, it is in the temp alias region, check whether "from" or "to".
	 * Check "subtle" note in pacache.S re: r23/r26.
	 */
#ifdef CONFIG_64BIT
	extrd,u,*=	\va,41,1,%r0
#else
	extrw,u,=	\va,9,1,%r0
#endif
	or,COND(tr)	%r23,%r0,\pte
	or	%r26,%r0,\pte
	.endm


	/*
	 * Fault_vectors are architecturally required to be aligned on a 2K
	 * boundary
	 */

	.section .text.hot
	.align 2048

ENTRY(fault_vector_20)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_20		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_20		15
	naitlb_20	16
	nadtlb_20	17
	def		18
	def		19
	dbit_20		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_20)

#ifndef CONFIG_64BIT

	.align 2048

ENTRY(fault_vector_11)
	/* First vector is invalid (0) */
	.ascii	"cows can fly"
	.byte 0
	.align 32

	hpmc		 1
	def		 2
	def		 3
	extint		 4
	def		 5
	itlb_11		 PARISC_ITLB_TRAP
	def		 7
	def		 8
	def		 9
	def		10
	def		11
	def		12
	def		13
	def		14
	dtlb_11		15
	naitlb_11	16
	nadtlb_11	17
	def		18
	def		19
	dbit_11		20
	def		21
	def		22
	def		23
	def		24
	def		25
	def		26
	def		27
	def		28
	def		29
	def		30
	def		31
END(fault_vector_11)

#endif
	/* Fault vector is separately protected and *must* be on its own page */
	.align	PAGE_SIZE

	.import	handle_interruption,code
	.import	do_cpu_irq_mask,code

	/*
	 * Child Returns here
	 *
	 * copy_thread moved args into task save area.
	 */

ENTRY(ret_from_kernel_thread)
	/* Call schedule_tail first though */
	BL	schedule_tail, %r2
	nop

	LDREG	TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
	LDREG	TASK_PT_GR25(%r1), %r26
#ifdef CONFIG_64BIT
	LDREG	TASK_PT_GR27(%r1), %r27
#endif
	LDREG	TASK_PT_GR26(%r1), %r1
	ble	0(%sr7, %r1)
	copy	%r31, %r2
	b	finish_child_return
	nop
END(ret_from_kernel_thread)


	/*
	 * struct task_struct *_switch_to(struct task_struct *prev,
	 *	struct task_struct *next)
	 *
	 * switch kernel stacks and return prev */
ENTRY_CFI(_switch_to)
	STREG	%r2, -RP_OFFSET(%r30)

	callee_save_float
	callee_save

	load32	_switch_to_ret, %r2

	STREG	%r2, TASK_PT_KPC(%r26)
	LDREG	TASK_PT_KPC(%r25), %r2

	STREG	%r30, TASK_PT_KSP(%r26)
	LDREG	TASK_PT_KSP(%r25), %r30
	LDREG	TASK_THREAD_INFO(%r25), %r25
	bv	%r0(%r2)
	mtctl	%r25,%cr30

ENTRY(_switch_to_ret)
	mtctl	%r0, %cr0		/* Needed for single stepping */
	callee_rest
	callee_rest_float

	LDREG	-RP_OFFSET(%r30), %r2
	bv	%r0(%r2)
	copy	%r26, %r28
ENDPROC_CFI(_switch_to)
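
	/*
	 * Descriptive sketch (illustrative C; field names assumed):
	 *
	 *	prev->thread.kpc = &_switch_to_ret;
	 *	prev->thread.ksp = sp;
	 *	sp   = next->thread.ksp;
	 *	cr30 = task_thread_info(next);
	 *	goto *next->thread.kpc;		// resumes in _switch_to_ret
	 *	// ... which returns prev in %r28
	 */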

	/*
	 * Common rfi return path for interruptions, kernel execve, and
	 * sys_rt_sigreturn (sometimes).  The sys_rt_sigreturn syscall will
	 * return via this path if the signal was received when the process
	 * was running; if the process was blocked on a syscall then the
	 * normal syscall_exit path is used.  All syscalls for traced
	 * processes exit via intr_restore.
	 *
	 * XXX If any syscalls that change a process's space id ever exit
	 * this way, then we will need to copy %sr3 into PT_SR[3..7], and
	 * adjust IASQ[0..1].
	 *
	 */

	.align	PAGE_SIZE

ENTRY_CFI(syscall_exit_rfi)
	mfctl	%cr30,%r16
	LDREG	TI_TASK(%r16), %r16	/* thread_info -> task_struct */
	ldo	TASK_REGS(%r16),%r16
	/* Force iaoq to userspace, as the user has had access to our current
	 * context via sigcontext. Also filter the PSW for the same reason.
	 */
	LDREG	PT_IAOQ0(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ0(%r16)
	LDREG	PT_IAOQ1(%r16),%r19
	depi	3,31,2,%r19
	STREG	%r19,PT_IAOQ1(%r16)
	LDREG	PT_PSW(%r16),%r19
	load32	USER_PSW_MASK,%r1
#ifdef CONFIG_64BIT
	load32	USER_PSW_HI_MASK,%r20
	depd	%r20,31,32,%r1
#endif
	and	%r19,%r1,%r19	/* Mask out bits that user shouldn't play with */
	load32	USER_PSW,%r1
	or	%r19,%r1,%r19	/* Make sure default USER_PSW bits are set */
	STREG	%r19,PT_PSW(%r16)

	/*
	 * If we aren't being traced, we never saved space registers
	 * (we don't store them in the sigcontext), so set them
	 * to "proper" values now (otherwise we'll wind up restoring
	 * whatever was last stored in the task structure, which might
	 * be inconsistent if an interrupt occurred while on the gateway
	 * page). Note that we may be "trashing" values the user put in
	 * them, but we don't support the user changing them.
	 */

	STREG	%r0,PT_SR2(%r16)
	mfsp	%sr3,%r19
	STREG	%r19,PT_SR0(%r16)
	STREG	%r19,PT_SR1(%r16)
	STREG	%r19,PT_SR3(%r16)
	STREG	%r19,PT_SR4(%r16)
	STREG	%r19,PT_SR5(%r16)
	STREG	%r19,PT_SR6(%r16)
	STREG	%r19,PT_SR7(%r16)

ENTRY(intr_return)
	/* check for reschedule */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19	/* sched.h: TIF_NEED_RESCHED */
	bb,<,n	%r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */

	.import do_notify_resume,code
intr_check_sig:
	/* As above */
	mfctl	%cr30,%r1
	LDREG	TI_FLAGS(%r1),%r19
	ldi	(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
	and,COND(<>)	%r19, %r20, %r0
	b,n	intr_restore	/* skip past if we've nothing to do */

	/* This check is critical to having LWS
	 * working. The IASQ is zero on the gateway
	 * page and we cannot deliver any signals until
	 * we get off the gateway page.
	 *
	 * Only do signals if we are returning to user space
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* backward */

	/* NOTE: We need to enable interrupts if we have to deliver
	 * signals. We used to do this earlier but it caused kernel
	 * stack overflows. */
	ssm	PSW_SM_I, %r0

	copy	%r0, %r25			/* long in_syscall = 0 */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29			/* Reference param save area */
#endif

	BL	do_notify_resume,%r2
	copy	%r16, %r26			/* struct pt_regs *regs */

	b,n	intr_check_sig

intr_restore:
	copy	%r16,%r29
	ldo	PT_FR31(%r29),%r1
	rest_fp	%r1
	rest_general %r29

	/* inverse of virt_map */
	pcxt_ssm_bug
	rsm	PSW_SM_QUIET,%r0	/* prepare for rfi */
	tophys_r1 %r29

	/* Restore space id's and special cr's from PT_REGS
	 * structure pointed to by r29
	 */
	rest_specials %r29

	/* IMPORTANT: rest_stack restores r29 last (we are using it)!
	 * It also restores r1 and r30.
	 */
	rest_stack

	rfi
	nop

#ifndef CONFIG_PREEMPTION
# define intr_do_preempt	intr_restore
#endif /* !CONFIG_PREEMPTION */

	.import schedule,code
intr_do_resched:
	/* Only call schedule on return to userspace. If we're returning
	 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
	 * we jump back to intr_restore.
	 */
	LDREG	PT_IASQ0(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop
	LDREG	PT_IASQ1(%r16), %r20
	cmpib,COND(=)	0, %r20, intr_do_preempt
	nop

	/* NOTE: We need to enable interrupts if we schedule.  We used
	 * to do this earlier but it caused kernel stack overflows. */
	ssm	PSW_SM_I, %r0

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29		/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
#ifndef CONFIG_64BIT
	b	schedule
#else
	load32	schedule, %r20
	bv	%r0(%r20)
#endif
	ldo	R%intr_check_sig(%r2), %r2

	/* preempt the current task on returning to kernel
	 * mode from an interrupt, iff need_resched is set,
	 * and preempt_count is 0. otherwise, we continue on
	 * our merry way back to the current running task.
	 */
#ifdef CONFIG_PREEMPTION
	.import preempt_schedule_irq,code
intr_do_preempt:
	rsm	PSW_SM_I, %r0		/* disable interrupts */

	/* current_thread_info()->preempt_count */
	mfctl	%cr30, %r1
	LDREG	TI_PRE_COUNT(%r1), %r19
	cmpib,COND(<>)	0, %r19, intr_restore	/* if preempt_count > 0 */
	nop				/* prev insn branched backwards */

	/* check if we interrupted a critical path */
	LDREG	PT_PSW(%r16), %r20
	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
	nop

	BL	preempt_schedule_irq, %r2
	nop

	b,n	intr_restore		/* ssm PSW_SM_I done by intr_restore */
#endif /* CONFIG_PREEMPTION */

	/*
	 * External interrupts.
	 */

intr_extint:
	cmpib,COND(=),n 0,%r16,1f

	get_stack_use_cr30
	b,n 2f

1:
	get_stack_use_r30
2:
	save_specials	%r29
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r24
	save_fp	%r24

	loadgp

	copy	%r29, %r26	/* arg0 is pt_regs */
	copy	%r29, %r16	/* save pt_regs */

	ldil	L%intr_return, %r2

#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	b	do_cpu_irq_mask
	ldo	R%intr_return(%r2), %r2	/* return to intr_return, not here */
ENDPROC_CFI(syscall_exit_rfi)


	/* Generic interruptions (illegal insn, unaligned, page fault, etc) */

ENTRY_CFI(intr_save)		/* for os_hpmc */
	mfsp	%sr7,%r16
	cmpib,COND(=),n 0,%r16,1f
	get_stack_use_cr30
	b	2f
	copy	%r8,%r26

1:
	get_stack_use_r30
	copy	%r8,%r26

2:
	save_specials	%r29

	/* If this trap is an itlb miss, skip saving/adjusting isr/ior */
	cmpib,COND(=),n	PARISC_ITLB_TRAP,%r26,skip_save_ior


	mfctl	%isr, %r16
	nop	/* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
	mfctl	%ior, %r17


#ifdef CONFIG_64BIT
	/*
	 * If the interrupted code was running with W bit off (32 bit),
	 * clear the b bits (bits 0 & 1) in the ior.
	 * save_specials left ipsw value in r8 for us to test.
	 */
	extrd,u,*<>	%r8,PSW_W_BIT,1,%r0
	depdi	0,1,2,%r17

	/* adjust isr/ior: get high bits from isr and deposit in ior */
	space_adjust	%r16,%r17,%r1
#endif
	STREG	%r16, PT_ISR(%r29)
	STREG	%r17, PT_IOR(%r29)

#if 0 && defined(CONFIG_64BIT)
	/* Revisit when we have 64-bit code above 4Gb */
	b,n	intr_save2

skip_save_ior:
	/* We have an itlb miss, and when executing code above 4 Gb on ILP64, we
	 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
	 * above.
	 */
	extrd,u,*	%r8,PSW_W_BIT,1,%r1
	cmpib,COND(=),n	1,%r1,intr_save2
	LDREG	PT_IASQ0(%r29), %r16
	LDREG	PT_IAOQ0(%r29), %r17
	/* adjust iasq/iaoq */
	space_adjust	%r16,%r17,%r1
	STREG	%r16, PT_IASQ0(%r29)
	STREG	%r17, PT_IAOQ0(%r29)
#else
skip_save_ior:
#endif

intr_save2:
	virt_map
	save_general	%r29

	ldo	PT_FR0(%r29), %r25
	save_fp	%r25

	loadgp

	copy	%r29, %r25	/* arg1 is pt_regs */
#ifdef CONFIG_64BIT
	ldo	-16(%r30),%r29	/* Reference param save area */
#endif

	ldil	L%intr_check_sig, %r2
	copy	%r25, %r16	/* save pt_regs */

	b	handle_interruption
	ldo	R%intr_check_sig(%r2), %r2
ENDPROC_CFI(intr_save)


	/*
	 * Note for all tlb miss handlers:
	 *
	 * cr24 contains a pointer to the kernel address space
	 * page directory.
	 *
	 * cr25 contains a pointer to the current user address
	 * space page directory.
	 *
	 * sr3 will contain the space id of the user address space
	 * of the current running thread while that thread is
	 * running in the kernel.
	 */

	/*
	 * register number allocations.  Note that these are all
	 * in the shadowed registers
	 */

	t0 = r1		/* temporary register 0 */
	va = r8		/* virtual address for which the trap occurred */
	t1 = r9		/* temporary register 1 */
	pte  = r16	/* pte/phys page # */
	prot = r17	/* prot bits */
	spc  = r24	/* space for which the trap occurred */
	ptp = r25	/* page directory/page table pointer */

#ifdef CONFIG_64BIT

dtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20w:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,nadtlb_fault

	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20w:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#else

dtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp	%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp	spc,%sr1

	idtlba	pte,(%sr1,va)
	idtlbp	prot,(%sr1,va)

	mtsp	t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,11

	idtlba	pte,(va)
	idtlbp	prot,(va)

	rfir
	nop

nadtlb_miss_11:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb_11	spc,pte,prot

	mfsp	%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
	mtsp	spc,%sr1

	idtlba	pte,(%sr1,va)
	idtlbp	prot,(%sr1,va)

	mtsp	t1, %sr1	/* Restore sr1 */

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_11:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,11

	idtlba	pte,(va)
	idtlbp	prot,(va)

	rfir
	nop

dtlb_miss_20:
	space_adjust	spc,va,t0
	get_pgd		spc,ptp
	space_check	spc,t0,dtlb_fault

	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

dtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,dtlb_fault,20

	idtlbt		pte,prot

	rfir
	nop

nadtlb_miss_20:
	get_pgd		spc,ptp

	space_check	spc,t0,nadtlb_fault

	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20

	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
	update_accessed	ptp,pte,t0,t1

	make_insert_tlb	spc,pte,prot,t1

	f_extend	pte,t1

	idtlbt		pte,prot

	tlb_unlock1	spc,t0
	rfir
	nop

nadtlb_check_alias_20:
	do_alias	spc,t0,t1,va,pte,prot,nadtlb_emulate,20

	idtlbt		pte,prot

	rfir
	nop

#endif

nadtlb_emulate:

	/*
	 * Non access misses can be caused by fdc,fic,pdc,lpa,probe and
	 * probei instructions. We don't want to fault for these
	 * instructions (not only does it not make sense, it can cause
	 * deadlocks, since some flushes are done with the mmap
	 * semaphore held). If the translation doesn't exist, we can't
	 * insert a translation, so have to emulate the side effects
	 * of the instruction. Since we don't insert a translation
	 * we can get a lot of faults during a flush loop, so it makes
	 * sense to try to do it here with minimum overhead. We only
	 * emulate fdc,fic,pdc,probew,prober instructions whose base
	 * and index registers are not shadowed. We defer everything
	 * else to the "slow" path.
	 */
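
	/*
	 * Decode sketch in C (illustrative only; field positions match
	 * the extract instructions below, and get_register is assumed
	 * to return -1 for shadowed registers):
	 *
	 *	u32 iir = mfctl(19);
	 *	if ((iir & 0x280) != 0x280)
	 *		goto nadtlb_probe_check;  // not fdc/fdce/pdc/"fic,4f"
	 *	if (!(iir & (1 << 5)))		  // m bit clear:
	 *		goto nadtlb_nullify;	  // no base update needed
	 *	x = get_register((iir >> 16) & 0x1f);	// index register
	 *	b = get_register((iir >> 21) & 0x1f);	// base register
	 *	if (x == -1 || b == -1)
	 *		goto nadtlb_fault;	  // shadowed: slow path
	 *	set_register((iir >> 21) & 0x1f, b + x);
	 *	goto nadtlb_nullify;
	 */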
1348
1349 mfctl %cr19,%r9 /* Get iir */
1350
1351 /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
1352 Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw */
1353
1354 /* Checks for fdc,fdce,pdc,"fic,4f" only */
1355 ldi 0x280,%r16
1356 and %r9,%r16,%r17
1357 cmpb,<>,n %r16,%r17,nadtlb_probe_check
1358 bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
1359 BL get_register,%r25
1360 extrw,u %r9,15,5,%r8 /* Get index register # */
1361 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1362 copy %r1,%r24
1363 BL get_register,%r25
1364 extrw,u %r9,10,5,%r8 /* Get base register # */
1365 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1366 BL set_register,%r25
1367 add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
1368
1369nadtlb_nullify:
1370 mfctl %ipsw,%r8
1371 ldil L%PSW_N,%r9
1372 or %r8,%r9,%r8 /* Set PSW_N */
1373 mtctl %r8,%ipsw
1374
1375 rfir
1376 nop
1377
1378 /*
1379 When there is no translation for the probe address then we
1380 must nullify the insn and return zero in the target register.
1381 This will indicate to the calling code that it does not have
1382 write/read privileges to this address.
1383
1384 This should technically work for prober and probew in PA 1.1,
1385 and also probe,r and probe,w in PA 2.0
1386
1387 WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
1388 THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
1389
1390 */
1391nadtlb_probe_check:
1392 ldi 0x80,%r16
1393 and %r9,%r16,%r17
1394 cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
1395 BL get_register,%r25 /* Find the target register */
1396 extrw,u %r9,31,5,%r8 /* Get target register */
1397 cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
1398 BL set_register,%r25
1399 copy %r0,%r1 /* Write zero to target register */
1400 b nadtlb_nullify /* Nullify return insn */
1401 nop
1402
1403
1404#ifdef CONFIG_64BIT
1405itlb_miss_20w:
1406
1407 /*
1408 * I miss is a little different, since we allow users to fault
1409 * on the gateway page which is in the kernel address space.
1410 */
1411
1412 space_adjust spc,va,t0
1413 get_pgd spc,ptp
1414 space_check spc,t0,itlb_fault
1415
1416 L3_ptep ptp,pte,t0,va,itlb_fault
1417
1418 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1419 update_accessed ptp,pte,t0,t1
1420
1421 make_insert_tlb spc,pte,prot,t1
1422
1423 iitlbt pte,prot
1424
1425 tlb_unlock1 spc,t0
1426 rfir
1427 nop
1428
1429naitlb_miss_20w:
1430
1431 /*
1432 * I miss is a little different, since we allow users to fault
1433 * on the gateway page which is in the kernel address space.
1434 */
1435
1436 space_adjust spc,va,t0
1437 get_pgd spc,ptp
1438 space_check spc,t0,naitlb_fault
1439
1440 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1441
1442 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1443 update_accessed ptp,pte,t0,t1
1444
1445 make_insert_tlb spc,pte,prot,t1
1446
1447 iitlbt pte,prot
1448
1449 tlb_unlock1 spc,t0
1450 rfir
1451 nop
1452
1453naitlb_check_alias_20w:
1454 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1455
1456 iitlbt pte,prot
1457
1458 rfir
1459 nop
1460
1461#else
1462
1463itlb_miss_11:
1464 get_pgd spc,ptp
1465
1466 space_check spc,t0,itlb_fault
1467
1468 L2_ptep ptp,pte,t0,va,itlb_fault
1469
1470 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1471 update_accessed ptp,pte,t0,t1
1472
1473 make_insert_tlb_11 spc,pte,prot
1474
1475 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1476 mtsp spc,%sr1
1477
1478 iitlba pte,(%sr1,va)
1479 iitlbp prot,(%sr1,va)
1480
1481 mtsp t1, %sr1 /* Restore sr1 */
1482
1483 tlb_unlock1 spc,t0
1484 rfir
1485 nop
1486
1487naitlb_miss_11:
1488 get_pgd spc,ptp
1489
1490 space_check spc,t0,naitlb_fault
1491
1492 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1493
1494 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1495 update_accessed ptp,pte,t0,t1
1496
1497 make_insert_tlb_11 spc,pte,prot
1498
1499 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1500 mtsp spc,%sr1
1501
1502 iitlba pte,(%sr1,va)
1503 iitlbp prot,(%sr1,va)
1504
1505 mtsp t1, %sr1 /* Restore sr1 */
1506
1507 tlb_unlock1 spc,t0
1508 rfir
1509 nop
1510
1511naitlb_check_alias_11:
1512 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1513
1514 iitlba pte,(%sr0, va)
1515 iitlbp prot,(%sr0, va)
1516
1517 rfir
1518 nop
1519
1520
1521itlb_miss_20:
1522 get_pgd spc,ptp
1523
1524 space_check spc,t0,itlb_fault
1525
1526 L2_ptep ptp,pte,t0,va,itlb_fault
1527
1528 tlb_lock spc,ptp,pte,t0,t1,itlb_fault
1529 update_accessed ptp,pte,t0,t1
1530
1531 make_insert_tlb spc,pte,prot,t1
1532
1533 f_extend pte,t1
1534
1535 iitlbt pte,prot
1536
1537 tlb_unlock1 spc,t0
1538 rfir
1539 nop
1540
1541naitlb_miss_20:
1542 get_pgd spc,ptp
1543
1544 space_check spc,t0,naitlb_fault
1545
1546 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1547
1548 tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1549 update_accessed ptp,pte,t0,t1
1550
1551 make_insert_tlb spc,pte,prot,t1
1552
1553 f_extend pte,t1
1554
1555 iitlbt pte,prot
1556
1557 tlb_unlock1 spc,t0
1558 rfir
1559 nop
1560
1561naitlb_check_alias_20:
1562 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1563
1564 iitlbt pte,prot
1565
1566 rfir
1567 nop
1568
1569#endif
1570
1571#ifdef CONFIG_64BIT
1572
1573dbit_trap_20w:
1574 space_adjust spc,va,t0
1575 get_pgd spc,ptp
1576 space_check spc,t0,dbit_fault
1577
1578 L3_ptep ptp,pte,t0,va,dbit_fault
1579
1580 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1581 update_dirty ptp,pte,t1
1582
1583 make_insert_tlb spc,pte,prot,t1
1584
1585 idtlbt pte,prot
1586
1587 tlb_unlock0 spc,t0
1588 rfir
1589 nop
1590#else
1591
1592dbit_trap_11:
1593
1594 get_pgd spc,ptp
1595
1596 space_check spc,t0,dbit_fault
1597
1598 L2_ptep ptp,pte,t0,va,dbit_fault
1599
1600 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1601 update_dirty ptp,pte,t1
1602
1603 make_insert_tlb_11 spc,pte,prot
1604
1605 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1606 mtsp spc,%sr1
1607
1608 idtlba pte,(%sr1,va)
1609 idtlbp prot,(%sr1,va)
1610
1611 mtsp t1, %sr1 /* Restore sr1 */
1612
1613 tlb_unlock0 spc,t0
1614 rfir
1615 nop
1616
1617dbit_trap_20:
1618 get_pgd spc,ptp
1619
1620 space_check spc,t0,dbit_fault
1621
1622 L2_ptep ptp,pte,t0,va,dbit_fault
1623
1624 tlb_lock spc,ptp,pte,t0,t1,dbit_fault
1625 update_dirty ptp,pte,t1
1626
1627 make_insert_tlb spc,pte,prot,t1
1628
1629 f_extend pte,t1
1630
1631 idtlbt pte,prot
1632
1633 tlb_unlock0 spc,t0
1634 rfir
1635 nop
1636#endif
1637
1638 .import handle_interruption,code
1639
1640kernel_bad_space:
1641 b intr_save
1642 ldi 31,%r8 /* Use an unused code */
1643
1644dbit_fault:
1645 b intr_save
1646 ldi 20,%r8
1647
1648itlb_fault:
1649 b intr_save
1650 ldi PARISC_ITLB_TRAP,%r8
1651
1652nadtlb_fault:
1653 b intr_save
1654 ldi 17,%r8
1655
1656naitlb_fault:
1657 b intr_save
1658 ldi 16,%r8
1659
1660dtlb_fault:
1661 b intr_save
1662 ldi 15,%r8
1663
1664 /* Register saving semantics for system calls:
1665
1666 %r1 clobbered by system call macro in userspace
1667 %r2 saved in PT_REGS by gateway page
1668 %r3 - %r18 preserved by C code (saved by signal code)
1669 %r19 - %r20 saved in PT_REGS by gateway page
1670 %r21 - %r22 non-standard syscall args
1671 stored in kernel stack by gateway page
1672 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1673 %r27 - %r30 saved in PT_REGS by gateway page
1674 %r31 syscall return pointer
1675 */
1676
1677 /* Floating point registers (FIXME: what do we do with these?)
1678
1679 %fr0 - %fr3 status/exception, not preserved
1680 %fr4 - %fr7 arguments
1681 %fr8 - %fr11 not preserved by C code
1682 %fr12 - %fr21 preserved by C code
1683 %fr22 - %fr31 not preserved by C code
1684 */
1685
1686 .macro reg_save regs
1687 STREG %r3, PT_GR3(\regs)
1688 STREG %r4, PT_GR4(\regs)
1689 STREG %r5, PT_GR5(\regs)
1690 STREG %r6, PT_GR6(\regs)
1691 STREG %r7, PT_GR7(\regs)
1692 STREG %r8, PT_GR8(\regs)
1693 STREG %r9, PT_GR9(\regs)
1694 STREG %r10,PT_GR10(\regs)
1695 STREG %r11,PT_GR11(\regs)
1696 STREG %r12,PT_GR12(\regs)
1697 STREG %r13,PT_GR13(\regs)
1698 STREG %r14,PT_GR14(\regs)
1699 STREG %r15,PT_GR15(\regs)
1700 STREG %r16,PT_GR16(\regs)
1701 STREG %r17,PT_GR17(\regs)
1702 STREG %r18,PT_GR18(\regs)
1703 .endm
1704
1705 .macro reg_restore regs
1706 LDREG PT_GR3(\regs), %r3
1707 LDREG PT_GR4(\regs), %r4
1708 LDREG PT_GR5(\regs), %r5
1709 LDREG PT_GR6(\regs), %r6
1710 LDREG PT_GR7(\regs), %r7
1711 LDREG PT_GR8(\regs), %r8
1712 LDREG PT_GR9(\regs), %r9
1713 LDREG PT_GR10(\regs),%r10
1714 LDREG PT_GR11(\regs),%r11
1715 LDREG PT_GR12(\regs),%r12
1716 LDREG PT_GR13(\regs),%r13
1717 LDREG PT_GR14(\regs),%r14
1718 LDREG PT_GR15(\regs),%r15
1719 LDREG PT_GR16(\regs),%r16
1720 LDREG PT_GR17(\regs),%r17
1721 LDREG PT_GR18(\regs),%r18
1722 .endm
1723
1724 .macro fork_like name
1725ENTRY_CFI(sys_\name\()_wrapper)
1726 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1727 ldo TASK_REGS(%r1),%r1
1728 reg_save %r1
1729 mfctl %cr27, %r28
1730 ldil L%sys_\name, %r31
1731 be R%sys_\name(%sr4,%r31)
1732 STREG %r28, PT_CR27(%r1)
1733ENDPROC_CFI(sys_\name\()_wrapper)
1734 .endm
1735
1736fork_like clone
1737fork_like clone3
1738fork_like fork
1739fork_like vfork
1740
1741 /* Set the return value for the child */
1742ENTRY(child_return)
1743 BL schedule_tail, %r2
1744 nop
1745finish_child_return:
1746 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
1747 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1748
1749 LDREG PT_CR27(%r1), %r3
1750 mtctl %r3, %cr27
1751 reg_restore %r1
1752 b syscall_exit
1753 copy %r0,%r28
1754END(child_return)
1755
1756ENTRY_CFI(sys_rt_sigreturn_wrapper)
1757 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
1758 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1759 /* Don't save regs, we are going to restore them from sigcontext. */
1760 STREG %r2, -RP_OFFSET(%r30)
1761#ifdef CONFIG_64BIT
1762 ldo FRAME_SIZE(%r30), %r30
1763 BL sys_rt_sigreturn,%r2
1764 ldo -16(%r30),%r29 /* Reference param save area */
1765#else
1766 BL sys_rt_sigreturn,%r2
1767 ldo FRAME_SIZE(%r30), %r30
1768#endif
1769
1770 ldo -FRAME_SIZE(%r30), %r30
1771 LDREG -RP_OFFSET(%r30), %r2
1772
1773 /* FIXME: I think we need to restore a few more things here. */
1774 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1775 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1776 reg_restore %r1
1777
1778 /* If the signal was received while the process was blocked on a
1779 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1780 * take us to syscall_exit_rfi and on to intr_return.
1781 */
1782 bv %r0(%r2)
1783 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1784ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1785
1786ENTRY(syscall_exit)
1787 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1788 * via syscall_exit_rfi if the signal was received while the process
1789 * was running.
1790 */
1791
1792 /* save return value now */
1793
1794 mfctl %cr30, %r1
1795 LDREG TI_TASK(%r1),%r1
1796 STREG %r28,TASK_PT_GR28(%r1)
1797
1798 /* Seems to me that dp could be wrong here, if the syscall involved
1799 * calling a module, and nothing got round to restoring dp on return.
1800 */
1801 loadgp
1802
1803syscall_check_resched:
1804
1805 /* check for reschedule */
1806
1807 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
1808 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1809
1810 .import do_signal,code
1811syscall_check_sig:
1812 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
1813 ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
1814 and,COND(<>) %r19, %r26, %r0
1815 b,n syscall_restore /* skip past if we've nothing to do */
1816
1817syscall_do_signal:
1818 /* Save callee-save registers (for sigcontext).
1819 * FIXME: After this point the process structure should be
1820 * consistent with all the relevant state of the process
1821 * before the syscall. We need to verify this.
1822 */
1823 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1824 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1825 reg_save %r26
1826
1827#ifdef CONFIG_64BIT
1828 ldo -16(%r30),%r29 /* Reference param save area */
1829#endif
1830
1831 BL do_notify_resume,%r2
1832 ldi 1, %r25 /* long in_syscall = 1 */
1833
1834 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1835 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
1836 reg_restore %r20
1837
1838 b,n syscall_check_sig
1839
1840syscall_restore:
1841 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
1842
1843 /* Are we being ptraced? */
1844 ldw TASK_FLAGS(%r1),%r19
1845 ldi _TIF_SYSCALL_TRACE_MASK,%r2
1846 and,COND(=) %r19,%r2,%r0
1847 b,n syscall_restore_rfi
1848
1849 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1850 rest_fp %r19
1851
1852 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
1853 mtsar %r19
1854
1855 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1856 LDREG TASK_PT_GR19(%r1),%r19
1857 LDREG TASK_PT_GR20(%r1),%r20
1858 LDREG TASK_PT_GR21(%r1),%r21
1859 LDREG TASK_PT_GR22(%r1),%r22
1860 LDREG TASK_PT_GR23(%r1),%r23
1861 LDREG TASK_PT_GR24(%r1),%r24
1862 LDREG TASK_PT_GR25(%r1),%r25
1863 LDREG TASK_PT_GR26(%r1),%r26
1864 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1865 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1866 LDREG TASK_PT_GR29(%r1),%r29
1867 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1868
1869 /* NOTE: We use rsm/ssm pair to make this operation atomic */
1870 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1871 rsm PSW_SM_I, %r0
1872 copy %r1,%r30 /* Restore user sp */
1873 mfsp %sr3,%r1 /* Get user space id */
1874 mtsp %r1,%sr7 /* Restore sr7 */
1875 ssm PSW_SM_I, %r0
1876
1877 /* Set sr2 to zero for userspace syscalls to work. */
1878 mtsp %r0,%sr2
1879 mtsp %r1,%sr4 /* Restore sr4 */
1880 mtsp %r1,%sr5 /* Restore sr5 */
1881 mtsp %r1,%sr6 /* Restore sr6 */
1882
1883 depi 3,31,2,%r31 /* ensure return to user mode. */
1884
1885#ifdef CONFIG_64BIT
1886 /* decide whether to reset the wide mode bit
1887 *
1888 * For a syscall, the W bit is stored in the lowest bit
1889 * of sp. Extract it and reset W if it is zero */
1890 extrd,u,*<> %r30,63,1,%r1
1891 rsm PSW_SM_W, %r0
1892 /* now reset the lowest bit of sp if it was set */
1893 xor %r30,%r1,%r30
1894#endif
1895 be,n 0(%sr3,%r31) /* return to user space */
1896
1897 /* We have to return via an RFI, so that PSW T and R bits can be set
1898 * appropriately.
1899 * This sets up pt_regs so we can return via intr_restore, which is not
1900 * the most efficient way of doing things, but it works.
1901 */
1902syscall_restore_rfi:
1903 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1904 mtctl %r2,%cr0 /* for immediate trap */
1905 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1906 ldi 0x0b,%r20 /* Create new PSW */
1907 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1908
1909 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1910 * set in thread_info.h and converted to PA bitmap
1911 * numbers in asm-offsets.c */
1912
1913 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1914 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1915 depi -1,27,1,%r20 /* R bit */
1916
1917 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1918 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1919 depi -1,7,1,%r20 /* T bit */
1920
1921 STREG %r20,TASK_PT_PSW(%r1)
1922
1923 /* Always store space registers, since sr3 can be changed (e.g. fork) */
1924
1925 mfsp %sr3,%r25
1926 STREG %r25,TASK_PT_SR3(%r1)
1927 STREG %r25,TASK_PT_SR4(%r1)
1928 STREG %r25,TASK_PT_SR5(%r1)
1929 STREG %r25,TASK_PT_SR6(%r1)
1930 STREG %r25,TASK_PT_SR7(%r1)
1931 STREG %r25,TASK_PT_IASQ0(%r1)
1932 STREG %r25,TASK_PT_IASQ1(%r1)
1933
1934 /* XXX W bit??? */
1935 /* Now if old D bit is clear, it means we didn't save all registers
1936 * on syscall entry, so do that now. This only happens on TRACEME
1937 * calls, or if someone attached to us while we were on a syscall.
1938 * We could make this more efficient by not saving r3-r18, but
1939 * then we wouldn't be able to use the common intr_restore path.
1940 * It is only for traced processes anyway, so performance is not
1941 * an issue.
1942 */
1943 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1944 ldo TASK_REGS(%r1),%r25
1945 reg_save %r25 /* Save r3 to r18 */
1946
1947 /* Save the current sr */
1948 mfsp %sr0,%r2
1949 STREG %r2,TASK_PT_SR0(%r1)
1950
1951 /* Save the scratch sr */
1952 mfsp %sr1,%r2
1953 STREG %r2,TASK_PT_SR1(%r1)
1954
1955 /* sr2 should be set to zero for userspace syscalls */
1956 STREG %r0,TASK_PT_SR2(%r1)
1957
1958 LDREG TASK_PT_GR31(%r1),%r2
1959 depi 3,31,2,%r2 /* ensure return to user mode. */
1960 STREG %r2,TASK_PT_IAOQ0(%r1)
1961 ldo 4(%r2),%r2
1962 STREG %r2,TASK_PT_IAOQ1(%r1)
1963 b intr_restore
1964 copy %r25,%r16
1965
1966pt_regs_ok:
1967 LDREG TASK_PT_IAOQ0(%r1),%r2
1968 depi 3,31,2,%r2 /* ensure return to user mode. */
1969 STREG %r2,TASK_PT_IAOQ0(%r1)
1970 LDREG TASK_PT_IAOQ1(%r1),%r2
1971 depi 3,31,2,%r2
1972 STREG %r2,TASK_PT_IAOQ1(%r1)
1973 b intr_restore
1974 copy %r25,%r16
1975
1976syscall_do_resched:
1977 load32 syscall_check_resched,%r2 /* if resched, we start over again */
1978 load32 schedule,%r19
1979 bv %r0(%r19) /* jumps to schedule() */
1980#ifdef CONFIG_64BIT
1981 ldo -16(%r30),%r29 /* Reference param save area */
1982#else
1983 nop
1984#endif
1985END(syscall_exit)
1986
1987
1988#ifdef CONFIG_FUNCTION_TRACER
1989
1990 .import ftrace_function_trampoline,code
1991 .align L1_CACHE_BYTES
1992ENTRY_CFI(mcount, caller)
1993_mcount:
1994 .export _mcount,data
1995 /*
1996 * The 64bit mcount() function pointer needs 4 dwords, of which the
1997 * first two are free. We optimize it here and put 2 instructions for
1998 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
1999 * have all on one L1 cacheline.
2000 */
2001 ldi 0, %arg3
2002 b ftrace_function_trampoline
2003 copy %r3, %arg2 /* caller original %sp */
2004ftrace_stub:
2005 .globl ftrace_stub
2006 .type ftrace_stub, @function
2007#ifdef CONFIG_64BIT
2008 bve (%rp)
2009#else
2010 bv %r0(%rp)
2011#endif
2012 nop
2013#ifdef CONFIG_64BIT
2014 .dword mcount
2015 .dword 0 /* code in head.S puts value of global gp here */
2016#endif
2017ENDPROC_CFI(mcount)
2018
2019#ifdef CONFIG_DYNAMIC_FTRACE
2020
2021#ifdef CONFIG_64BIT
2022#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
2023#else
2024#define FTRACE_FRAME_SIZE FRAME_SIZE
2025#endif
2026ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
2027ftrace_caller:
2028 .global ftrace_caller
2029
2030 STREG %r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
2031 ldo -FTRACE_FRAME_SIZE(%sp), %r3
2032 STREG %rp, -RP_OFFSET(%r3)
2033
2034 /* Offset 0 is already allocated for %r1 */
2035 STREG %r23, 2*REG_SZ(%r3)
2036 STREG %r24, 3*REG_SZ(%r3)
2037 STREG %r25, 4*REG_SZ(%r3)
2038 STREG %r26, 5*REG_SZ(%r3)
2039 STREG %r28, 6*REG_SZ(%r3)
2040 STREG %r29, 7*REG_SZ(%r3)
2041#ifdef CONFIG_64BIT
2042 STREG %r19, 8*REG_SZ(%r3)
2043 STREG %r20, 9*REG_SZ(%r3)
2044 STREG %r21, 10*REG_SZ(%r3)
2045 STREG %r22, 11*REG_SZ(%r3)
2046 STREG %r27, 12*REG_SZ(%r3)
2047 STREG %r31, 13*REG_SZ(%r3)
2048 loadgp
2049 ldo -16(%sp),%r29
2050#endif
2051 LDREG 0(%r3), %r25
2052 copy %rp, %r26
2053 ldo -8(%r25), %r25
2054 ldi 0, %r23 /* no pt_regs */
2055 b,l ftrace_function_trampoline, %rp
2056 copy %r3, %r24
2057
2058 LDREG -RP_OFFSET(%r3), %rp
2059 LDREG 2*REG_SZ(%r3), %r23
2060 LDREG 3*REG_SZ(%r3), %r24
2061 LDREG 4*REG_SZ(%r3), %r25
2062 LDREG 5*REG_SZ(%r3), %r26
2063 LDREG 6*REG_SZ(%r3), %r28
2064 LDREG 7*REG_SZ(%r3), %r29
2065#ifdef CONFIG_64BIT
2066 LDREG 8*REG_SZ(%r3), %r19
2067 LDREG 9*REG_SZ(%r3), %r20
2068 LDREG 10*REG_SZ(%r3), %r21
2069 LDREG 11*REG_SZ(%r3), %r22
2070 LDREG 12*REG_SZ(%r3), %r27
2071 LDREG 13*REG_SZ(%r3), %r31
2072#endif
2073 LDREG 1*REG_SZ(%r3), %r3
2074
2075 LDREGM -FTRACE_FRAME_SIZE(%sp), %r1
2076 /* Adjust return point to jump back to beginning of traced function */
2077 ldo -4(%r1), %r1
2078 bv,n (%r1)
2079
2080ENDPROC_CFI(ftrace_caller)

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG	%r10, PT_GR10(%r1)
	STREG	%r11, PT_GR11(%r1)
	STREG	%r12, PT_GR12(%r1)
	STREG	%r13, PT_GR13(%r1)
	STREG	%r14, PT_GR14(%r1)
	STREG	%r15, PT_GR15(%r1)
	STREG	%r16, PT_GR16(%r1)
	STREG	%r17, PT_GR17(%r1)
	STREG	%r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3	/* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG	PT_GR10(%r1),%r10
	LDREG	PT_GR11(%r1),%r11
	LDREG	PT_GR12(%r1),%r12
	LDREG	PT_GR13(%r1),%r13
	LDREG	PT_GR14(%r1),%r14
	LDREG	PT_GR15(%r1),%r15
	LDREG	PT_GR16(%r1),%r16
	LDREG	PT_GR17(%r1),%r17
	LDREG	PT_GR18(%r1),%r18
	LDREG	PT_GR19(%r1),%r19
	LDREG	PT_GR20(%r1),%r20
	LDREG	PT_GR21(%r1),%r21
	LDREG	PT_GR22(%r1),%r22
	LDREG	PT_GR23(%r1),%r23
	LDREG	PT_GR24(%r1),%r24
	LDREG	PT_GR25(%r1),%r25
	LDREG	PT_GR26(%r1),%r26
	LDREG	PT_GR27(%r1),%r27
	LDREG	PT_GR28(%r1),%r28
	LDREG	PT_GR29(%r1),%r29
	LDREG	PT_GR30(%r1),%r30
	LDREG	PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy	%r3,%r1
	STREG	%r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy	%sp,%r3
	STREGM	%r1,FRAME_SIZE(%sp)
	STREG	%ret0,8(%r3)
	STREG	%ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32	ftrace_return_to_handler,%ret0
	load32	.Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%sp),%ret1	/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi	0,%r26
.Lftrace_ret:
	copy	%ret0,%rp

	/* restore original return values */
	LDREG	8(%r3),%ret0
	LDREG	16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM	-FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */
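/*
 * Both trampolines above branch to the same C handler with the same
 * argument mapping: %arg0 (%r26) holds the return address into the
 * traced function's caller, %arg1 (%r25) the saved %r1 biased back by
 * 8 to the start of the traced function, %arg2 (%r24) the base of the
 * scratch frame, and %arg3 (%r23) either 0 or a struct pt_regs
 * pointer. A hedged sketch of the matching C prototype; the
 * authoritative declaration lives in arch/parisc/kernel/ftrace.c and
 * may differ between kernel versions:
 *
 *	void notrace ftrace_function_trampoline(unsigned long parent,
 *			unsigned long self_addr,
 *			unsigned long org_sp_gr3,
 *			struct pt_regs *regs);
 */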

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
   unsigned long new_stack) */
ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call.  For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack.  We allocate two frames.  */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */
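/*
 * From C, call_on_stack() runs func(param1) with %sp switched into the
 * stack region starting at new_stack (parisc stacks grow upward, hence
 * the "ldo 2*FRAME_SIZE(%arg2), %sp" above), and restores the original
 * stack pointer when func returns. A hedged usage sketch; the helper
 * and variable names below are hypothetical:
 *
 *	void call_on_stack(unsigned long param1, void *func,
 *			   unsigned long new_stack);
 *
 *	static void irq_worker(unsigned long arg);
 *
 *	static void run_on_irq_stack(unsigned long arg,
 *				     unsigned long stack_base)
 *	{
 *		// irq_worker(arg) executes on the stack at stack_base
 *		call_on_stack(arg, irq_worker, stack_base);
 *	}
 */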

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non-access tlb miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also hold
	 * -1, but that is OK; it just means that we will have to take
	 * the slow path instead).
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)	/* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)	/* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)	/* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)	/* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)	/* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)	/* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)	/* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)	/* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)	/* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)	/* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)	/* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)	/* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)	/* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)	/* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)	/* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)	/* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)	/* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)	/* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)	/* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)	/* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)	/* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)	/* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)	/* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)	/* r31 */
	copy	%r31,%r1
ENDPROC_CFI(get_register)
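/*
 * The blr above indexes a table of two-instruction slots, one per
 * register number in %r8. Equivalent logic as a hedged C sketch (the
 * real routine must stay in assembly since it runs before any general
 * registers are saved; gr[] is illustrative, not a kernel symbol).
 * set_register below is the mirror image, writing %r1 into the
 * selected register instead:
 *
 *	long get_register_sketch(const unsigned long gr[32], int regnum)
 *	{
 *		switch (regnum) {
 *		case 1: case 8: case 9: case 16:
 *		case 17: case 24: case 25:
 *			return -1;	// shadowed: rfir restores them
 *		default:
 *			return gr[regnum];
 *		}
 *	}
 */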


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non-access tlb miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 (silly, but it is a placeholder) */
	copy	%r1,%r0
	bv	%r0(%r25)	/* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)	/* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)	/* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)	/* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)	/* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)	/* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)	/* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)	/* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)	/* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)	/* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)	/* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)	/* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)	/* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)	/* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)	/* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)	/* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)	/* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)	/* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)	/* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)	/* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)	/* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)	/* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)	/* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)	/* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)	/* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)	/* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)	/* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)	/* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)	/* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)	/* r31 */
	copy	%r1,%r31
ENDPROC_CFI(set_register)
2423
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
4 *
5 * kernel entry points (interruptions, system call wrappers)
6 * Copyright (C) 1999,2000 Philipp Rumpf
7 * Copyright (C) 1999 SuSE GmbH Nuernberg
8 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
9 * Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
10 */
11
12#include <asm/asm-offsets.h>
13
14/* we have the following possibilities to act on an interruption:
15 * - handle in assembly and use shadowed registers only
16 * - save registers to kernel stack and handle in assembly or C */
17
18
19#include <asm/psw.h>
20#include <asm/cache.h> /* for L1_CACHE_SHIFT */
21#include <asm/assembly.h> /* for LDREG/STREG defines */
22#include <asm/signal.h>
23#include <asm/unistd.h>
24#include <asm/ldcw.h>
25#include <asm/traps.h>
26#include <asm/thread_info.h>
27#include <asm/alternative.h>
28#include <asm/spinlock_types.h>
29
30#include <linux/linkage.h>
31#include <linux/pgtable.h>
32
33#ifdef CONFIG_64BIT
34 .level 2.0w
35#else
36 .level 2.0
37#endif
38
39/*
40 * We need seven instructions after a TLB insert for it to take effect.
41 * The PA8800/PA8900 processors are an exception and need 12 instructions.
42 * The RFI changes both IAOQ_Back and IAOQ_Front, so it counts as one.
43 */
44#ifdef CONFIG_64BIT
45#define NUM_PIPELINE_INSNS 12
46#else
47#define NUM_PIPELINE_INSNS 7
48#endif
49
50 /* Insert num nops */
51 .macro insert_nops num
52 .rept \num
53 nop
54 .endr
55 .endm
56
57 /* Get aligned page_table_lock address for this mm from cr28/tr4 */
58 .macro get_ptl reg
59 mfctl %cr28,\reg
60 .endm
61
62 /* space_to_prot macro creates a prot id from a space id */
63
64#if (SPACEID_SHIFT) == 0
65 .macro space_to_prot spc prot
66 depd,z \spc,62,31,\prot
67 .endm
68#else
69 .macro space_to_prot spc prot
70 extrd,u \spc,(64 - (SPACEID_SHIFT)),32,\prot
71 .endm
72#endif
73 /*
74 * The "get_stack" macros are responsible for determining the
75 * kernel stack value.
76 *
77 * If sr7 == 0
78 * Already using a kernel stack, so call the
79 * get_stack_use_r30 macro to push a pt_regs structure
80 * on the stack, and store registers there.
81 * else
82 * Need to set up a kernel stack, so call the
83 * get_stack_use_cr30 macro to set up a pointer
84 * to the pt_regs structure contained within the
85 * task pointer pointed to by cr30. Load the stack
86 * pointer from the task structure.
87 *
88 * Note that we use shadowed registers for temps until
89 * we can save %r26 and %r29. %r26 is used to preserve
90 * %r8 (a shadowed register) which temporarily contained
91 * either the fault type ("code") or the eirr. We need
92 * to use a non-shadowed register to carry the value over
93 * the rfir in virt_map. We use %r26 since this value winds
94 * up being passed as the argument to either do_cpu_irq_mask
95 * or handle_interruption. %r29 is used to hold a pointer
96 * the register save area, and once again, it needs to
97 * be a non-shadowed register so that it survives the rfir.
98 */
99
100 .macro get_stack_use_cr30
101
102 /* we save the registers in the task struct */
103
104 copy %r30, %r17
105 mfctl %cr30, %r1
106 tophys %r1,%r9 /* task_struct */
107 LDREG TASK_STACK(%r9),%r30
108 ldo PT_SZ_ALGN(%r30),%r30
109 mtsp %r0,%sr7 /* clear sr7 after kernel stack was set! */
110 mtsp %r16,%sr3
111 ldo TASK_REGS(%r9),%r9
112 STREG %r17,PT_GR30(%r9)
113 STREG %r29,PT_GR29(%r9)
114 STREG %r26,PT_GR26(%r9)
115 STREG %r16,PT_SR7(%r9)
116 copy %r9,%r29
117 .endm
118
119 .macro get_stack_use_r30
120
121 /* we put a struct pt_regs on the stack and save the registers there */
122
123 tophys %r30,%r9
124 copy %r30,%r1
125 ldo PT_SZ_ALGN(%r30),%r30
126 STREG %r1,PT_GR30(%r9)
127 STREG %r29,PT_GR29(%r9)
128 STREG %r26,PT_GR26(%r9)
129 STREG %r16,PT_SR7(%r9)
130 copy %r9,%r29
131 .endm
132
133 .macro rest_stack
134 LDREG PT_GR1(%r29), %r1
135 LDREG PT_GR30(%r29),%r30
136 LDREG PT_GR29(%r29),%r29
137 .endm
138
139 /* default interruption handler
140 * (calls traps.c:handle_interruption) */
141 .macro def code
142 b intr_save
143 ldi \code, %r8
144 .align 32
145 .endm
146
147 /* Interrupt interruption handler
148 * (calls irq.c:do_cpu_irq_mask) */
149 .macro extint code
150 b intr_extint
151 mfsp %sr7,%r16
152 .align 32
153 .endm
154
155 .import os_hpmc, code
156
157 /* HPMC handler */
158 .macro hpmc code
159 nop /* must be a NOP, will be patched later */
160 load32 PA(os_hpmc), %r3
161 bv,n 0(%r3)
162 nop
163 .word 0 /* checksum (will be patched) */
164 .word 0 /* address of handler */
165 .word 0 /* length of handler */
166 .endm
167
168 /*
169 * Performance Note: Instructions will be moved up into
170 * this part of the code later on, once we are sure
171 * that the tlb miss handlers are close to final form.
172 */
173
174 /* Register definitions for tlb miss handler macros */
175
176 va = r8 /* virtual address for which the trap occurred */
177 spc = r24 /* space for which the trap occurred */
178
179#ifndef CONFIG_64BIT
180
181 /*
182 * itlb miss interruption handler (parisc 1.1 - 32 bit)
183 */
184
185 .macro itlb_11 code
186
187 mfctl %pcsq, spc
188 b itlb_miss_11
189 mfctl %pcoq, va
190
191 .align 32
192 .endm
193#endif
194
195 /*
196 * itlb miss interruption handler (parisc 2.0)
197 */
198
199 .macro itlb_20 code
200 mfctl %pcsq, spc
201#ifdef CONFIG_64BIT
202 b itlb_miss_20w
203#else
204 b itlb_miss_20
205#endif
206 mfctl %pcoq, va
207
208 .align 32
209 .endm
210
211#ifndef CONFIG_64BIT
212 /*
213 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
214 */
215
216 .macro naitlb_11 code
217
218 mfctl %isr,spc
219 b naitlb_miss_11
220 mfctl %ior,va
221
222 .align 32
223 .endm
224#endif
225
226 /*
227 * naitlb miss interruption handler (parisc 2.0)
228 */
229
230 .macro naitlb_20 code
231
232 mfctl %isr,spc
233#ifdef CONFIG_64BIT
234 b naitlb_miss_20w
235#else
236 b naitlb_miss_20
237#endif
238 mfctl %ior,va
239
240 .align 32
241 .endm
242
243#ifndef CONFIG_64BIT
244 /*
245 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
246 */
247
248 .macro dtlb_11 code
249
250 mfctl %isr, spc
251 b dtlb_miss_11
252 mfctl %ior, va
253
254 .align 32
255 .endm
256#endif
257
258 /*
259 * dtlb miss interruption handler (parisc 2.0)
260 */
261
262 .macro dtlb_20 code
263
264 mfctl %isr, spc
265#ifdef CONFIG_64BIT
266 b dtlb_miss_20w
267#else
268 b dtlb_miss_20
269#endif
270 mfctl %ior, va
271
272 .align 32
273 .endm
274
275#ifndef CONFIG_64BIT
276 /* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
277
278 .macro nadtlb_11 code
279
280 mfctl %isr,spc
281 b nadtlb_miss_11
282 mfctl %ior,va
283
284 .align 32
285 .endm
286#endif
287
288 /* nadtlb miss interruption handler (parisc 2.0) */
289
290 .macro nadtlb_20 code
291
292 mfctl %isr,spc
293#ifdef CONFIG_64BIT
294 b nadtlb_miss_20w
295#else
296 b nadtlb_miss_20
297#endif
298 mfctl %ior,va
299
300 .align 32
301 .endm
302
303#ifndef CONFIG_64BIT
304 /*
305 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
306 */
307
308 .macro dbit_11 code
309
310 mfctl %isr,spc
311 b dbit_trap_11
312 mfctl %ior,va
313
314 .align 32
315 .endm
316#endif
317
318 /*
319 * dirty bit trap interruption handler (parisc 2.0)
320 */
321
322 .macro dbit_20 code
323
324 mfctl %isr,spc
325#ifdef CONFIG_64BIT
326 b dbit_trap_20w
327#else
328 b dbit_trap_20
329#endif
330 mfctl %ior,va
331
332 .align 32
333 .endm
334
335 /* In LP64, the space contains part of the upper 32 bits of the
336 * fault. We have to extract this and place it in the va,
337 * zeroing the corresponding bits in the space register */
338 .macro space_adjust spc,va,tmp
339#ifdef CONFIG_64BIT
340 extrd,u \spc,63,SPACEID_SHIFT,\tmp
341 depd %r0,63,SPACEID_SHIFT,\spc
342 depd \tmp,31,SPACEID_SHIFT,\va
343#endif
344 .endm
345
346 .import swapper_pg_dir,code
347
348 /* Get the pgd. For faults on space zero (kernel space), this
349 * is simply swapper_pg_dir. For user space faults, the
350 * pgd is stored in %cr25 */
351 .macro get_pgd spc,reg
352 ldil L%PA(swapper_pg_dir),\reg
353 ldo R%PA(swapper_pg_dir)(\reg),\reg
354 or,COND(=) %r0,\spc,%r0
355 mfctl %cr25,\reg
356 .endm
357
358 /*
359 space_check(spc,tmp,fault)
360
361 spc - The space we saw the fault with.
362 tmp - The place to store the current space.
363 fault - Function to call on failure.
364
365 Only allow faults on different spaces from the
366 currently active one if we're the kernel
367
368 */
369 .macro space_check spc,tmp,fault
370 mfsp %sr7,\tmp
371 /* check against %r0 which is same value as LINUX_GATEWAY_SPACE */
372 or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
373 * as kernel, so defeat the space
374 * check if it is */
375 copy \spc,\tmp
376 or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
377 cmpb,COND(<>),n \tmp,\spc,\fault
378 .endm
379
380 /* Look up a PTE in a 2-Level scheme (faulting at each
381 * level if the entry isn't present
382 *
383 * NOTE: we use ldw even for LP64, since the short pointers
384 * can address up to 1TB
385 */
386 .macro L2_ptep pmd,pte,index,va,fault
387#if CONFIG_PGTABLE_LEVELS == 3
388 extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
389#else
390 extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
391#endif
392 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
393#if CONFIG_PGTABLE_LEVELS < 3
394 copy %r0,\pte
395#endif
396 ldw,s \index(\pmd),\pmd
397 bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
398 dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
399 SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
400 extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
401 dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
402 shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
403 .endm
404
405 /* Look up PTE in a 3-Level scheme. */
406 .macro L3_ptep pgd,pte,index,va,fault
407#if CONFIG_PGTABLE_LEVELS == 3
408 copy %r0,\pte
409 extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
410 ldw,s \index(\pgd),\pgd
411 bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
412 shld \pgd,PxD_VALUE_SHIFT,\pgd
413#endif
414 L2_ptep \pgd,\pte,\index,\va,\fault
415 .endm
416
417 /* Acquire page_table_lock and check page is present. */
418 .macro ptl_lock spc,ptp,pte,tmp,tmp1,fault
419#ifdef CONFIG_TLB_PTLOCK
42098: cmpib,COND(=),n 0,\spc,2f
421 get_ptl \tmp
4221: LDCW 0(\tmp),\tmp1
423 cmpib,COND(=) 0,\tmp1,1b
424 nop
425 LDREG 0(\ptp),\pte
426 bb,<,n \pte,_PAGE_PRESENT_BIT,3f
427 b \fault
428 stw \tmp1,0(\tmp)
42999: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
430#endif
4312: LDREG 0(\ptp),\pte
432 bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
4333:
434 .endm
435
436 /* Release page_table_lock if for user space. We use an ordered
437 store to ensure all prior accesses are performed prior to
438 releasing the lock. Note stw may not be executed, so we
439 provide one extra nop when CONFIG_TLB_PTLOCK is defined. */
440 .macro ptl_unlock spc,tmp,tmp2
441#ifdef CONFIG_TLB_PTLOCK
44298: get_ptl \tmp
443 ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
444 or,COND(=) %r0,\spc,%r0
445 stw,ma \tmp2,0(\tmp)
44699: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
447 insert_nops NUM_PIPELINE_INSNS - 4
448#else
449 insert_nops NUM_PIPELINE_INSNS - 1
450#endif
451 .endm
452
453 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
454 * don't needlessly dirty the cache line if it was already set */
455 .macro update_accessed ptp,pte,tmp,tmp1
456 ldi _PAGE_ACCESSED,\tmp1
457 or \tmp1,\pte,\tmp
458 and,COND(<>) \tmp1,\pte,%r0
459 STREG \tmp,0(\ptp)
460 .endm
461
462 /* Set the dirty bit (and accessed bit). No need to be
463 * clever, this is only used from the dirty fault */
464 .macro update_dirty ptp,pte,tmp
465 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
466 or \tmp,\pte,\pte
467 STREG \pte,0(\ptp)
468 .endm
469
470 /* We have (depending on the page size):
471 * - 38 to 52-bit Physical Page Number
472 * - 12 to 26-bit page offset
473 */
474 /* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
475 * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
476 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
477 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
478 #define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
479
480 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
481 .macro convert_for_tlb_insert20 pte,tmp
482#ifdef CONFIG_HUGETLB_PAGE
483 copy \pte,\tmp
484 extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
485
486 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
487 (63-58)+PAGE_ADD_SHIFT,\pte
488 extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
489 depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
490 (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
491#else /* Huge pages disabled */
492 extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
493 depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
494 (63-58)+PAGE_ADD_SHIFT,\pte
495#endif
496 .endm
497
498 /* Convert the pte and prot to tlb insertion values. How
499 * this happens is quite subtle, read below */
500 .macro make_insert_tlb spc,pte,prot,tmp
501 space_to_prot \spc \prot /* create prot id from space */
502 /* The following is the real subtlety. This is depositing
503 * T <-> _PAGE_REFTRAP
504 * D <-> _PAGE_DIRTY
505 * B <-> _PAGE_DMB (memory break)
506 *
507 * Then incredible subtlety: The access rights are
508 * _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE
509 * See 3-14 of the parisc 2.0 manual
510 *
511 * Finally, _PAGE_READ goes in the top bit of PL1 (so we
512 * trigger an access rights trap in user space if the user
513 * tries to read an unreadable page */
514#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
515 /* need to drop DMB bit, as it's used as SPECIAL flag */
516 depi 0,_PAGE_SPECIAL_BIT,1,\pte
517#endif
518 depd \pte,8,7,\prot
519
520 /* PAGE_USER indicates the page can be read with user privileges,
521 * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
522 * contains _PAGE_READ) */
523 extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
524 depdi 7,11,3,\prot
525 /* If we're a gateway page, drop PL2 back to zero for promotion
526 * to kernel privilege (so we can execute the page as kernel).
527 * Any privilege promotion page always denys read and write */
528 extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
529 depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
530
531 /* Enforce uncacheable pages.
532 * This should ONLY be use for MMIO on PA 2.0 machines.
533 * Memory/DMA is cache coherent on all PA2.0 machines we support
534 * (that means T-class is NOT supported) and the memory controllers
535 * on most of those machines only handles cache transactions.
536 */
537 extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
538 depdi 1,12,1,\prot
539
540 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
541 convert_for_tlb_insert20 \pte \tmp
542 .endm
543
544 /* Identical macro to make_insert_tlb above, except it
545 * makes the tlb entry for the differently formatted pa11
546 * insertion instructions */
547 .macro make_insert_tlb_11 spc,pte,prot
548#if _PAGE_SPECIAL_BIT == _PAGE_DMB_BIT
549 /* need to drop DMB bit, as it's used as SPECIAL flag */
550 depi 0,_PAGE_SPECIAL_BIT,1,\pte
551#endif
552 zdep \spc,30,15,\prot
553 dep \pte,8,7,\prot
554 extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
555 depi 1,12,1,\prot
556 extru,= \pte,_PAGE_USER_BIT,1,%r0
557 depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
558 extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
559 depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
560
561 /* Get rid of prot bits and convert to page addr for iitlba */
562
563 depi 0,31,ASM_PFN_PTE_SHIFT,\pte
564 SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
565 .endm
566
567 /* This is for ILP32 PA2.0 only. The TLB insertion needs
568 * to extend into I/O space if the address is 0xfXXXXXXX
569 * so we extend the f's into the top word of the pte in
570 * this case */
571 .macro f_extend pte,tmp
572 extrd,s \pte,42,4,\tmp
573 addi,<> 1,\tmp,%r0
574 extrd,s \pte,63,25,\pte
575 .endm
576
577 /* The alias region is comprised of a pair of 4 MB regions
578 * aligned to 8 MB. It is used to clear/copy/flush user pages
579 * using kernel virtual addresses congruent with the user
580 * virtual address.
581 *
582 * To use the alias page, you set %r26 up with the to TLB
583 * entry (identifying the physical page) and %r23 up with
584 * the from tlb entry (or nothing if only a to entry---for
585 * clear_user_page_asm) */
586 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
587 cmpib,COND(<>),n 0,\spc,\fault
588 ldil L%(TMPALIAS_MAP_START),\tmp
589 copy \va,\tmp1
590 depi_safe 0,31,TMPALIAS_SIZE_BITS+1,\tmp1
591 cmpb,COND(<>),n \tmp,\tmp1,\fault
592 mfctl %cr19,\tmp /* iir */
593 /* get the opcode (first six bits) into \tmp */
594 extrw,u \tmp,5,6,\tmp
595 /*
596 * Only setting the T bit prevents data cache movein
597 * Setting access rights to zero prevents instruction cache movein
598 *
599 * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
600 * to type field and _PAGE_READ goes to top bit of PL1
601 */
602 ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
603 /*
604 * so if the opcode is one (i.e. this is a memory management
605 * instruction) nullify the next load so \prot is only T.
606 * Otherwise this is a normal data operation
607 */
608 cmpiclr,= 0x01,\tmp,%r0
609 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
610.ifc \patype,20
611 depd,z \prot,8,7,\prot
612.else
613.ifc \patype,11
614 depw,z \prot,8,7,\prot
615.else
616 .error "undefined PA type to do_alias"
617.endif
618.endif
619 /*
620 * OK, it is in the temp alias region, check whether "from" or "to".
621 * Check "subtle" note in pacache.S re: r23/r26.
622 */
623 extrw,u,= \va,31-TMPALIAS_SIZE_BITS,1,%r0
624 or,COND(tr) %r23,%r0,\pte
625 or %r26,%r0,\pte
626
627 /* convert phys addr in \pte (from r23 or r26) to tlb insert format */
628 SHRREG \pte,PAGE_SHIFT+PAGE_ADD_SHIFT-5, \pte
629 depi_safe _PAGE_SIZE_ENCODING_DEFAULT, 31,5, \pte
630 .endm
631
632
633 /*
634 * Fault_vectors are architecturally required to be aligned on a 2K
635 * boundary
636 */
637
638 .section .text.hot
639 .align 2048
640
641ENTRY(fault_vector_20)
642 /* First vector is invalid (0) */
643 .ascii "cows can fly"
644 .byte 0
645 .align 32
646
647 hpmc 1
648 def 2
649 def 3
650 extint 4
651 def 5
652 itlb_20 PARISC_ITLB_TRAP
653 def 7
654 def 8
655 def 9
656 def 10
657 def 11
658 def 12
659 def 13
660 def 14
661 dtlb_20 15
662 naitlb_20 16
663 nadtlb_20 17
664 def 18
665 def 19
666 dbit_20 20
667 def 21
668 def 22
669 def 23
670 def 24
671 def 25
672 def 26
673 def 27
674 def 28
675 def 29
676 def 30
677 def 31
678END(fault_vector_20)
679
680#ifndef CONFIG_64BIT
681
682 .align 2048
683
684ENTRY(fault_vector_11)
685 /* First vector is invalid (0) */
686 .ascii "cows can fly"
687 .byte 0
688 .align 32
689
690 hpmc 1
691 def 2
692 def 3
693 extint 4
694 def 5
695 itlb_11 PARISC_ITLB_TRAP
696 def 7
697 def 8
698 def 9
699 def 10
700 def 11
701 def 12
702 def 13
703 def 14
704 dtlb_11 15
705 naitlb_11 16
706 nadtlb_11 17
707 def 18
708 def 19
709 dbit_11 20
710 def 21
711 def 22
712 def 23
713 def 24
714 def 25
715 def 26
716 def 27
717 def 28
718 def 29
719 def 30
720 def 31
721END(fault_vector_11)
722
723#endif
724 /* Fault vector is separately protected and *must* be on its own page */
725 .align PAGE_SIZE
726
727 .import handle_interruption,code
728 .import do_cpu_irq_mask,code
729
730 /*
731 * Child Returns here
732 *
733 * copy_thread moved args into task save area.
734 */
735
736ENTRY(ret_from_kernel_thread)
737 /* Call schedule_tail first though */
738 BL schedule_tail, %r2
739 nop
740
741 mfctl %cr30,%r1 /* task_struct */
742 LDREG TASK_PT_GR25(%r1), %r26
743#ifdef CONFIG_64BIT
744 LDREG TASK_PT_GR27(%r1), %r27
745#endif
746 LDREG TASK_PT_GR26(%r1), %r1
747 ble 0(%sr7, %r1)
748 copy %r31, %r2
749 b finish_child_return
750 nop
751END(ret_from_kernel_thread)
752
753
754 /*
755 * struct task_struct *_switch_to(struct task_struct *prev,
756 * struct task_struct *next)
757 *
758 * switch kernel stacks and return prev */
759ENTRY_CFI(_switch_to)
760 STREG %r2, -RP_OFFSET(%r30)
761
762 callee_save_float
763 callee_save
764
765 load32 _switch_to_ret, %r2
766
767 STREG %r2, TASK_PT_KPC(%r26)
768 LDREG TASK_PT_KPC(%r25), %r2
769
770 STREG %r30, TASK_PT_KSP(%r26)
771 LDREG TASK_PT_KSP(%r25), %r30
772 bv %r0(%r2)
773 mtctl %r25,%cr30
774
775ENTRY(_switch_to_ret)
776 mtctl %r0, %cr0 /* Needed for single stepping */
777 callee_rest
778 callee_rest_float
779
780 LDREG -RP_OFFSET(%r30), %r2
781 bv %r0(%r2)
782 copy %r26, %r28
783ENDPROC_CFI(_switch_to)
784
785 /*
786 * Common rfi return path for interruptions, kernel execve, and
787 * sys_rt_sigreturn (sometimes). The sys_rt_sigreturn syscall will
788 * return via this path if the signal was received when the process
789 * was running; if the process was blocked on a syscall then the
790 * normal syscall_exit path is used. All syscalls for traced
791 * proceses exit via intr_restore.
792 *
793 * XXX If any syscalls that change a processes space id ever exit
794 * this way, then we will need to copy %sr3 in to PT_SR[3..7], and
795 * adjust IASQ[0..1].
796 *
797 */
798
799 .align PAGE_SIZE
800
801ENTRY_CFI(syscall_exit_rfi)
802 mfctl %cr30,%r16 /* task_struct */
803 ldo TASK_REGS(%r16),%r16
804 /* Force iaoq to userspace, as the user has had access to our current
805 * context via sigcontext. Also Filter the PSW for the same reason.
806 */
807 LDREG PT_IAOQ0(%r16),%r19
808 depi PRIV_USER,31,2,%r19
809 STREG %r19,PT_IAOQ0(%r16)
810 LDREG PT_IAOQ1(%r16),%r19
811 depi PRIV_USER,31,2,%r19
812 STREG %r19,PT_IAOQ1(%r16)
813 LDREG PT_PSW(%r16),%r19
814 load32 USER_PSW_MASK,%r1
815#ifdef CONFIG_64BIT
816 load32 USER_PSW_HI_MASK,%r20
817 depd %r20,31,32,%r1
818#endif
819 and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
820 load32 USER_PSW,%r1
821 or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
822 STREG %r19,PT_PSW(%r16)
823
824 /*
825 * If we aren't being traced, we never saved space registers
826 * (we don't store them in the sigcontext), so set them
827 * to "proper" values now (otherwise we'll wind up restoring
828 * whatever was last stored in the task structure, which might
829 * be inconsistent if an interrupt occurred while on the gateway
830 * page). Note that we may be "trashing" values the user put in
831 * them, but we don't support the user changing them.
832 */
833
834 STREG %r0,PT_SR2(%r16)
835 mfsp %sr3,%r19
836 STREG %r19,PT_SR0(%r16)
837 STREG %r19,PT_SR1(%r16)
838 STREG %r19,PT_SR3(%r16)
839 STREG %r19,PT_SR4(%r16)
840 STREG %r19,PT_SR5(%r16)
841 STREG %r19,PT_SR6(%r16)
842 STREG %r19,PT_SR7(%r16)
843
844ENTRY(intr_return)
845 /* check for reschedule */
846 mfctl %cr30,%r1
847 LDREG TASK_TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
848 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
849
850 .import do_notify_resume,code
851intr_check_sig:
852 /* As above */
853 mfctl %cr30,%r1
854 LDREG TASK_TI_FLAGS(%r1),%r19
855 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r20
856 and,COND(<>) %r19, %r20, %r0
857 b,n intr_restore /* skip past if we've nothing to do */
858
859 /* This check is critical to having LWS
860 * working. The IASQ is zero on the gateway
861 * page and we cannot deliver any signals until
862 * we get off the gateway page.
863 *
864 * Only do signals if we are returning to user space
865 */
866 LDREG PT_IASQ0(%r16), %r20
867 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
868 LDREG PT_IASQ1(%r16), %r20
869 cmpib,COND(=),n LINUX_GATEWAY_SPACE, %r20, intr_restore /* forward */
870
871 copy %r0, %r25 /* long in_syscall = 0 */
872#ifdef CONFIG_64BIT
873 ldo -16(%r30),%r29 /* Reference param save area */
874#endif
875
876 /* NOTE: We need to enable interrupts if we have to deliver
877 * signals. We used to do this earlier but it caused kernel
878 * stack overflows. */
879 ssm PSW_SM_I, %r0
880
881 BL do_notify_resume,%r2
882 copy %r16, %r26 /* struct pt_regs *regs */
883
884 b,n intr_check_sig
885
886intr_restore:
887 copy %r16,%r29
888 ldo PT_FR31(%r29),%r1
889 rest_fp %r1
890 rest_general %r29
891
892 /* inverse of virt_map */
893 pcxt_ssm_bug
894 rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
895 tophys_r1 %r29
896
897 /* Restore space id's and special cr's from PT_REGS
898 * structure pointed to by r29
899 */
900 rest_specials %r29
901
902 /* IMPORTANT: rest_stack restores r29 last (we are using it)!
903 * It also restores r1 and r30.
904 */
905 rest_stack
906
907 rfi
908 nop
909
910#ifndef CONFIG_PREEMPTION
911# define intr_do_preempt intr_restore
912#endif /* !CONFIG_PREEMPTION */
913
914 .import schedule,code
915intr_do_resched:
916 /* Only call schedule on return to userspace. If we're returning
917 * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise
918 * we jump back to intr_restore.
919 */
920 LDREG PT_IASQ0(%r16), %r20
921 cmpib,COND(=) 0, %r20, intr_do_preempt
922 nop
923 LDREG PT_IASQ1(%r16), %r20
924 cmpib,COND(=) 0, %r20, intr_do_preempt
925 nop
926
927 /* NOTE: We need to enable interrupts if we schedule. We used
928 * to do this earlier but it caused kernel stack overflows. */
929 ssm PSW_SM_I, %r0
930
931#ifdef CONFIG_64BIT
932 ldo -16(%r30),%r29 /* Reference param save area */
933#endif
934
935 ldil L%intr_check_sig, %r2
936#ifndef CONFIG_64BIT
937 b schedule
938#else
939 load32 schedule, %r20
940 bv %r0(%r20)
941#endif
942 ldo R%intr_check_sig(%r2), %r2
943
944 /* preempt the current task on returning to kernel
945 * mode from an interrupt, iff need_resched is set,
946 * and preempt_count is 0. otherwise, we continue on
947 * our merry way back to the current running task.
948 */
949#ifdef CONFIG_PREEMPTION
950 .import preempt_schedule_irq,code
951intr_do_preempt:
952 rsm PSW_SM_I, %r0 /* disable interrupts */
953
954 /* current_thread_info()->preempt_count */
955 mfctl %cr30, %r1
956 ldw TI_PRE_COUNT(%r1), %r19
957 cmpib,<> 0, %r19, intr_restore /* if preempt_count > 0 */
958 nop /* prev insn branched backwards */
959
960 /* check if we interrupted a critical path */
961 LDREG PT_PSW(%r16), %r20
962 bb,<,n %r20, 31 - PSW_SM_I, intr_restore
963 nop
964
965 /* ssm PSW_SM_I done later in intr_restore */
966#ifdef CONFIG_MLONGCALLS
967 ldil L%intr_restore, %r2
968 load32 preempt_schedule_irq, %r1
969 bv %r0(%r1)
970 ldo R%intr_restore(%r2), %r2
971#else
972 ldil L%intr_restore, %r1
973 BL preempt_schedule_irq, %r2
974 ldo R%intr_restore(%r1), %r2
975#endif
976#endif /* CONFIG_PREEMPTION */
977
978 /*
979 * External interrupts.
980 */
981
982intr_extint:
983 cmpib,COND(=),n 0,%r16,1f
984
985 get_stack_use_cr30
986 b,n 2f
987
9881:
989 get_stack_use_r30
9902:
991 save_specials %r29
992 virt_map
993 save_general %r29
994
995 ldo PT_FR0(%r29), %r24
996 save_fp %r24
997
998 loadgp
999
1000 copy %r29, %r26 /* arg0 is pt_regs */
1001 copy %r29, %r16 /* save pt_regs */
1002
1003 ldil L%intr_return, %r2
1004
1005#ifdef CONFIG_64BIT
1006 ldo -16(%r30),%r29 /* Reference param save area */
1007#endif
1008
1009 b do_cpu_irq_mask
1010 ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
1011ENDPROC_CFI(syscall_exit_rfi)
1012
1013
1014 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */
1015
1016ENTRY_CFI(intr_save) /* for os_hpmc */
1017 mfsp %sr7,%r16
1018 cmpib,COND(=),n 0,%r16,1f
1019 get_stack_use_cr30
1020 b 2f
1021 copy %r8,%r26
1022
10231:
1024 get_stack_use_r30
1025 copy %r8,%r26
1026
10272:
1028 save_specials %r29
1029
1030 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */
1031 cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior
1032
1033
1034 mfctl %isr, %r16
1035 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
1036 mfctl %ior, %r17
1037
1038
1039#ifdef CONFIG_64BIT
1040 /*
1041 * If the interrupted code was running with W bit off (32 bit),
1042 * clear the b bits (bits 0 & 1) in the ior.
1043 * save_specials left ipsw value in r8 for us to test.
1044 */
1045 extrd,u,*<> %r8,PSW_W_BIT,1,%r0
1046 depdi 0,1,2,%r17
1047
1048 /* adjust isr/ior: get high bits from isr and deposit in ior */
1049 space_adjust %r16,%r17,%r1
1050#endif
1051 STREG %r16, PT_ISR(%r29)
1052 STREG %r17, PT_IOR(%r29)
1053
1054#if 0 && defined(CONFIG_64BIT)
1055 /* Revisit when we have 64-bit code above 4Gb */
1056 b,n intr_save2
1057
1058skip_save_ior:
1059 /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we
1060 * need to adjust iasq/iaoq here in the same way we adjusted isr/ior
1061 * above.
1062 */
1063 extrd,u,* %r8,PSW_W_BIT,1,%r1
1064 cmpib,COND(=),n 1,%r1,intr_save2
1065 LDREG PT_IASQ0(%r29), %r16
1066 LDREG PT_IAOQ0(%r29), %r17
1067 /* adjust iasq/iaoq */
1068 space_adjust %r16,%r17,%r1
1069 STREG %r16, PT_IASQ0(%r29)
1070 STREG %r17, PT_IAOQ0(%r29)
1071#else
1072skip_save_ior:
1073#endif
1074
1075intr_save2:
1076 virt_map
1077 save_general %r29
1078
1079 ldo PT_FR0(%r29), %r25
1080 save_fp %r25
1081
1082 loadgp
1083
1084 copy %r29, %r25 /* arg1 is pt_regs */
1085#ifdef CONFIG_64BIT
1086 ldo -16(%r30),%r29 /* Reference param save area */
1087#endif
1088
1089 ldil L%intr_check_sig, %r2
1090 copy %r25, %r16 /* save pt_regs */
1091
1092 b handle_interruption
1093 ldo R%intr_check_sig(%r2), %r2
1094ENDPROC_CFI(intr_save)
1095
1096
1097 /*
1098 * Note for all tlb miss handlers:
1099 *
1100 * cr24 contains a pointer to the kernel address space
1101 * page directory.
1102 *
1103 * cr25 contains a pointer to the current user address
1104 * space page directory.
1105 *
1106 * sr3 will contain the space id of the user address space
1107 * of the current running thread while that thread is
1108 * running in the kernel.
1109 */
1110
1111 /*
1112 * register number allocations. Note that these are all
1113 * in the shadowed registers
1114 */
1115
1116 t0 = r1 /* temporary register 0 */
1117 va = r8 /* virtual address for which the trap occurred */
1118 t1 = r9 /* temporary register 1 */
1119 pte = r16 /* pte/phys page # */
1120 prot = r17 /* prot bits */
1121 spc = r24 /* space for which the trap occurred */
1122 ptp = r25 /* page directory/page table pointer */
1123
1124#ifdef CONFIG_64BIT
1125
1126dtlb_miss_20w:
1127 space_adjust spc,va,t0
1128 get_pgd spc,ptp
1129 space_check spc,t0,dtlb_fault
1130
1131 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1132
1133 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
1134 update_accessed ptp,pte,t0,t1
1135
1136 make_insert_tlb spc,pte,prot,t1
1137
1138 idtlbt pte,prot
1139
1140 ptl_unlock spc,t0,t1
1141 rfir
1142 nop
1143
1144dtlb_check_alias_20w:
1145 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1146
1147 idtlbt pte,prot
1148
1149 insert_nops NUM_PIPELINE_INSNS - 1
1150 rfir
1151 nop
1152
1153nadtlb_miss_20w:
1154 space_adjust spc,va,t0
1155 get_pgd spc,ptp
1156 space_check spc,t0,nadtlb_fault
1157
1158 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1159
1160 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
1161 update_accessed ptp,pte,t0,t1
1162
1163 make_insert_tlb spc,pte,prot,t1
1164
1165 idtlbt pte,prot
1166
1167 ptl_unlock spc,t0,t1
1168 rfir
1169 nop
1170
1171nadtlb_check_alias_20w:
1172 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1173
1174 idtlbt pte,prot
1175
1176 insert_nops NUM_PIPELINE_INSNS - 1
1177 rfir
1178 nop
1179
1180#else
1181
1182dtlb_miss_11:
1183 get_pgd spc,ptp
1184
1185 space_check spc,t0,dtlb_fault
1186
1187 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1188
1189 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
1190 update_accessed ptp,pte,t0,t1
1191
1192 make_insert_tlb_11 spc,pte,prot
1193
1194 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1195 mtsp spc,%sr1
1196
1197 idtlba pte,(%sr1,va)
1198 idtlbp prot,(%sr1,va)
1199
1200 mtsp t1, %sr1 /* Restore sr1 */
1201
1202 ptl_unlock spc,t0,t1
1203 rfir
1204 nop
1205
1206dtlb_check_alias_11:
1207 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
1208
1209 idtlba pte,(va)
1210 idtlbp prot,(va)
1211
1212 insert_nops NUM_PIPELINE_INSNS - 1
1213 rfir
1214 nop
1215
1216nadtlb_miss_11:
1217 get_pgd spc,ptp
1218
1219 space_check spc,t0,nadtlb_fault
1220
1221 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1222
1223 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
1224 update_accessed ptp,pte,t0,t1
1225
1226 make_insert_tlb_11 spc,pte,prot
1227
1228 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1229 mtsp spc,%sr1
1230
1231 idtlba pte,(%sr1,va)
1232 idtlbp prot,(%sr1,va)
1233
1234 mtsp t1, %sr1 /* Restore sr1 */
1235
1236 ptl_unlock spc,t0,t1
1237 rfir
1238 nop
1239
1240nadtlb_check_alias_11:
1241 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1242
1243 idtlba pte,(va)
1244 idtlbp prot,(va)
1245
1246 insert_nops NUM_PIPELINE_INSNS - 1
1247 rfir
1248 nop
1249
1250dtlb_miss_20:
1251 space_adjust spc,va,t0
1252 get_pgd spc,ptp
1253 space_check spc,t0,dtlb_fault
1254
1255 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1256
1257 ptl_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
1258 update_accessed ptp,pte,t0,t1
1259
1260 make_insert_tlb spc,pte,prot,t1
1261
1262 f_extend pte,t1
1263
1264 idtlbt pte,prot
1265
1266 ptl_unlock spc,t0,t1
1267 rfir
1268 nop
1269
1270dtlb_check_alias_20:
1271 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1272
1273 idtlbt pte,prot
1274
1275 insert_nops NUM_PIPELINE_INSNS - 1
1276 rfir
1277 nop
1278
1279nadtlb_miss_20:
1280 get_pgd spc,ptp
1281
1282 space_check spc,t0,nadtlb_fault
1283
1284 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1285
1286 ptl_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
1287 update_accessed ptp,pte,t0,t1
1288
1289 make_insert_tlb spc,pte,prot,t1
1290
1291 f_extend pte,t1
1292
1293 idtlbt pte,prot
1294
1295 ptl_unlock spc,t0,t1
1296 rfir
1297 nop
1298
1299nadtlb_check_alias_20:
1300 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1301
1302 idtlbt pte,prot
1303
1304 insert_nops NUM_PIPELINE_INSNS - 1
1305 rfir
1306 nop
1307
1308#endif
1309
1310nadtlb_emulate:
1311
1312 /*
1313 * Non-access misses can be caused by fdc,fic,pdc,lpa,probe and
1314 * probei instructions. The kernel no longer faults doing flushes.
1315 * Use of lpa and probe instructions is rare. Given the issue
1316 * with shadow registers, we defer everything to the "slow" path.
1317 */
1318 b,n nadtlb_fault
1319
1320#ifdef CONFIG_64BIT
1321itlb_miss_20w:
1322
1323 /*
1324 * I miss is a little different, since we allow users to fault
1325 * on the gateway page which is in the kernel address space.
1326 */
1327
1328 space_adjust spc,va,t0
1329 get_pgd spc,ptp
1330 space_check spc,t0,itlb_fault
1331
1332 L3_ptep ptp,pte,t0,va,itlb_fault
1333
1334 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1335 update_accessed ptp,pte,t0,t1
1336
1337 make_insert_tlb spc,pte,prot,t1
1338
1339 iitlbt pte,prot
1340
1341 ptl_unlock spc,t0,t1
1342 rfir
1343 nop
1344
1345naitlb_miss_20w:
1346
1347 /*
1348 * I miss is a little different, since we allow users to fault
1349 * on the gateway page which is in the kernel address space.
1350 */
1351
1352 space_adjust spc,va,t0
1353 get_pgd spc,ptp
1354 space_check spc,t0,naitlb_fault
1355
1356 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1357
1358 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
1359 update_accessed ptp,pte,t0,t1
1360
1361 make_insert_tlb spc,pte,prot,t1
1362
1363 iitlbt pte,prot
1364
1365 ptl_unlock spc,t0,t1
1366 rfir
1367 nop
1368
1369naitlb_check_alias_20w:
1370 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1371
1372 iitlbt pte,prot
1373
1374 insert_nops NUM_PIPELINE_INSNS - 1
1375 rfir
1376 nop
1377
1378#else
1379
1380itlb_miss_11:
1381 get_pgd spc,ptp
1382
1383 space_check spc,t0,itlb_fault
1384
1385 L2_ptep ptp,pte,t0,va,itlb_fault
1386
1387 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1388 update_accessed ptp,pte,t0,t1
1389
1390 make_insert_tlb_11 spc,pte,prot
1391
1392 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1393 mtsp spc,%sr1
1394
1395 iitlba pte,(%sr1,va)
1396 iitlbp prot,(%sr1,va)
1397
1398 mtsp t1, %sr1 /* Restore sr1 */
1399
1400 ptl_unlock spc,t0,t1
1401 rfir
1402 nop
1403
1404naitlb_miss_11:
1405 get_pgd spc,ptp
1406
1407 space_check spc,t0,naitlb_fault
1408
1409 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1410
1411 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
1412 update_accessed ptp,pte,t0,t1
1413
1414 make_insert_tlb_11 spc,pte,prot
1415
1416 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1417 mtsp spc,%sr1
1418
1419 iitlba pte,(%sr1,va)
1420 iitlbp prot,(%sr1,va)
1421
1422 mtsp t1, %sr1 /* Restore sr1 */
1423
1424 ptl_unlock spc,t0,t1
1425 rfir
1426 nop
1427
1428naitlb_check_alias_11:
1429 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1430
1431 iitlba pte,(%sr0, va)
1432 iitlbp prot,(%sr0, va)
1433
1434 insert_nops NUM_PIPELINE_INSNS - 1
1435 rfir
1436 nop
1437
1438
1439itlb_miss_20:
1440 get_pgd spc,ptp
1441
1442 space_check spc,t0,itlb_fault
1443
1444 L2_ptep ptp,pte,t0,va,itlb_fault
1445
1446 ptl_lock spc,ptp,pte,t0,t1,itlb_fault
1447 update_accessed ptp,pte,t0,t1
1448
1449 make_insert_tlb spc,pte,prot,t1
1450
1451 f_extend pte,t1
1452
1453 iitlbt pte,prot
1454
1455 ptl_unlock spc,t0,t1
1456 rfir
1457 nop
1458
1459naitlb_miss_20:
1460 get_pgd spc,ptp
1461
1462 space_check spc,t0,naitlb_fault
1463
1464 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1465
1466 ptl_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
1467 update_accessed ptp,pte,t0,t1
1468
1469 make_insert_tlb spc,pte,prot,t1
1470
1471 f_extend pte,t1
1472
1473 iitlbt pte,prot
1474
1475 ptl_unlock spc,t0,t1
1476 rfir
1477 nop
1478
1479naitlb_check_alias_20:
1480 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1481
1482 iitlbt pte,prot
1483
1484 insert_nops NUM_PIPELINE_INSNS - 1
1485 rfir
1486 nop
1487
1488#endif
1489
1490#ifdef CONFIG_64BIT
1491
1492dbit_trap_20w:
1493 space_adjust spc,va,t0
1494 get_pgd spc,ptp
1495 space_check spc,t0,dbit_fault
1496
1497 L3_ptep ptp,pte,t0,va,dbit_fault
1498
1499 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1500 update_dirty ptp,pte,t1
1501
1502 make_insert_tlb spc,pte,prot,t1
1503
1504 idtlbt pte,prot
1505
1506 ptl_unlock spc,t0,t1
1507 rfir
1508 nop
1509#else
1510
1511dbit_trap_11:
1512
1513 get_pgd spc,ptp
1514
1515 space_check spc,t0,dbit_fault
1516
1517 L2_ptep ptp,pte,t0,va,dbit_fault
1518
1519 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1520 update_dirty ptp,pte,t1
1521
1522 make_insert_tlb_11 spc,pte,prot
1523
1524 mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
1525 mtsp spc,%sr1
1526
1527 idtlba pte,(%sr1,va)
1528 idtlbp prot,(%sr1,va)
1529
1530 mtsp t1, %sr1 /* Restore sr1 */
1531
1532 ptl_unlock spc,t0,t1
1533 rfir
1534 nop
1535
1536dbit_trap_20:
1537 get_pgd spc,ptp
1538
1539 space_check spc,t0,dbit_fault
1540
1541 L2_ptep ptp,pte,t0,va,dbit_fault
1542
1543 ptl_lock spc,ptp,pte,t0,t1,dbit_fault
1544 update_dirty ptp,pte,t1
1545
1546 make_insert_tlb spc,pte,prot,t1
1547
1548 f_extend pte,t1
1549
1550 idtlbt pte,prot
1551
1552 ptl_unlock spc,t0,t1
1553 rfir
1554 nop
1555#endif
1556
1557 .import handle_interruption,code
1558
1559kernel_bad_space:
1560 b intr_save
1561 ldi 31,%r8 /* Use an unused code */
1562
1563dbit_fault:
1564 b intr_save
1565 ldi 20,%r8
1566
1567itlb_fault:
1568 b intr_save
1569 ldi PARISC_ITLB_TRAP,%r8
1570
1571nadtlb_fault:
1572 b intr_save
1573 ldi 17,%r8
1574
1575naitlb_fault:
1576 b intr_save
1577 ldi 16,%r8
1578
1579dtlb_fault:
1580 b intr_save
1581 ldi 15,%r8
1582
1583 /* Register saving semantics for system calls:
1584
1585 %r1 clobbered by system call macro in userspace
1586 %r2 saved in PT_REGS by gateway page
1587 %r3 - %r18 preserved by C code (saved by signal code)
1588 %r19 - %r20 saved in PT_REGS by gateway page
1589 %r21 - %r22 non-standard syscall args
1590 stored in kernel stack by gateway page
1591 %r23 - %r26 arg3-arg0, saved in PT_REGS by gateway page
1592 %r27 - %r30 saved in PT_REGS by gateway page
1593 %r31 syscall return pointer
1594 */
1595
1596 /* Floating point registers (FIXME: what do we do with these?)
1597
1598 %fr0 - %fr3 status/exception, not preserved
1599 %fr4 - %fr7 arguments
1600 %fr8 - %fr11 not preserved by C code
1601 %fr12 - %fr21 preserved by C code
1602 %fr22 - %fr31 not preserved by C code
1603 */
1604
1605 .macro reg_save regs
1606 STREG %r3, PT_GR3(\regs)
1607 STREG %r4, PT_GR4(\regs)
1608 STREG %r5, PT_GR5(\regs)
1609 STREG %r6, PT_GR6(\regs)
1610 STREG %r7, PT_GR7(\regs)
1611 STREG %r8, PT_GR8(\regs)
1612 STREG %r9, PT_GR9(\regs)
1613 STREG %r10,PT_GR10(\regs)
1614 STREG %r11,PT_GR11(\regs)
1615 STREG %r12,PT_GR12(\regs)
1616 STREG %r13,PT_GR13(\regs)
1617 STREG %r14,PT_GR14(\regs)
1618 STREG %r15,PT_GR15(\regs)
1619 STREG %r16,PT_GR16(\regs)
1620 STREG %r17,PT_GR17(\regs)
1621 STREG %r18,PT_GR18(\regs)
1622 .endm
1623
1624 .macro reg_restore regs
1625 LDREG PT_GR3(\regs), %r3
1626 LDREG PT_GR4(\regs), %r4
1627 LDREG PT_GR5(\regs), %r5
1628 LDREG PT_GR6(\regs), %r6
1629 LDREG PT_GR7(\regs), %r7
1630 LDREG PT_GR8(\regs), %r8
1631 LDREG PT_GR9(\regs), %r9
1632 LDREG PT_GR10(\regs),%r10
1633 LDREG PT_GR11(\regs),%r11
1634 LDREG PT_GR12(\regs),%r12
1635 LDREG PT_GR13(\regs),%r13
1636 LDREG PT_GR14(\regs),%r14
1637 LDREG PT_GR15(\regs),%r15
1638 LDREG PT_GR16(\regs),%r16
1639 LDREG PT_GR17(\regs),%r17
1640 LDREG PT_GR18(\regs),%r18
1641 .endm
1642
1643 .macro fork_like name
1644ENTRY_CFI(sys_\name\()_wrapper)
1645 mfctl %cr30,%r1
1646 ldo TASK_REGS(%r1),%r1
1647 reg_save %r1
1648 mfctl %cr27, %r28
1649 ldil L%sys_\name, %r31
1650 be R%sys_\name(%sr4,%r31)
1651 STREG %r28, PT_CR27(%r1)
1652ENDPROC_CFI(sys_\name\()_wrapper)
1653 .endm
1654
1655fork_like clone
1656fork_like clone3
1657fork_like fork
1658fork_like vfork
1659
1660 /* Set the return value for the child */
1661ENTRY(child_return)
1662 BL schedule_tail, %r2
1663 nop
1664finish_child_return:
1665 mfctl %cr30,%r1
1666 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1667
1668 LDREG PT_CR27(%r1), %r3
1669 mtctl %r3, %cr27
1670 reg_restore %r1
1671 b syscall_exit
1672 copy %r0,%r28
1673END(child_return)
1674
1675ENTRY_CFI(sys_rt_sigreturn_wrapper)
1676 mfctl %cr30,%r26
1677 ldo TASK_REGS(%r26),%r26 /* get pt regs */
1678 /* Don't save regs, we are going to restore them from sigcontext. */
1679 STREG %r2, -RP_OFFSET(%r30)
1680#ifdef CONFIG_64BIT
1681 ldo FRAME_SIZE(%r30), %r30
1682 BL sys_rt_sigreturn,%r2
1683 ldo -16(%r30),%r29 /* Reference param save area */
1684#else
1685 BL sys_rt_sigreturn,%r2
1686 ldo FRAME_SIZE(%r30), %r30
1687#endif
1688
1689 ldo -FRAME_SIZE(%r30), %r30
1690 LDREG -RP_OFFSET(%r30), %r2
1691
1692 /* FIXME: I think we need to restore a few more things here. */
1693 mfctl %cr30,%r1
1694 ldo TASK_REGS(%r1),%r1 /* get pt regs */
1695 reg_restore %r1
1696
1697 /* If the signal was received while the process was blocked on a
1698 * syscall, then r2 will take us to syscall_exit; otherwise r2 will
1699 * take us to syscall_exit_rfi and on to intr_return.
1700 */
1701 bv %r0(%r2)
1702 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
1703ENDPROC_CFI(sys_rt_sigreturn_wrapper)
1704
1705ENTRY(syscall_exit)
1706 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
1707 * via syscall_exit_rfi if the signal was received while the process
1708 * was running.
1709 */
1710
1711 /* save return value now */
1712 mfctl %cr30, %r1
1713 STREG %r28,TASK_PT_GR28(%r1)
1714
1715 /* Seems to me that dp could be wrong here, if the syscall involved
1716 * calling a module, and nothing got round to restoring dp on return.
1717 */
1718 loadgp
1719
1720syscall_check_resched:
1721
1722 /* check for reschedule */
1723 mfctl %cr30,%r19
1724 LDREG TASK_TI_FLAGS(%r19),%r19 /* long */
1725 bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
1726
1727 .import do_signal,code
1728syscall_check_sig:
1729 mfctl %cr30,%r19
1730 LDREG TASK_TI_FLAGS(%r19),%r19
1731 ldi (_TIF_USER_WORK_MASK & ~_TIF_NEED_RESCHED), %r26
1732 and,COND(<>) %r19, %r26, %r0
1733 b,n syscall_restore /* skip past if we've nothing to do */
1734
1735syscall_do_signal:
1736 /* Save callee-save registers (for sigcontext).
1737 * FIXME: After this point the process structure should be
1738 * consistent with all the relevant state of the process
1739 * before the syscall. We need to verify this.
1740 */
1741 mfctl %cr30,%r1
1742 ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
1743 reg_save %r26
1744
1745#ifdef CONFIG_64BIT
1746 ldo -16(%r30),%r29 /* Reference param save area */
1747#endif
1748
1749 BL do_notify_resume,%r2
1750 ldi 1, %r25 /* long in_syscall = 1 */
1751
1752 mfctl %cr30,%r1
1753 ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
1754 reg_restore %r20
1755
1756 b,n syscall_check_sig
1757
1758syscall_restore:
1759 mfctl %cr30,%r1
1760
1761 /* Are we being ptraced? */
1762 LDREG TASK_TI_FLAGS(%r1),%r19
1763 ldi _TIF_SINGLESTEP|_TIF_BLOCKSTEP,%r2
1764 and,COND(=) %r19,%r2,%r0
1765 b,n syscall_restore_rfi
1766
1767 ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
1768 rest_fp %r19
1769
1770 LDREG TASK_PT_SAR(%r1),%r19 /* restore SAR */
1771 mtsar %r19
1772
1773 LDREG TASK_PT_GR2(%r1),%r2 /* restore user rp */
1774 LDREG TASK_PT_GR19(%r1),%r19
1775 LDREG TASK_PT_GR20(%r1),%r20
1776 LDREG TASK_PT_GR21(%r1),%r21
1777 LDREG TASK_PT_GR22(%r1),%r22
1778 LDREG TASK_PT_GR23(%r1),%r23
1779 LDREG TASK_PT_GR24(%r1),%r24
1780 LDREG TASK_PT_GR25(%r1),%r25
1781 LDREG TASK_PT_GR26(%r1),%r26
1782 LDREG TASK_PT_GR27(%r1),%r27 /* restore user dp */
1783 LDREG TASK_PT_GR28(%r1),%r28 /* syscall return value */
1784 LDREG TASK_PT_GR29(%r1),%r29
1785 LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
1786
1787 /* NOTE: We use rsm/ssm pair to make this operation atomic */
1788 LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
1789 rsm PSW_SM_I, %r0
1790 copy %r1,%r30 /* Restore user sp */
1791 mfsp %sr3,%r1 /* Get user space id */
1792 mtsp %r1,%sr7 /* Restore sr7 */
1793 ssm PSW_SM_I, %r0
1794
1795 /* Set sr2 to zero for userspace syscalls to work. */
1796 mtsp %r0,%sr2
1797 mtsp %r1,%sr4 /* Restore sr4 */
1798 mtsp %r1,%sr5 /* Restore sr5 */
1799 mtsp %r1,%sr6 /* Restore sr6 */
1800
1801 depi PRIV_USER,31,2,%r31 /* ensure return to user mode. */
1802
1803#ifdef CONFIG_64BIT
1804 /* decide whether to reset the wide mode bit
1805 *
1806 * For a syscall, the W bit is stored in the lowest bit
1807 * of sp. Extract it and reset W if it is zero */
1808 extrd,u,*<> %r30,63,1,%r1
1809 rsm PSW_SM_W, %r0
1810 /* now reset the lowest bit of sp if it was set */
1811 xor %r30,%r1,%r30
1812#endif
1813 be,n 0(%sr3,%r31) /* return to user space */
1814
1815 /* We have to return via an RFI, so that PSW T and R bits can be set
1816 * appropriately.
1817 * This sets up pt_regs so we can return via intr_restore, which is not
1818 * the most efficient way of doing things, but it works.
1819 */
1820syscall_restore_rfi:
1821 ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
1822 mtctl %r2,%cr0 /* for immediate trap */
1823 LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
1824 ldi 0x0b,%r20 /* Create new PSW */
1825 depi -1,13,1,%r20 /* C, Q, D, and I bits */
1826
1827 /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
1828 * set in thread_info.h and converted to PA bitmap
1829 * numbers in asm-offsets.c */
1830
1831 /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
1832 extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
1833 depi -1,27,1,%r20 /* R bit */
1834
1835 /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
1836 extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
1837 depi -1,7,1,%r20 /* T bit */
1838
1839 STREG %r20,TASK_PT_PSW(%r1)
1840
1841 /* Always store space registers, since sr3 can be changed (e.g. fork) */
1842
1843 mfsp %sr3,%r25
1844 STREG %r25,TASK_PT_SR3(%r1)
1845 STREG %r25,TASK_PT_SR4(%r1)
1846 STREG %r25,TASK_PT_SR5(%r1)
1847 STREG %r25,TASK_PT_SR6(%r1)
1848 STREG %r25,TASK_PT_SR7(%r1)
1849 STREG %r25,TASK_PT_IASQ0(%r1)
1850 STREG %r25,TASK_PT_IASQ1(%r1)
1851
1852 /* XXX W bit??? */
1853 /* Now if old D bit is clear, it means we didn't save all registers
1854 * on syscall entry, so do that now. This only happens on TRACEME
1855 * calls, or if someone attached to us while we were on a syscall.
1856 * We could make this more efficient by not saving r3-r18, but
1857 * then we wouldn't be able to use the common intr_restore path.
1858 * It is only for traced processes anyway, so performance is not
1859 * an issue.
1860 */
1861 bb,< %r2,30,pt_regs_ok /* Branch if D set */
1862 ldo TASK_REGS(%r1),%r25
1863 reg_save %r25 /* Save r3 to r18 */
1864
1865 /* Save the current sr */
1866 mfsp %sr0,%r2
1867 STREG %r2,TASK_PT_SR0(%r1)
1868
1869 /* Save the scratch sr */
1870 mfsp %sr1,%r2
1871 STREG %r2,TASK_PT_SR1(%r1)
1872
1873 /* sr2 should be set to zero for userspace syscalls */
1874 STREG %r0,TASK_PT_SR2(%r1)
1875
1876 LDREG TASK_PT_GR31(%r1),%r2
1877 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1878 STREG %r2,TASK_PT_IAOQ0(%r1)
1879 ldo 4(%r2),%r2
1880 STREG %r2,TASK_PT_IAOQ1(%r1)
1881 b intr_restore
1882 copy %r25,%r16
1883
1884pt_regs_ok:
1885 LDREG TASK_PT_IAOQ0(%r1),%r2
1886 depi PRIV_USER,31,2,%r2 /* ensure return to user mode. */
1887 STREG %r2,TASK_PT_IAOQ0(%r1)
1888 LDREG TASK_PT_IAOQ1(%r1),%r2
1889 depi PRIV_USER,31,2,%r2
1890 STREG %r2,TASK_PT_IAOQ1(%r1)
1891 b intr_restore
1892 copy %r25,%r16
1893
1894syscall_do_resched:
1895 load32 syscall_check_resched,%r2 /* if resched, we start over again */
1896 load32 schedule,%r19
1897 bv %r0(%r19) /* jumps to schedule() */
1898#ifdef CONFIG_64BIT
1899 ldo -16(%r30),%r29 /* Reference param save area */
1900#else
1901 nop
1902#endif
1903END(syscall_exit)
1904
1905
1906#ifdef CONFIG_FUNCTION_TRACER
1907
1908 .import ftrace_function_trampoline,code
1909 .align L1_CACHE_BYTES
1910ENTRY_CFI(mcount, caller)
1911_mcount:
1912 .export _mcount,data
1913 /*
1914 * The 64bit mcount() function pointer needs 4 dwords, of which the
1915 * first two are free. We optimize it here and put 2 instructions for
1916 * calling mcount(), and 2 instructions for ftrace_stub(). That way we
1917 * have all on one L1 cacheline.
1918 */
	ldi	0, %arg3
	b	ftrace_function_trampoline
	copy	%r3, %arg2	/* caller original %sp */
ftrace_stub:
	.globl ftrace_stub
	.type  ftrace_stub, @function
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	nop
#ifdef CONFIG_64BIT
	.dword mcount
	.dword 0 /* code in head.S puts value of global gp here */
#endif
ENDPROC_CFI(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE

#ifdef CONFIG_64BIT
#define FTRACE_FRAME_SIZE (2*FRAME_SIZE)
#else
#define FTRACE_FRAME_SIZE FRAME_SIZE
#endif
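/* Note (illustrative): the doubled 64-bit frame makes room for the extra
 * register saves in ftrace_caller below (%r19-%r22, %r27, %r31) on top of
 * the slots shared with the 32-bit path. */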
ENTRY_CFI(ftrace_caller, caller,frame=FTRACE_FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ftrace_caller:
	.global ftrace_caller

	STREG	%r3, -FTRACE_FRAME_SIZE+1*REG_SZ(%sp)
	ldo	-FTRACE_FRAME_SIZE(%sp), %r3
	STREG	%rp, -RP_OFFSET(%r3)

	/* Offset 0 is already allocated for %r1 */
	STREG	%r23, 2*REG_SZ(%r3)
	STREG	%r24, 3*REG_SZ(%r3)
	STREG	%r25, 4*REG_SZ(%r3)
	STREG	%r26, 5*REG_SZ(%r3)
	STREG	%r28, 6*REG_SZ(%r3)
	STREG	%r29, 7*REG_SZ(%r3)
#ifdef CONFIG_64BIT
	STREG	%r19, 8*REG_SZ(%r3)
	STREG	%r20, 9*REG_SZ(%r3)
	STREG	%r21, 10*REG_SZ(%r3)
	STREG	%r22, 11*REG_SZ(%r3)
	STREG	%r27, 12*REG_SZ(%r3)
	STREG	%r31, 13*REG_SZ(%r3)
	loadgp
	ldo	-16(%sp),%r29
#endif
	LDREG	0(%r3), %r25
	copy	%rp, %r26
	ldo	-8(%r25), %r25
	ldi	0, %r23		/* no pt_regs */
	b,l	ftrace_function_trampoline, %rp
	copy	%r3, %r24
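	/* For reference, the trampoline's C signature (see
	 * arch/parisc/kernel/ftrace.c) is approximately
	 *	void ftrace_function_trampoline(unsigned long parent,
	 *			unsigned long self_addr,
	 *			unsigned long org_sp_gr3,
	 *			struct pt_regs *regs);
	 * matching %r26/%r25/%r24/%r23 as loaded above.
	 */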

	LDREG	-RP_OFFSET(%r3), %rp
	LDREG	2*REG_SZ(%r3), %r23
	LDREG	3*REG_SZ(%r3), %r24
	LDREG	4*REG_SZ(%r3), %r25
	LDREG	5*REG_SZ(%r3), %r26
	LDREG	6*REG_SZ(%r3), %r28
	LDREG	7*REG_SZ(%r3), %r29
#ifdef CONFIG_64BIT
	LDREG	8*REG_SZ(%r3), %r19
	LDREG	9*REG_SZ(%r3), %r20
	LDREG	10*REG_SZ(%r3), %r21
	LDREG	11*REG_SZ(%r3), %r22
	LDREG	12*REG_SZ(%r3), %r27
	LDREG	13*REG_SZ(%r3), %r31
#endif
	LDREG	1*REG_SZ(%r3), %r3

	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_caller)

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS
ENTRY_CFI(ftrace_regs_caller,caller,frame=FTRACE_FRAME_SIZE+PT_SZ_ALGN,
	CALLS,SAVE_RP,SAVE_SP)
ftrace_regs_caller:
	.global ftrace_regs_caller

	ldo	-FTRACE_FRAME_SIZE(%sp), %r1
	STREG	%rp, -RP_OFFSET(%r1)

	copy	%sp, %r1
	ldo	PT_SZ_ALGN(%sp), %sp

	STREG	%rp, PT_GR2(%r1)
	STREG	%r3, PT_GR3(%r1)
	STREG	%r4, PT_GR4(%r1)
	STREG	%r5, PT_GR5(%r1)
	STREG	%r6, PT_GR6(%r1)
	STREG	%r7, PT_GR7(%r1)
	STREG	%r8, PT_GR8(%r1)
	STREG	%r9, PT_GR9(%r1)
	STREG	%r10, PT_GR10(%r1)
	STREG	%r11, PT_GR11(%r1)
	STREG	%r12, PT_GR12(%r1)
	STREG	%r13, PT_GR13(%r1)
	STREG	%r14, PT_GR14(%r1)
	STREG	%r15, PT_GR15(%r1)
	STREG	%r16, PT_GR16(%r1)
	STREG	%r17, PT_GR17(%r1)
	STREG	%r18, PT_GR18(%r1)
	STREG	%r19, PT_GR19(%r1)
	STREG	%r20, PT_GR20(%r1)
	STREG	%r21, PT_GR21(%r1)
	STREG	%r22, PT_GR22(%r1)
	STREG	%r23, PT_GR23(%r1)
	STREG	%r24, PT_GR24(%r1)
	STREG	%r25, PT_GR25(%r1)
	STREG	%r26, PT_GR26(%r1)
	STREG	%r27, PT_GR27(%r1)
	STREG	%r28, PT_GR28(%r1)
	STREG	%r29, PT_GR29(%r1)
	STREG	%r30, PT_GR30(%r1)
	STREG	%r31, PT_GR31(%r1)
	mfctl	%cr11, %r26
	STREG	%r26, PT_SAR(%r1)

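	/* Rebuild the trampoline arguments: %r26 = parent (%rp), %r25 =
	 * traced function address (the %r1 slot stored at the patched call
	 * site, backed up over the 8-byte call sequence), %arg2 = %sp as it
	 * was before the ftrace frame, %arg3 = the pt_regs saved above.
	 */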
	copy	%rp, %r26
	LDREG	-FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
	ldo	-8(%r25), %r25
	ldo	-FTRACE_FRAME_SIZE(%r1), %arg2
	b,l	ftrace_function_trampoline, %rp
	copy	%r1, %arg3 /* struct pt_regs */

	ldo	-PT_SZ_ALGN(%sp), %r1

	LDREG	PT_SAR(%r1), %rp
	mtctl	%rp, %cr11

	LDREG	PT_GR2(%r1), %rp
	LDREG	PT_GR3(%r1), %r3
	LDREG	PT_GR4(%r1), %r4
	LDREG	PT_GR5(%r1), %r5
	LDREG	PT_GR6(%r1), %r6
	LDREG	PT_GR7(%r1), %r7
	LDREG	PT_GR8(%r1), %r8
	LDREG	PT_GR9(%r1), %r9
	LDREG	PT_GR10(%r1),%r10
	LDREG	PT_GR11(%r1),%r11
	LDREG	PT_GR12(%r1),%r12
	LDREG	PT_GR13(%r1),%r13
	LDREG	PT_GR14(%r1),%r14
	LDREG	PT_GR15(%r1),%r15
	LDREG	PT_GR16(%r1),%r16
	LDREG	PT_GR17(%r1),%r17
	LDREG	PT_GR18(%r1),%r18
	LDREG	PT_GR19(%r1),%r19
	LDREG	PT_GR20(%r1),%r20
	LDREG	PT_GR21(%r1),%r21
	LDREG	PT_GR22(%r1),%r22
	LDREG	PT_GR23(%r1),%r23
	LDREG	PT_GR24(%r1),%r24
	LDREG	PT_GR25(%r1),%r25
	LDREG	PT_GR26(%r1),%r26
	LDREG	PT_GR27(%r1),%r27
	LDREG	PT_GR28(%r1),%r28
	LDREG	PT_GR29(%r1),%r29
	LDREG	PT_GR30(%r1),%r30
	LDREG	PT_GR31(%r1),%r31

	ldo	-PT_SZ_ALGN(%sp), %sp
	LDREGM	-FTRACE_FRAME_SIZE(%sp), %r1
	/* Adjust return point to jump back to beginning of traced function */
	ldo	-4(%r1), %r1
	bv,n	(%r1)

ENDPROC_CFI(ftrace_regs_caller)

#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.align 8
ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE)
	.export parisc_return_to_handler,data
parisc_return_to_handler:
	copy	%r3,%r1
	STREG	%r0,-RP_OFFSET(%sp)	/* store 0 as %rp */
	copy	%sp,%r3
	STREGM	%r1,FRAME_SIZE(%sp)
	STREG	%ret0,8(%r3)
	STREG	%ret1,16(%r3)

#ifdef CONFIG_64BIT
	loadgp
#endif

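	/* ftrace_return_to_handler(frame_pointer) returns the original
	 * return address recorded on function entry; 0 is passed since the
	 * frame-pointer consistency check is not used on this port.
	 * (Illustrative note; see the generic function-graph tracer code.)
	 */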
	/* call ftrace_return_to_handler(0) */
	.import ftrace_return_to_handler,code
	load32	ftrace_return_to_handler,%ret0
	load32	.Lftrace_ret,%r2
#ifdef CONFIG_64BIT
	ldo	-16(%sp),%ret1		/* Reference param save area */
	bve	(%ret0)
#else
	bv	%r0(%ret0)
#endif
	ldi	0,%r26
.Lftrace_ret:
	copy	%ret0,%rp

	/* restore original return values */
	LDREG	8(%r3),%ret0
	LDREG	16(%r3),%ret1

	/* return from function */
#ifdef CONFIG_64BIT
	bve	(%rp)
#else
	bv	%r0(%rp)
#endif
	LDREGM	-FRAME_SIZE(%sp),%r3
ENDPROC_CFI(return_to_handler)

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_IRQSTACKS
/* void call_on_stack(unsigned long param1, void *func,
		      unsigned long new_stack) */
ENTRY_CFI(call_on_stack, frame=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP)
ENTRY(_call_on_stack)
	copy	%sp, %r1

	/* Regarding the HPPA calling conventions for function pointers,
	   we assume the PIC register is not changed across the call. For
	   CONFIG_64BIT, the argument pointer is left to point at the
	   argument region allocated for the call to call_on_stack. */

	/* Switch to new stack. We allocate two frames. */
	ldo	2*FRAME_SIZE(%arg2), %sp
# ifdef CONFIG_64BIT
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls always use function descriptor */
	LDREG	16(%arg1), %arg1
	bve,l	(%arg1), %rp
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bve	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# else
	/* Save previous stack pointer and return pointer in frame marker */
	STREG	%r1, -FRAME_SIZE-REG_SZ(%sp)
	STREG	%rp, -FRAME_SIZE-RP_OFFSET(%sp)
	/* Calls use function descriptor if PLABEL bit is set */
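	/* Illustrative C for the PLABEL test/strip below:
	 *	if (fptr & 2)			   (PA bit 30 is set)
	 *		fptr = *(unsigned long *)(fptr & ~3UL);
	 */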
	bb,>=,n	%arg1, 30, 1f
	depwi	0,31,2, %arg1
	LDREG	0(%arg1), %arg1
1:
	be,l	0(%sr4,%arg1), %sr0, %r31
	copy	%r31, %rp
	LDREG	-FRAME_SIZE-RP_OFFSET(%sp), %rp
	bv	(%rp)
	LDREG	-FRAME_SIZE-REG_SZ(%sp), %sp
# endif /* CONFIG_64BIT */
ENDPROC_CFI(call_on_stack)
#endif /* CONFIG_IRQSTACKS */

ENTRY_CFI(get_register)
	/*
	 * get_register is used by the non-access TLB miss handlers to
	 * copy the value of the general register specified in r8 into
	 * r1. This routine can't be used for shadowed registers, since
	 * the rfir will restore the original value. So, for the shadowed
	 * registers we put a -1 into r1 to indicate that the register
	 * should not be used (the register being copied could also have
	 * a -1 in it, but that is OK, it just means that we will have
	 * to use the slow path instead).
	 */
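	/* Illustrative C equivalent (regno arrives in %r8, result in %r1):
	 *	r1 = is_shadowed(regno) ? -1 : gr[regno];
	 * where the shadowed registers are r1, r8, r9, r16, r17, r24 and
	 * r25. The blr below indexes into the two-instruction slots that
	 * follow, each ending with a branch back through %r25.
	 */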
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 */
	copy	%r0,%r1
	bv	%r0(%r25)	/* r1 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r2,%r1
	bv	%r0(%r25)	/* r3 */
	copy	%r3,%r1
	bv	%r0(%r25)	/* r4 */
	copy	%r4,%r1
	bv	%r0(%r25)	/* r5 */
	copy	%r5,%r1
	bv	%r0(%r25)	/* r6 */
	copy	%r6,%r1
	bv	%r0(%r25)	/* r7 */
	copy	%r7,%r1
	bv	%r0(%r25)	/* r8 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r9 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r10 */
	copy	%r10,%r1
	bv	%r0(%r25)	/* r11 */
	copy	%r11,%r1
	bv	%r0(%r25)	/* r12 */
	copy	%r12,%r1
	bv	%r0(%r25)	/* r13 */
	copy	%r13,%r1
	bv	%r0(%r25)	/* r14 */
	copy	%r14,%r1
	bv	%r0(%r25)	/* r15 */
	copy	%r15,%r1
	bv	%r0(%r25)	/* r16 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r17 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r18 */
	copy	%r18,%r1
	bv	%r0(%r25)	/* r19 */
	copy	%r19,%r1
	bv	%r0(%r25)	/* r20 */
	copy	%r20,%r1
	bv	%r0(%r25)	/* r21 */
	copy	%r21,%r1
	bv	%r0(%r25)	/* r22 */
	copy	%r22,%r1
	bv	%r0(%r25)	/* r23 */
	copy	%r23,%r1
	bv	%r0(%r25)	/* r24 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r25 - shadowed */
	ldi	-1,%r1
	bv	%r0(%r25)	/* r26 */
	copy	%r26,%r1
	bv	%r0(%r25)	/* r27 */
	copy	%r27,%r1
	bv	%r0(%r25)	/* r28 */
	copy	%r28,%r1
	bv	%r0(%r25)	/* r29 */
	copy	%r29,%r1
	bv	%r0(%r25)	/* r30 */
	copy	%r30,%r1
	bv	%r0(%r25)	/* r31 */
	copy	%r31,%r1
ENDPROC_CFI(get_register)


ENTRY_CFI(set_register)
	/*
	 * set_register is used by the non-access TLB miss handlers to
	 * copy the value of r1 into the general register specified in
	 * r8.
	 */
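	/* Illustrative C equivalent (regno in %r8, new value in %r1):
	 *	gr[regno] = r1;
	 * dispatched the same way, via blr into two-instruction slots.
	 */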
	blr	%r8,%r0
	nop
	bv	%r0(%r25)	/* r0 (silly, but it is a placeholder) */
	copy	%r1,%r0
	bv	%r0(%r25)	/* r1 */
	copy	%r1,%r1
	bv	%r0(%r25)	/* r2 */
	copy	%r1,%r2
	bv	%r0(%r25)	/* r3 */
	copy	%r1,%r3
	bv	%r0(%r25)	/* r4 */
	copy	%r1,%r4
	bv	%r0(%r25)	/* r5 */
	copy	%r1,%r5
	bv	%r0(%r25)	/* r6 */
	copy	%r1,%r6
	bv	%r0(%r25)	/* r7 */
	copy	%r1,%r7
	bv	%r0(%r25)	/* r8 */
	copy	%r1,%r8
	bv	%r0(%r25)	/* r9 */
	copy	%r1,%r9
	bv	%r0(%r25)	/* r10 */
	copy	%r1,%r10
	bv	%r0(%r25)	/* r11 */
	copy	%r1,%r11
	bv	%r0(%r25)	/* r12 */
	copy	%r1,%r12
	bv	%r0(%r25)	/* r13 */
	copy	%r1,%r13
	bv	%r0(%r25)	/* r14 */
	copy	%r1,%r14
	bv	%r0(%r25)	/* r15 */
	copy	%r1,%r15
	bv	%r0(%r25)	/* r16 */
	copy	%r1,%r16
	bv	%r0(%r25)	/* r17 */
	copy	%r1,%r17
	bv	%r0(%r25)	/* r18 */
	copy	%r1,%r18
	bv	%r0(%r25)	/* r19 */
	copy	%r1,%r19
	bv	%r0(%r25)	/* r20 */
	copy	%r1,%r20
	bv	%r0(%r25)	/* r21 */
	copy	%r1,%r21
	bv	%r0(%r25)	/* r22 */
	copy	%r1,%r22
	bv	%r0(%r25)	/* r23 */
	copy	%r1,%r23
	bv	%r0(%r25)	/* r24 */
	copy	%r1,%r24
	bv	%r0(%r25)	/* r25 */
	copy	%r1,%r25
	bv	%r0(%r25)	/* r26 */
	copy	%r1,%r26
	bv	%r0(%r25)	/* r27 */
	copy	%r1,%r27
	bv	%r0(%r25)	/* r28 */
	copy	%r1,%r28
	bv	%r0(%r25)	/* r29 */
	copy	%r1,%r29
	bv	%r0(%r25)	/* r30 */
	copy	%r1,%r30
	bv	%r0(%r25)	/* r31 */
	copy	%r1,%r31
ENDPROC_CFI(set_register)