1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * arch/alpha/kernel/entry.S
4 *
5 * Kernel entry-points.
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/thread_info.h>
10#include <asm/pal.h>
11#include <asm/errno.h>
12#include <asm/unistd.h>
13
14 .text
15 .set noat
16 .cfi_sections .debug_frame
17
18.macro CFI_START_OSF_FRAME func
19 .align 4
20 .globl \func
21 .type \func,@function
22\func:
23 .cfi_startproc simple
24 .cfi_return_column 64
25 .cfi_def_cfa $sp, 48
26 .cfi_rel_offset 64, 8
27 .cfi_rel_offset $gp, 16
28 .cfi_rel_offset $16, 24
29 .cfi_rel_offset $17, 32
30 .cfi_rel_offset $18, 40
31.endm
32
33.macro CFI_END_OSF_FRAME func
34 .cfi_endproc
35 .size \func, . - \func
36.endm
37
38/*
39 * This defines the normal kernel pt-regs layout.
40 *
41 * regs 9-15 preserved by C code
42 * regs 16-18 saved by PAL-code
43 * regs 29-30 saved and set up by PAL-code
44 * JRP - Save regs 16-18 in a special area of the stack, so that
45 * the palcode-provided values are available to the signal handler.
46 */
47
48.macro SAVE_ALL
49 subq $sp, SP_OFF, $sp
50 .cfi_adjust_cfa_offset SP_OFF
51 stq $0, 0($sp)
52 stq $1, 8($sp)
53 stq $2, 16($sp)
54 stq $3, 24($sp)
55 stq $4, 32($sp)
56 stq $28, 144($sp)
57 .cfi_rel_offset $0, 0
58 .cfi_rel_offset $1, 8
59 .cfi_rel_offset $2, 16
60 .cfi_rel_offset $3, 24
61 .cfi_rel_offset $4, 32
62 .cfi_rel_offset $28, 144
63 lda $2, alpha_mv
64 stq $5, 40($sp)
65 stq $6, 48($sp)
66 stq $7, 56($sp)
67 stq $8, 64($sp)
68 stq $19, 72($sp)
69 stq $20, 80($sp)
70 stq $21, 88($sp)
71 ldq $2, HAE_CACHE($2)
72 stq $22, 96($sp)
73 stq $23, 104($sp)
74 stq $24, 112($sp)
75 stq $25, 120($sp)
76 stq $26, 128($sp)
77 stq $27, 136($sp)
78 stq $2, 152($sp)
79 stq $16, 160($sp)
80 stq $17, 168($sp)
81 stq $18, 176($sp)
82 .cfi_rel_offset $5, 40
83 .cfi_rel_offset $6, 48
84 .cfi_rel_offset $7, 56
85 .cfi_rel_offset $8, 64
86 .cfi_rel_offset $19, 72
87 .cfi_rel_offset $20, 80
88 .cfi_rel_offset $21, 88
89 .cfi_rel_offset $22, 96
90 .cfi_rel_offset $23, 104
91 .cfi_rel_offset $24, 112
92 .cfi_rel_offset $25, 120
93 .cfi_rel_offset $26, 128
94 .cfi_rel_offset $27, 136
95.endm
96
97.macro RESTORE_ALL
98 lda $19, alpha_mv
99 ldq $0, 0($sp)
100 ldq $1, 8($sp)
101 ldq $2, 16($sp)
102 ldq $3, 24($sp)
103 ldq $21, 152($sp)
104 ldq $20, HAE_CACHE($19)
105 ldq $4, 32($sp)
106 ldq $5, 40($sp)
107 ldq $6, 48($sp)
108 ldq $7, 56($sp)
109 subq $20, $21, $20
110 ldq $8, 64($sp)
111 beq $20, 99f
112 ldq $20, HAE_REG($19)
113 stq $21, HAE_CACHE($19)
114 stq $21, 0($20)
11599: ldq $19, 72($sp)
116 ldq $20, 80($sp)
117 ldq $21, 88($sp)
118 ldq $22, 96($sp)
119 ldq $23, 104($sp)
120 ldq $24, 112($sp)
121 ldq $25, 120($sp)
122 ldq $26, 128($sp)
123 ldq $27, 136($sp)
124 ldq $28, 144($sp)
125 addq $sp, SP_OFF, $sp
126 .cfi_restore $0
127 .cfi_restore $1
128 .cfi_restore $2
129 .cfi_restore $3
130 .cfi_restore $4
131 .cfi_restore $5
132 .cfi_restore $6
133 .cfi_restore $7
134 .cfi_restore $8
135 .cfi_restore $19
136 .cfi_restore $20
137 .cfi_restore $21
138 .cfi_restore $22
139 .cfi_restore $23
140 .cfi_restore $24
141 .cfi_restore $25
142 .cfi_restore $26
143 .cfi_restore $27
144 .cfi_restore $28
145 .cfi_adjust_cfa_offset -SP_OFF
146.endm
147
148.macro DO_SWITCH_STACK
149 bsr $1, do_switch_stack
150 .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
151 .cfi_rel_offset $9, 0
152 .cfi_rel_offset $10, 8
153 .cfi_rel_offset $11, 16
154 .cfi_rel_offset $12, 24
155 .cfi_rel_offset $13, 32
156 .cfi_rel_offset $14, 40
157 .cfi_rel_offset $15, 48
158.endm
159
160.macro UNDO_SWITCH_STACK
161 bsr $1, undo_switch_stack
162 .cfi_restore $9
163 .cfi_restore $10
164 .cfi_restore $11
165 .cfi_restore $12
166 .cfi_restore $13
167 .cfi_restore $14
168 .cfi_restore $15
169 .cfi_adjust_cfa_offset -SWITCH_STACK_SIZE
170.endm
171
172/*
173 * Non-syscall kernel entry points.
174 */
175
176CFI_START_OSF_FRAME entInt
177 SAVE_ALL
178 lda $8, 0x3fff
179 lda $26, ret_from_sys_call
180 bic $sp, $8, $8
181 mov $sp, $19
182 jsr $31, do_entInt
183CFI_END_OSF_FRAME entInt
184
185CFI_START_OSF_FRAME entArith
186 SAVE_ALL
187 lda $8, 0x3fff
188 lda $26, ret_from_sys_call
189 bic $sp, $8, $8
190 mov $sp, $18
191 jsr $31, do_entArith
192CFI_END_OSF_FRAME entArith
193
194CFI_START_OSF_FRAME entMM
195 SAVE_ALL
196/* save $9 - $15 so the inline exception code can manipulate them. */
197 subq $sp, 64, $sp
198 .cfi_adjust_cfa_offset 64
199 stq $9, 0($sp)
200 stq $10, 8($sp)
201 stq $11, 16($sp)
202 stq $12, 24($sp)
203 stq $13, 32($sp)
204 stq $14, 40($sp)
205 stq $15, 48($sp)
206 .cfi_rel_offset $9, 0
207 .cfi_rel_offset $10, 8
208 .cfi_rel_offset $11, 16
209 .cfi_rel_offset $12, 24
210 .cfi_rel_offset $13, 32
211 .cfi_rel_offset $14, 40
212 .cfi_rel_offset $15, 48
213 addq $sp, 64, $19
214/* handle the fault */
215 lda $8, 0x3fff
216 bic $sp, $8, $8
217 jsr $26, do_page_fault
218/* reload the registers after the exception code played. */
219 ldq $9, 0($sp)
220 ldq $10, 8($sp)
221 ldq $11, 16($sp)
222 ldq $12, 24($sp)
223 ldq $13, 32($sp)
224 ldq $14, 40($sp)
225 ldq $15, 48($sp)
226 addq $sp, 64, $sp
227 .cfi_restore $9
228 .cfi_restore $10
229 .cfi_restore $11
230 .cfi_restore $12
231 .cfi_restore $13
232 .cfi_restore $14
233 .cfi_restore $15
234 .cfi_adjust_cfa_offset -64
235/* finish up the syscall as normal. */
236 br ret_from_sys_call
237CFI_END_OSF_FRAME entMM
238
239CFI_START_OSF_FRAME entIF
240 SAVE_ALL
241 lda $8, 0x3fff
242 lda $26, ret_from_sys_call
243 bic $sp, $8, $8
244 mov $sp, $17
245 jsr $31, do_entIF
246CFI_END_OSF_FRAME entIF
247
248CFI_START_OSF_FRAME entUna
249 lda $sp, -256($sp)
250 .cfi_adjust_cfa_offset 256
251 stq $0, 0($sp)
252 .cfi_rel_offset $0, 0
253 .cfi_remember_state
254 ldq $0, 256($sp) /* get PS */
255 stq $1, 8($sp)
256 stq $2, 16($sp)
257 stq $3, 24($sp)
258 and $0, 8, $0 /* user mode? */
259 stq $4, 32($sp)
260 bne $0, entUnaUser /* yup -> do user-level unaligned fault */
261 stq $5, 40($sp)
262 stq $6, 48($sp)
263 stq $7, 56($sp)
264 stq $8, 64($sp)
265 stq $9, 72($sp)
266 stq $10, 80($sp)
267 stq $11, 88($sp)
268 stq $12, 96($sp)
269 stq $13, 104($sp)
270 stq $14, 112($sp)
271 stq $15, 120($sp)
272 /* 16-18 PAL-saved */
273 stq $19, 152($sp)
274 stq $20, 160($sp)
275 stq $21, 168($sp)
276 stq $22, 176($sp)
277 stq $23, 184($sp)
278 stq $24, 192($sp)
279 stq $25, 200($sp)
280 stq $26, 208($sp)
281 stq $27, 216($sp)
282 stq $28, 224($sp)
283 mov $sp, $19
284 stq $gp, 232($sp)
285 .cfi_rel_offset $1, 1*8
286 .cfi_rel_offset $2, 2*8
287 .cfi_rel_offset $3, 3*8
288 .cfi_rel_offset $4, 4*8
289 .cfi_rel_offset $5, 5*8
290 .cfi_rel_offset $6, 6*8
291 .cfi_rel_offset $7, 7*8
292 .cfi_rel_offset $8, 8*8
293 .cfi_rel_offset $9, 9*8
294 .cfi_rel_offset $10, 10*8
295 .cfi_rel_offset $11, 11*8
296 .cfi_rel_offset $12, 12*8
297 .cfi_rel_offset $13, 13*8
298 .cfi_rel_offset $14, 14*8
299 .cfi_rel_offset $15, 15*8
300 .cfi_rel_offset $19, 19*8
301 .cfi_rel_offset $20, 20*8
302 .cfi_rel_offset $21, 21*8
303 .cfi_rel_offset $22, 22*8
304 .cfi_rel_offset $23, 23*8
305 .cfi_rel_offset $24, 24*8
306 .cfi_rel_offset $25, 25*8
307 .cfi_rel_offset $26, 26*8
308 .cfi_rel_offset $27, 27*8
309 .cfi_rel_offset $28, 28*8
310 .cfi_rel_offset $29, 29*8
311 lda $8, 0x3fff
312 stq $31, 248($sp)
313 bic $sp, $8, $8
314 jsr $26, do_entUna
315 ldq $0, 0($sp)
316 ldq $1, 8($sp)
317 ldq $2, 16($sp)
318 ldq $3, 24($sp)
319 ldq $4, 32($sp)
320 ldq $5, 40($sp)
321 ldq $6, 48($sp)
322 ldq $7, 56($sp)
323 ldq $8, 64($sp)
324 ldq $9, 72($sp)
325 ldq $10, 80($sp)
326 ldq $11, 88($sp)
327 ldq $12, 96($sp)
328 ldq $13, 104($sp)
329 ldq $14, 112($sp)
330 ldq $15, 120($sp)
331 /* 16-18 PAL-saved */
332 ldq $19, 152($sp)
333 ldq $20, 160($sp)
334 ldq $21, 168($sp)
335 ldq $22, 176($sp)
336 ldq $23, 184($sp)
337 ldq $24, 192($sp)
338 ldq $25, 200($sp)
339 ldq $26, 208($sp)
340 ldq $27, 216($sp)
341 ldq $28, 224($sp)
342 ldq $gp, 232($sp)
343 lda $sp, 256($sp)
344 .cfi_restore $1
345 .cfi_restore $2
346 .cfi_restore $3
347 .cfi_restore $4
348 .cfi_restore $5
349 .cfi_restore $6
350 .cfi_restore $7
351 .cfi_restore $8
352 .cfi_restore $9
353 .cfi_restore $10
354 .cfi_restore $11
355 .cfi_restore $12
356 .cfi_restore $13
357 .cfi_restore $14
358 .cfi_restore $15
359 .cfi_restore $19
360 .cfi_restore $20
361 .cfi_restore $21
362 .cfi_restore $22
363 .cfi_restore $23
364 .cfi_restore $24
365 .cfi_restore $25
366 .cfi_restore $26
367 .cfi_restore $27
368 .cfi_restore $28
369 .cfi_restore $29
370 .cfi_adjust_cfa_offset -256
371 call_pal PAL_rti
372
373 .align 4
374entUnaUser:
375 .cfi_restore_state
376 ldq $0, 0($sp) /* restore original $0 */
377 lda $sp, 256($sp) /* pop entUna's stack frame */
378 .cfi_restore $0
379 .cfi_adjust_cfa_offset -256
380 SAVE_ALL /* setup normal kernel stack */
381 lda $sp, -64($sp)
382 .cfi_adjust_cfa_offset 64
383 stq $9, 0($sp)
384 stq $10, 8($sp)
385 stq $11, 16($sp)
386 stq $12, 24($sp)
387 stq $13, 32($sp)
388 stq $14, 40($sp)
389 stq $15, 48($sp)
390 .cfi_rel_offset $9, 0
391 .cfi_rel_offset $10, 8
392 .cfi_rel_offset $11, 16
393 .cfi_rel_offset $12, 24
394 .cfi_rel_offset $13, 32
395 .cfi_rel_offset $14, 40
396 .cfi_rel_offset $15, 48
397 lda $8, 0x3fff
398 addq $sp, 64, $19
399 bic $sp, $8, $8
400 jsr $26, do_entUnaUser
401 ldq $9, 0($sp)
402 ldq $10, 8($sp)
403 ldq $11, 16($sp)
404 ldq $12, 24($sp)
405 ldq $13, 32($sp)
406 ldq $14, 40($sp)
407 ldq $15, 48($sp)
408 lda $sp, 64($sp)
409 .cfi_restore $9
410 .cfi_restore $10
411 .cfi_restore $11
412 .cfi_restore $12
413 .cfi_restore $13
414 .cfi_restore $14
415 .cfi_restore $15
416 .cfi_adjust_cfa_offset -64
417 br ret_from_sys_call
418CFI_END_OSF_FRAME entUna
419
420CFI_START_OSF_FRAME entDbg
421 SAVE_ALL
422 lda $8, 0x3fff
423 lda $26, ret_from_sys_call
424 bic $sp, $8, $8
425 mov $sp, $16
426 jsr $31, do_entDbg
427CFI_END_OSF_FRAME entDbg
428
429/*
430 * The system call entry point is special. Most importantly, it looks
431 * like a function call to userspace as far as clobbered registers. We
432 * do preserve the argument registers (for syscall restarts) and $26
433 * (for leaf syscall functions).
434 *
435 * So much for theory. We don't take advantage of this yet.
436 *
437 * Note that a0-a2 are not saved by PALcode as with the other entry points.
438 */
439
440 .align 4
441 .globl entSys
442 .type entSys, @function
443 .cfi_startproc simple
444 .cfi_return_column 64
445 .cfi_def_cfa $sp, 48
446 .cfi_rel_offset 64, 8
447 .cfi_rel_offset $gp, 16
448entSys:
449 SAVE_ALL
450 lda $8, 0x3fff
451 bic $sp, $8, $8
452 lda $4, NR_syscalls($31)
453 stq $16, SP_OFF+24($sp)
454 lda $5, sys_call_table
455 lda $27, sys_ni_syscall
456 cmpult $0, $4, $4
457 ldl $3, TI_FLAGS($8)
458 stq $17, SP_OFF+32($sp)
459 s8addq $0, $5, $5
460 stq $18, SP_OFF+40($sp)
461 .cfi_rel_offset $16, SP_OFF+24
462 .cfi_rel_offset $17, SP_OFF+32
463 .cfi_rel_offset $18, SP_OFF+40
464#ifdef CONFIG_AUDITSYSCALL
465 lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
466 and $3, $6, $3
467 bne $3, strace
468#else
469 blbs $3, strace /* check for SYSCALL_TRACE in disguise */
470#endif
471 beq $4, 1f
472 ldq $27, 0($5)
4731: jsr $26, ($27), sys_ni_syscall
474 ldgp $gp, 0($26)
475 blt $0, $syscall_error /* the call failed */
476$ret_success:
477 stq $0, 0($sp)
478 stq $31, 72($sp) /* a3=0 => no error */
479
480 .align 4
481 .globl ret_from_sys_call
482ret_from_sys_call:
483 cmovne $26, 0, $18 /* $18 = 0 => non-restartable */
484 ldq $0, SP_OFF($sp)
485 and $0, 8, $0
486 beq $0, ret_to_kernel
487ret_to_user:
488 /* Make sure need_resched and sigpending don't change between
489 sampling and the rti. */
490 lda $16, 7
491 call_pal PAL_swpipl
492 ldl $17, TI_FLAGS($8)
493 and $17, _TIF_WORK_MASK, $2
494 bne $2, work_pending
495restore_all:
496 ldl $2, TI_STATUS($8)
497 and $2, TS_SAVED_FP | TS_RESTORE_FP, $3
498 bne $3, restore_fpu
499restore_other:
500 .cfi_remember_state
501 RESTORE_ALL
502 call_pal PAL_rti
503
504ret_to_kernel:
505 .cfi_restore_state
506 lda $16, 7
507 call_pal PAL_swpipl
508 br restore_other
509
510 .align 3
511$syscall_error:
512 /*
513 * Some system calls (e.g., ptrace) can return arbitrary
514 * values which might normally be mistaken as error numbers.
515 * Those functions must zero $0 (v0) directly in the stack
516 * frame to indicate that a negative return value wasn't an
517 * error number..
518 */
519 ldq $18, 0($sp) /* old syscall nr (zero if success) */
520 beq $18, $ret_success
521
522 ldq $19, 72($sp) /* .. and this a3 */
523 subq $31, $0, $0 /* with error in v0 */
524 addq $31, 1, $1 /* set a3 for errno return */
525 stq $0, 0($sp)
526 mov $31, $26 /* tell "ret_from_sys_call" we can restart */
527 stq $1, 72($sp) /* a3 for return */
528 br ret_from_sys_call
529
530/*
531 * Do all cleanup when returning from all interrupts and system calls.
532 *
533 * Arguments:
534 * $8: current.
535 * $17: TI_FLAGS.
536 * $18: The old syscall number, or zero if this is not a return
537 * from a syscall that errored and is possibly restartable.
538 * $19: The old a3 value
539 */
540
541 .align 4
542 .type work_pending, @function
543work_pending:
544 and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL, $2
545 bne $2, $work_notifysig
546
547$work_resched:
548 /*
549 * We can get here only if we returned from syscall without SIGPENDING
550 * or got through work_notifysig already. Either case means no syscall
551 * restarts for us, so let $18 and $19 burn.
552 */
553 jsr $26, schedule
554 mov 0, $18
555 br ret_to_user
556
557$work_notifysig:
558 mov $sp, $16
559 DO_SWITCH_STACK
560 jsr $26, do_work_pending
561 UNDO_SWITCH_STACK
562 br restore_all
563
564/*
565 * PTRACE syscall handler
566 */
567
568 .align 4
569 .type strace, @function
570strace:
571 /* set up signal stack, call syscall_trace */
572 // NB: if anyone adds preemption, this block will need to be protected
573 ldl $1, TI_STATUS($8)
574 and $1, TS_SAVED_FP, $3
575 or $1, TS_SAVED_FP, $2
576 bne $3, 1f
577 stl $2, TI_STATUS($8)
578 bsr $26, __save_fpu
5791:
580 DO_SWITCH_STACK
581 jsr $26, syscall_trace_enter /* returns the syscall number */
582 UNDO_SWITCH_STACK
583
584 /* get the arguments back.. */
585 ldq $16, SP_OFF+24($sp)
586 ldq $17, SP_OFF+32($sp)
587 ldq $18, SP_OFF+40($sp)
588 ldq $19, 72($sp)
589 ldq $20, 80($sp)
590 ldq $21, 88($sp)
591
592 /* get the system call pointer.. */
593 lda $1, NR_syscalls($31)
594 lda $2, sys_call_table
595 lda $27, sys_ni_syscall
596 cmpult $0, $1, $1
597 s8addq $0, $2, $2
598 beq $1, 1f
599 ldq $27, 0($2)
6001: jsr $26, ($27), sys_gettimeofday
601ret_from_straced:
602 ldgp $gp, 0($26)
603
604 /* check return.. */
605 blt $0, $strace_error /* the call failed */
606$strace_success:
607 stq $31, 72($sp) /* a3=0 => no error */
608 stq $0, 0($sp) /* save return value */
609
610 DO_SWITCH_STACK
611 jsr $26, syscall_trace_leave
612 UNDO_SWITCH_STACK
613 br $31, ret_from_sys_call
614
615 .align 3
616$strace_error:
617 ldq $18, 0($sp) /* old syscall nr (zero if success) */
618 beq $18, $strace_success
619 ldq $19, 72($sp) /* .. and this a3 */
620
621 subq $31, $0, $0 /* with error in v0 */
622 addq $31, 1, $1 /* set a3 for errno return */
623 stq $0, 0($sp)
624 stq $1, 72($sp) /* a3 for return */
625
626 DO_SWITCH_STACK
627 mov $18, $9 /* save old syscall number */
628 mov $19, $10 /* save old a3 */
629 jsr $26, syscall_trace_leave
630 mov $9, $18
631 mov $10, $19
632 UNDO_SWITCH_STACK
633
634 mov $31, $26 /* tell "ret_from_sys_call" we can restart */
635 br ret_from_sys_call
636CFI_END_OSF_FRAME entSys
637
638/*
639 * Save and restore the switch stack -- aka the balance of the user context.
640 */
641
642 .align 4
643 .type do_switch_stack, @function
644 .cfi_startproc simple
645 .cfi_return_column 64
646 .cfi_def_cfa $sp, 0
647 .cfi_register 64, $1
648do_switch_stack:
649 lda $sp, -SWITCH_STACK_SIZE($sp)
650 .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
651 stq $9, 0($sp)
652 stq $10, 8($sp)
653 stq $11, 16($sp)
654 stq $12, 24($sp)
655 stq $13, 32($sp)
656 stq $14, 40($sp)
657 stq $15, 48($sp)
658 stq $26, 56($sp)
659 ret $31, ($1), 1
660 .cfi_endproc
661 .size do_switch_stack, .-do_switch_stack
662
663 .align 4
664 .type undo_switch_stack, @function
665 .cfi_startproc simple
666 .cfi_def_cfa $sp, 0
667 .cfi_register 64, $1
668undo_switch_stack:
669 ldq $9, 0($sp)
670 ldq $10, 8($sp)
671 ldq $11, 16($sp)
672 ldq $12, 24($sp)
673 ldq $13, 32($sp)
674 ldq $14, 40($sp)
675 ldq $15, 48($sp)
676 ldq $26, 56($sp)
677 lda $sp, SWITCH_STACK_SIZE($sp)
678 ret $31, ($1), 1
679 .cfi_endproc
680 .size undo_switch_stack, .-undo_switch_stack
681
682#define FR(n) n * 8 + TI_FP($8)
683 .align 4
684 .globl __save_fpu
685 .type __save_fpu, @function
686__save_fpu:
687#define V(n) stt $f##n, FR(n)
688 V( 0); V( 1); V( 2); V( 3)
689 V( 4); V( 5); V( 6); V( 7)
690 V( 8); V( 9); V(10); V(11)
691 V(12); V(13); V(14); V(15)
692 V(16); V(17); V(18); V(19)
693 V(20); V(21); V(22); V(23)
694 V(24); V(25); V(26); V(27)
695 mf_fpcr $f0 # get fpcr
696 V(28); V(29); V(30)
697 stt $f0, FR(31) # save fpcr in slot of $f31
698 ldt $f0, FR(0) # don't let "__save_fpu" change fp state.
699 ret
700#undef V
701 .size __save_fpu, .-__save_fpu
702
703 .align 4
704restore_fpu:
705 and $3, TS_RESTORE_FP, $3
706 bic $2, TS_SAVED_FP | TS_RESTORE_FP, $2
707 beq $3, 1f
708#define V(n) ldt $f##n, FR(n)
709 ldt $f30, FR(31) # get saved fpcr
710 V( 0); V( 1); V( 2); V( 3)
711 mt_fpcr $f30 # install saved fpcr
712 V( 4); V( 5); V( 6); V( 7)
713 V( 8); V( 9); V(10); V(11)
714 V(12); V(13); V(14); V(15)
715 V(16); V(17); V(18); V(19)
716 V(20); V(21); V(22); V(23)
717 V(24); V(25); V(26); V(27)
718 V(28); V(29); V(30)
7191: stl $2, TI_STATUS($8)
720 br restore_other
721#undef V
722
723
724/*
725 * The meat of the context switch code.
726 */
727 .align 4
728 .globl alpha_switch_to
729 .type alpha_switch_to, @function
730 .cfi_startproc
731alpha_switch_to:
732 DO_SWITCH_STACK
733 ldl $1, TI_STATUS($8)
734 and $1, TS_RESTORE_FP, $3
735 bne $3, 1f
736 or $1, TS_RESTORE_FP | TS_SAVED_FP, $2
737 and $1, TS_SAVED_FP, $3
738 stl $2, TI_STATUS($8)
739 bne $3, 1f
740 bsr $26, __save_fpu
7411:
742 call_pal PAL_swpctx
743 lda $8, 0x3fff
744 UNDO_SWITCH_STACK
745 bic $sp, $8, $8
746 mov $17, $0
747 ret
748 .cfi_endproc
749 .size alpha_switch_to, .-alpha_switch_to
750
751/*
752 * New processes begin life here.
753 */
754
755 .globl ret_from_fork
756 .align 4
757 .ent ret_from_fork
758ret_from_fork:
759 lda $26, ret_to_user
760 mov $17, $16
761 jmp $31, schedule_tail
762.end ret_from_fork
763
764/*
765 * ... and new kernel threads - here
766 */
767 .align 4
768 .globl ret_from_kernel_thread
769 .ent ret_from_kernel_thread
770ret_from_kernel_thread:
771 mov $17, $16
772 jsr $26, schedule_tail
773 mov $9, $27
774 mov $10, $16
775 jsr $26, ($9)
776 br $31, ret_to_user
777.end ret_from_kernel_thread
778
779
/*
 * Special system calls.  These are special in that they need a full
 * switch_stack frame saved and restored around the C call.
 */
784
785.macro fork_like name
786 .align 4
787 .globl alpha_\name
788 .ent alpha_\name
789alpha_\name:
790 .prologue 0
791 bsr $1, do_switch_stack
792 // NB: if anyone adds preemption, this block will need to be protected
793 ldl $1, TI_STATUS($8)
794 and $1, TS_SAVED_FP, $3
795 or $1, TS_SAVED_FP, $2
796 bne $3, 1f
797 stl $2, TI_STATUS($8)
798 bsr $26, __save_fpu
7991:
800 jsr $26, sys_\name
801 ldq $26, 56($sp)
802 lda $sp, SWITCH_STACK_SIZE($sp)
803 ret
804.end alpha_\name
805.endm
806
807fork_like fork
808fork_like vfork
809fork_like clone
810fork_like clone3
811
812.macro sigreturn_like name
813 .align 4
814 .globl sys_\name
815 .ent sys_\name
816sys_\name:
817 .prologue 0
818 lda $9, ret_from_straced
819 cmpult $26, $9, $9
820 lda $sp, -SWITCH_STACK_SIZE($sp)
821 jsr $26, do_\name
822 bne $9, 1f
823 jsr $26, syscall_trace_leave
8241: br $1, undo_switch_stack
825 br ret_from_sys_call
826.end sys_\name
827.endm
828
829sigreturn_like sigreturn
830sigreturn_like rt_sigreturn
831
832 .align 4
833 .globl alpha_syscall_zero
834 .ent alpha_syscall_zero
835alpha_syscall_zero:
836 .prologue 0
837 /* Special because it needs to do something opposite to
838 force_successful_syscall_return(). We use the saved
839 syscall number for that, zero meaning "not an error".
840 That works nicely, but for real syscall 0 we need to
841 make sure that this logics doesn't get confused.
842 Store a non-zero there - -ENOSYS we need in register
843 for our return value will do just fine.
844 */
845 lda $0, -ENOSYS
846 unop
847 stq $0, 0($sp)
848 ret
849.end alpha_syscall_zero
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * arch/alpha/kernel/entry.S
4 *
5 * Kernel entry-points.
6 */
7
8#include <asm/asm-offsets.h>
9#include <asm/thread_info.h>
10#include <asm/pal.h>
11#include <asm/errno.h>
12#include <asm/unistd.h>
13
14 .text
15 .set noat
16 .cfi_sections .debug_frame
17
18/* Stack offsets. */
19#define SP_OFF 184
20#define SWITCH_STACK_SIZE 320
21
22.macro CFI_START_OSF_FRAME func
23 .align 4
24 .globl \func
25 .type \func,@function
26\func:
27 .cfi_startproc simple
28 .cfi_return_column 64
29 .cfi_def_cfa $sp, 48
30 .cfi_rel_offset 64, 8
31 .cfi_rel_offset $gp, 16
32 .cfi_rel_offset $16, 24
33 .cfi_rel_offset $17, 32
34 .cfi_rel_offset $18, 40
35.endm
36
37.macro CFI_END_OSF_FRAME func
38 .cfi_endproc
39 .size \func, . - \func
40.endm
41
42/*
43 * This defines the normal kernel pt-regs layout.
44 *
45 * regs 9-15 preserved by C code
46 * regs 16-18 saved by PAL-code
47 * regs 29-30 saved and set up by PAL-code
48 * JRP - Save regs 16-18 in a special area of the stack, so that
49 * the palcode-provided values are available to the signal handler.
50 */
51
52.macro SAVE_ALL
53 subq $sp, SP_OFF, $sp
54 .cfi_adjust_cfa_offset SP_OFF
55 stq $0, 0($sp)
56 stq $1, 8($sp)
57 stq $2, 16($sp)
58 stq $3, 24($sp)
59 stq $4, 32($sp)
60 stq $28, 144($sp)
61 .cfi_rel_offset $0, 0
62 .cfi_rel_offset $1, 8
63 .cfi_rel_offset $2, 16
64 .cfi_rel_offset $3, 24
65 .cfi_rel_offset $4, 32
66 .cfi_rel_offset $28, 144
67 lda $2, alpha_mv
68 stq $5, 40($sp)
69 stq $6, 48($sp)
70 stq $7, 56($sp)
71 stq $8, 64($sp)
72 stq $19, 72($sp)
73 stq $20, 80($sp)
74 stq $21, 88($sp)
75 ldq $2, HAE_CACHE($2)
76 stq $22, 96($sp)
77 stq $23, 104($sp)
78 stq $24, 112($sp)
79 stq $25, 120($sp)
80 stq $26, 128($sp)
81 stq $27, 136($sp)
82 stq $2, 152($sp)
83 stq $16, 160($sp)
84 stq $17, 168($sp)
85 stq $18, 176($sp)
86 .cfi_rel_offset $5, 40
87 .cfi_rel_offset $6, 48
88 .cfi_rel_offset $7, 56
89 .cfi_rel_offset $8, 64
90 .cfi_rel_offset $19, 72
91 .cfi_rel_offset $20, 80
92 .cfi_rel_offset $21, 88
93 .cfi_rel_offset $22, 96
94 .cfi_rel_offset $23, 104
95 .cfi_rel_offset $24, 112
96 .cfi_rel_offset $25, 120
97 .cfi_rel_offset $26, 128
98 .cfi_rel_offset $27, 136
99.endm
100
101.macro RESTORE_ALL
102 lda $19, alpha_mv
103 ldq $0, 0($sp)
104 ldq $1, 8($sp)
105 ldq $2, 16($sp)
106 ldq $3, 24($sp)
107 ldq $21, 152($sp)
108 ldq $20, HAE_CACHE($19)
109 ldq $4, 32($sp)
110 ldq $5, 40($sp)
111 ldq $6, 48($sp)
112 ldq $7, 56($sp)
113 subq $20, $21, $20
114 ldq $8, 64($sp)
115 beq $20, 99f
116 ldq $20, HAE_REG($19)
117 stq $21, HAE_CACHE($19)
118 stq $21, 0($20)
11999: ldq $19, 72($sp)
120 ldq $20, 80($sp)
121 ldq $21, 88($sp)
122 ldq $22, 96($sp)
123 ldq $23, 104($sp)
124 ldq $24, 112($sp)
125 ldq $25, 120($sp)
126 ldq $26, 128($sp)
127 ldq $27, 136($sp)
128 ldq $28, 144($sp)
129 addq $sp, SP_OFF, $sp
130 .cfi_restore $0
131 .cfi_restore $1
132 .cfi_restore $2
133 .cfi_restore $3
134 .cfi_restore $4
135 .cfi_restore $5
136 .cfi_restore $6
137 .cfi_restore $7
138 .cfi_restore $8
139 .cfi_restore $19
140 .cfi_restore $20
141 .cfi_restore $21
142 .cfi_restore $22
143 .cfi_restore $23
144 .cfi_restore $24
145 .cfi_restore $25
146 .cfi_restore $26
147 .cfi_restore $27
148 .cfi_restore $28
149 .cfi_adjust_cfa_offset -SP_OFF
150.endm
151
152.macro DO_SWITCH_STACK
153 bsr $1, do_switch_stack
154 .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
155 .cfi_rel_offset $9, 0
156 .cfi_rel_offset $10, 8
157 .cfi_rel_offset $11, 16
158 .cfi_rel_offset $12, 24
159 .cfi_rel_offset $13, 32
160 .cfi_rel_offset $14, 40
161 .cfi_rel_offset $15, 48
162 /* We don't really care about the FP registers for debugging. */
163.endm
164
165.macro UNDO_SWITCH_STACK
166 bsr $1, undo_switch_stack
167 .cfi_restore $9
168 .cfi_restore $10
169 .cfi_restore $11
170 .cfi_restore $12
171 .cfi_restore $13
172 .cfi_restore $14
173 .cfi_restore $15
174 .cfi_adjust_cfa_offset -SWITCH_STACK_SIZE
175.endm
176
177/*
178 * Non-syscall kernel entry points.
179 */
180
181CFI_START_OSF_FRAME entInt
182 SAVE_ALL
183 lda $8, 0x3fff
184 lda $26, ret_from_sys_call
185 bic $sp, $8, $8
186 mov $sp, $19
187 jsr $31, do_entInt
188CFI_END_OSF_FRAME entInt
189
190CFI_START_OSF_FRAME entArith
191 SAVE_ALL
192 lda $8, 0x3fff
193 lda $26, ret_from_sys_call
194 bic $sp, $8, $8
195 mov $sp, $18
196 jsr $31, do_entArith
197CFI_END_OSF_FRAME entArith
198
199CFI_START_OSF_FRAME entMM
200 SAVE_ALL
201/* save $9 - $15 so the inline exception code can manipulate them. */
202 subq $sp, 56, $sp
203 .cfi_adjust_cfa_offset 56
204 stq $9, 0($sp)
205 stq $10, 8($sp)
206 stq $11, 16($sp)
207 stq $12, 24($sp)
208 stq $13, 32($sp)
209 stq $14, 40($sp)
210 stq $15, 48($sp)
211 .cfi_rel_offset $9, 0
212 .cfi_rel_offset $10, 8
213 .cfi_rel_offset $11, 16
214 .cfi_rel_offset $12, 24
215 .cfi_rel_offset $13, 32
216 .cfi_rel_offset $14, 40
217 .cfi_rel_offset $15, 48
218 addq $sp, 56, $19
219/* handle the fault */
220 lda $8, 0x3fff
221 bic $sp, $8, $8
222 jsr $26, do_page_fault
223/* reload the registers after the exception code played. */
224 ldq $9, 0($sp)
225 ldq $10, 8($sp)
226 ldq $11, 16($sp)
227 ldq $12, 24($sp)
228 ldq $13, 32($sp)
229 ldq $14, 40($sp)
230 ldq $15, 48($sp)
231 addq $sp, 56, $sp
232 .cfi_restore $9
233 .cfi_restore $10
234 .cfi_restore $11
235 .cfi_restore $12
236 .cfi_restore $13
237 .cfi_restore $14
238 .cfi_restore $15
239 .cfi_adjust_cfa_offset -56
240/* finish up the syscall as normal. */
241 br ret_from_sys_call
242CFI_END_OSF_FRAME entMM
243
244CFI_START_OSF_FRAME entIF
245 SAVE_ALL
246 lda $8, 0x3fff
247 lda $26, ret_from_sys_call
248 bic $sp, $8, $8
249 mov $sp, $17
250 jsr $31, do_entIF
251CFI_END_OSF_FRAME entIF
252
253CFI_START_OSF_FRAME entUna
254 lda $sp, -256($sp)
255 .cfi_adjust_cfa_offset 256
256 stq $0, 0($sp)
257 .cfi_rel_offset $0, 0
258 .cfi_remember_state
259 ldq $0, 256($sp) /* get PS */
260 stq $1, 8($sp)
261 stq $2, 16($sp)
262 stq $3, 24($sp)
263 and $0, 8, $0 /* user mode? */
264 stq $4, 32($sp)
265 bne $0, entUnaUser /* yup -> do user-level unaligned fault */
266 stq $5, 40($sp)
267 stq $6, 48($sp)
268 stq $7, 56($sp)
269 stq $8, 64($sp)
270 stq $9, 72($sp)
271 stq $10, 80($sp)
272 stq $11, 88($sp)
273 stq $12, 96($sp)
274 stq $13, 104($sp)
275 stq $14, 112($sp)
276 stq $15, 120($sp)
277 /* 16-18 PAL-saved */
278 stq $19, 152($sp)
279 stq $20, 160($sp)
280 stq $21, 168($sp)
281 stq $22, 176($sp)
282 stq $23, 184($sp)
283 stq $24, 192($sp)
284 stq $25, 200($sp)
285 stq $26, 208($sp)
286 stq $27, 216($sp)
287 stq $28, 224($sp)
288 mov $sp, $19
289 stq $gp, 232($sp)
290 .cfi_rel_offset $1, 1*8
291 .cfi_rel_offset $2, 2*8
292 .cfi_rel_offset $3, 3*8
293 .cfi_rel_offset $4, 4*8
294 .cfi_rel_offset $5, 5*8
295 .cfi_rel_offset $6, 6*8
296 .cfi_rel_offset $7, 7*8
297 .cfi_rel_offset $8, 8*8
298 .cfi_rel_offset $9, 9*8
299 .cfi_rel_offset $10, 10*8
300 .cfi_rel_offset $11, 11*8
301 .cfi_rel_offset $12, 12*8
302 .cfi_rel_offset $13, 13*8
303 .cfi_rel_offset $14, 14*8
304 .cfi_rel_offset $15, 15*8
305 .cfi_rel_offset $19, 19*8
306 .cfi_rel_offset $20, 20*8
307 .cfi_rel_offset $21, 21*8
308 .cfi_rel_offset $22, 22*8
309 .cfi_rel_offset $23, 23*8
310 .cfi_rel_offset $24, 24*8
311 .cfi_rel_offset $25, 25*8
312 .cfi_rel_offset $26, 26*8
313 .cfi_rel_offset $27, 27*8
314 .cfi_rel_offset $28, 28*8
315 .cfi_rel_offset $29, 29*8
316 lda $8, 0x3fff
317 stq $31, 248($sp)
318 bic $sp, $8, $8
319 jsr $26, do_entUna
320 ldq $0, 0($sp)
321 ldq $1, 8($sp)
322 ldq $2, 16($sp)
323 ldq $3, 24($sp)
324 ldq $4, 32($sp)
325 ldq $5, 40($sp)
326 ldq $6, 48($sp)
327 ldq $7, 56($sp)
328 ldq $8, 64($sp)
329 ldq $9, 72($sp)
330 ldq $10, 80($sp)
331 ldq $11, 88($sp)
332 ldq $12, 96($sp)
333 ldq $13, 104($sp)
334 ldq $14, 112($sp)
335 ldq $15, 120($sp)
336 /* 16-18 PAL-saved */
337 ldq $19, 152($sp)
338 ldq $20, 160($sp)
339 ldq $21, 168($sp)
340 ldq $22, 176($sp)
341 ldq $23, 184($sp)
342 ldq $24, 192($sp)
343 ldq $25, 200($sp)
344 ldq $26, 208($sp)
345 ldq $27, 216($sp)
346 ldq $28, 224($sp)
347 ldq $gp, 232($sp)
348 lda $sp, 256($sp)
349 .cfi_restore $1
350 .cfi_restore $2
351 .cfi_restore $3
352 .cfi_restore $4
353 .cfi_restore $5
354 .cfi_restore $6
355 .cfi_restore $7
356 .cfi_restore $8
357 .cfi_restore $9
358 .cfi_restore $10
359 .cfi_restore $11
360 .cfi_restore $12
361 .cfi_restore $13
362 .cfi_restore $14
363 .cfi_restore $15
364 .cfi_restore $19
365 .cfi_restore $20
366 .cfi_restore $21
367 .cfi_restore $22
368 .cfi_restore $23
369 .cfi_restore $24
370 .cfi_restore $25
371 .cfi_restore $26
372 .cfi_restore $27
373 .cfi_restore $28
374 .cfi_restore $29
375 .cfi_adjust_cfa_offset -256
376 call_pal PAL_rti
377
378 .align 4
379entUnaUser:
380 .cfi_restore_state
381 ldq $0, 0($sp) /* restore original $0 */
382 lda $sp, 256($sp) /* pop entUna's stack frame */
383 .cfi_restore $0
384 .cfi_adjust_cfa_offset -256
385 SAVE_ALL /* setup normal kernel stack */
386 lda $sp, -56($sp)
387 .cfi_adjust_cfa_offset 56
388 stq $9, 0($sp)
389 stq $10, 8($sp)
390 stq $11, 16($sp)
391 stq $12, 24($sp)
392 stq $13, 32($sp)
393 stq $14, 40($sp)
394 stq $15, 48($sp)
395 .cfi_rel_offset $9, 0
396 .cfi_rel_offset $10, 8
397 .cfi_rel_offset $11, 16
398 .cfi_rel_offset $12, 24
399 .cfi_rel_offset $13, 32
400 .cfi_rel_offset $14, 40
401 .cfi_rel_offset $15, 48
402 lda $8, 0x3fff
403 addq $sp, 56, $19
404 bic $sp, $8, $8
405 jsr $26, do_entUnaUser
406 ldq $9, 0($sp)
407 ldq $10, 8($sp)
408 ldq $11, 16($sp)
409 ldq $12, 24($sp)
410 ldq $13, 32($sp)
411 ldq $14, 40($sp)
412 ldq $15, 48($sp)
413 lda $sp, 56($sp)
414 .cfi_restore $9
415 .cfi_restore $10
416 .cfi_restore $11
417 .cfi_restore $12
418 .cfi_restore $13
419 .cfi_restore $14
420 .cfi_restore $15
421 .cfi_adjust_cfa_offset -56
422 br ret_from_sys_call
423CFI_END_OSF_FRAME entUna
424
425CFI_START_OSF_FRAME entDbg
426 SAVE_ALL
427 lda $8, 0x3fff
428 lda $26, ret_from_sys_call
429 bic $sp, $8, $8
430 mov $sp, $16
431 jsr $31, do_entDbg
432CFI_END_OSF_FRAME entDbg
433
434/*
435 * The system call entry point is special. Most importantly, it looks
436 * like a function call to userspace as far as clobbered registers. We
437 * do preserve the argument registers (for syscall restarts) and $26
438 * (for leaf syscall functions).
439 *
440 * So much for theory. We don't take advantage of this yet.
441 *
442 * Note that a0-a2 are not saved by PALcode as with the other entry points.
443 */
444
445 .align 4
446 .globl entSys
447 .type entSys, @function
448 .cfi_startproc simple
449 .cfi_return_column 64
450 .cfi_def_cfa $sp, 48
451 .cfi_rel_offset 64, 8
452 .cfi_rel_offset $gp, 16
453entSys:
454 SAVE_ALL
455 lda $8, 0x3fff
456 bic $sp, $8, $8
457 lda $4, NR_SYSCALLS($31)
458 stq $16, SP_OFF+24($sp)
459 lda $5, sys_call_table
460 lda $27, sys_ni_syscall
461 cmpult $0, $4, $4
462 ldl $3, TI_FLAGS($8)
463 stq $17, SP_OFF+32($sp)
464 s8addq $0, $5, $5
465 stq $18, SP_OFF+40($sp)
466 .cfi_rel_offset $16, SP_OFF+24
467 .cfi_rel_offset $17, SP_OFF+32
468 .cfi_rel_offset $18, SP_OFF+40
469#ifdef CONFIG_AUDITSYSCALL
470 lda $6, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
471 and $3, $6, $3
472#endif
473 bne $3, strace
474 beq $4, 1f
475 ldq $27, 0($5)
4761: jsr $26, ($27), alpha_ni_syscall
477 ldgp $gp, 0($26)
478 blt $0, $syscall_error /* the call failed */
479 stq $0, 0($sp)
480 stq $31, 72($sp) /* a3=0 => no error */
481
	.align	4
	.globl	ret_from_sys_call
ret_from_sys_call:
	cmovne	$26, 0, $18	/* $18 = 0 => non-restartable */
	ldq	$0, SP_OFF($sp)	/* saved processor status word */
	and	$0, 8, $0	/* isolate the previous-mode bit */
	beq	$0, ret_to_kernel
ret_to_user:
	/* Make sure need_resched and sigpending don't change between
	   sampling and the rti.  */
	lda	$16, 7
	call_pal PAL_swpipl	/* raise IPL to 7: block interrupts */
	ldl	$17, TI_FLAGS($8)
	and	$17, _TIF_WORK_MASK, $2
	bne	$2, work_pending	/* pending work before user return */
restore_all:
	.cfi_remember_state
	RESTORE_ALL
	call_pal PAL_rti	/* pop the PAL frame and resume */

ret_to_kernel:
	.cfi_restore_state
	lda	$16, 7		/* same IPL dance, but no work check */
	call_pal PAL_swpipl
	br	restore_all
507
	.align	3
$syscall_error:
	/*
	 * Some system calls (e.g., ptrace) can return arbitrary
	 * values which might normally be mistaken as error numbers.
	 * Those functions must zero $0 (v0) directly in the stack
	 * frame to indicate that a negative return value wasn't an
	 * error number.
	 */
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $ret_success

	ldq	$19, 72($sp)	/* .. and this a3 */
	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)	/* regs->r0 = -error (positive errno) */
	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	stq	$1, 72($sp)	/* a3 for return */
	br	ret_from_sys_call

$ret_success:
	stq	$0, 0($sp)	/* negative value, but explicitly not an error */
	stq	$31, 72($sp)	/* a3=0 => no error */
	br	ret_from_sys_call
532
533/*
534 * Do all cleanup when returning from all interrupts and system calls.
535 *
536 * Arguments:
537 * $8: current.
538 * $17: TI_FLAGS.
539 * $18: The old syscall number, or zero if this is not a return
540 * from a syscall that errored and is possibly restartable.
541 * $19: The old a3 value
542 */
543
	.align	4
	.type	work_pending, @function
work_pending:
	and	$17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
	bne	$2, $work_notifysig	/* notify-resume or signal pending */

$work_resched:
	/*
	 * We can get here only if we returned from syscall without SIGPENDING
	 * or got through work_notifysig already.  Either case means no syscall
	 * restarts for us, so let $18 and $19 burn.
	 */
	jsr	$26, schedule
	mov	0, $18		/* mark non-restartable from here on */
	br	ret_to_user	/* re-sample TI_FLAGS with interrupts blocked */

$work_notifysig:
	mov	$sp, $16	/* arg 0: struct pt_regs * */
	DO_SWITCH_STACK		/* expose callee-saved/FP state to signal code */
	jsr	$26, do_work_pending
	UNDO_SWITCH_STACK
	br	restore_all
566
567/*
568 * PTRACE syscall handler
569 */
570
	.align	4
	.type	strace, @function
strace:
	/* set up signal stack, call syscall_trace */
	DO_SWITCH_STACK
	jsr	$26, syscall_trace_enter /* returns the syscall number */
	UNDO_SWITCH_STACK

	/* get the arguments back: a0-a2 from the trap-save slots above
	   pt_regs, a3-a5 from their pt_regs slots (the tracer may have
	   modified any of them).  */
	ldq	$16, SP_OFF+24($sp)
	ldq	$17, SP_OFF+32($sp)
	ldq	$18, SP_OFF+40($sp)
	ldq	$19, 72($sp)
	ldq	$20, 80($sp)
	ldq	$21, 88($sp)

	/* get the system call pointer.. */
	lda	$1, NR_SYSCALLS($31)
	lda	$2, sys_call_table
	lda	$27, alpha_ni_syscall	/* default for out-of-range nr */
	cmpult	$0, $1, $1		/* $1 = (nr < NR_SYSCALLS) */
	s8addq	$0, $2, $2		/* $2 = &sys_call_table[nr] */
	beq	$1, 1f
	ldq	$27, 0($2)
1:	jsr	$26, ($27), sys_gettimeofday	/* 3rd operand is only a prediction hint */
ret_from_straced:
	ldgp	$gp, 0($26)

	/* check return.. */
	blt	$0, $strace_error	/* the call failed */
	stq	$31, 72($sp)		/* a3=0 => no error */
$strace_success:
	stq	$0, 0($sp)		/* save return value */

	DO_SWITCH_STACK
	jsr	$26, syscall_trace_leave
	UNDO_SWITCH_STACK
	br	$31, ret_from_sys_call

	.align	3
$strace_error:
	ldq	$18, 0($sp)	/* old syscall nr (zero if success) */
	beq	$18, $strace_success
	ldq	$19, 72($sp)	/* .. and this a3 */

	subq	$31, $0, $0	/* with error in v0 */
	addq	$31, 1, $1	/* set a3 for errno return */
	stq	$0, 0($sp)
	stq	$1, 72($sp)	/* a3 for return */

	DO_SWITCH_STACK
	mov	$18, $9		/* save old syscall number in a callee-saved reg */
	mov	$19, $10	/* save old a3 -- both survive the C call below */
	jsr	$26, syscall_trace_leave
	mov	$9, $18
	mov	$10, $19
	UNDO_SWITCH_STACK	/* restores the original $9/$10 as well */

	mov	$31, $26	/* tell "ret_from_sys_call" we can restart */
	br	ret_from_sys_call
CFI_END_OSF_FRAME entSys
632
633/*
634 * Save and restore the switch stack -- aka the balance of the user context.
635 */
636
/*
 * do_switch_stack: push a struct switch_stack -- callee-saved integer
 * registers $9-$15, the return address $26, the full FP register file
 * and the FPCR -- onto the kernel stack.  Invoked via "bsr $1, ..."
 * and returns through $1, leaving $26 free to be part of the frame.
 */
	.align	4
	.type	do_switch_stack, @function
	.cfi_startproc simple
	.cfi_return_column 64
	.cfi_def_cfa	$sp, 0
	.cfi_register	64, $1
do_switch_stack:
	lda	$sp, -SWITCH_STACK_SIZE($sp)
	.cfi_adjust_cfa_offset	SWITCH_STACK_SIZE
	stq	$9, 0($sp)
	stq	$10, 8($sp)
	stq	$11, 16($sp)
	stq	$12, 24($sp)
	stq	$13, 32($sp)
	stq	$14, 40($sp)
	stq	$15, 48($sp)
	stq	$26, 56($sp)
	stt	$f0, 64($sp)
	stt	$f1, 72($sp)
	stt	$f2, 80($sp)
	stt	$f3, 88($sp)
	stt	$f4, 96($sp)
	stt	$f5, 104($sp)
	stt	$f6, 112($sp)
	stt	$f7, 120($sp)
	stt	$f8, 128($sp)
	stt	$f9, 136($sp)
	stt	$f10, 144($sp)
	stt	$f11, 152($sp)
	stt	$f12, 160($sp)
	stt	$f13, 168($sp)
	stt	$f14, 176($sp)
	stt	$f15, 184($sp)
	stt	$f16, 192($sp)
	stt	$f17, 200($sp)
	stt	$f18, 208($sp)
	stt	$f19, 216($sp)
	stt	$f20, 224($sp)
	stt	$f21, 232($sp)
	stt	$f22, 240($sp)
	stt	$f23, 248($sp)
	stt	$f24, 256($sp)
	stt	$f25, 264($sp)
	stt	$f26, 272($sp)
	stt	$f27, 280($sp)
	mf_fpcr	$f0		# get fpcr ($f0 was already saved above)
	stt	$f28, 288($sp)
	stt	$f29, 296($sp)
	stt	$f30, 304($sp)
	stt	$f0, 312($sp)	# save fpcr in slot of $f31
	ldt	$f0, 64($sp)	# don't let "do_switch_stack" change fp state.
	ret	$31, ($1), 1
	.cfi_endproc
	.size	do_switch_stack, .-do_switch_stack
691
/*
 * undo_switch_stack: inverse of do_switch_stack -- reload $9-$15, $26,
 * the FP register file and the FPCR from the switch_stack frame at $sp,
 * then pop the frame.  Invoked via "br $1, ..."; returns through $1.
 */
	.align	4
	.type	undo_switch_stack, @function
	.cfi_startproc simple
	.cfi_def_cfa	$sp, 0
	.cfi_register	64, $1
undo_switch_stack:
	ldq	$9, 0($sp)
	ldq	$10, 8($sp)
	ldq	$11, 16($sp)
	ldq	$12, 24($sp)
	ldq	$13, 32($sp)
	ldq	$14, 40($sp)
	ldq	$15, 48($sp)
	ldq	$26, 56($sp)
	ldt	$f30, 312($sp)	# get saved fpcr (uses $f30 as a temporary)
	ldt	$f0, 64($sp)
	ldt	$f1, 72($sp)
	ldt	$f2, 80($sp)
	ldt	$f3, 88($sp)
	mt_fpcr	$f30		# install saved fpcr
	ldt	$f4, 96($sp)
	ldt	$f5, 104($sp)
	ldt	$f6, 112($sp)
	ldt	$f7, 120($sp)
	ldt	$f8, 128($sp)
	ldt	$f9, 136($sp)
	ldt	$f10, 144($sp)
	ldt	$f11, 152($sp)
	ldt	$f12, 160($sp)
	ldt	$f13, 168($sp)
	ldt	$f14, 176($sp)
	ldt	$f15, 184($sp)
	ldt	$f16, 192($sp)
	ldt	$f17, 200($sp)
	ldt	$f18, 208($sp)
	ldt	$f19, 216($sp)
	ldt	$f20, 224($sp)
	ldt	$f21, 232($sp)
	ldt	$f22, 240($sp)
	ldt	$f23, 248($sp)
	ldt	$f24, 256($sp)
	ldt	$f25, 264($sp)
	ldt	$f26, 272($sp)
	ldt	$f27, 280($sp)
	ldt	$f28, 288($sp)
	ldt	$f29, 296($sp)
	ldt	$f30, 304($sp)	# now reload the real $f30
	lda	$sp, SWITCH_STACK_SIZE($sp)
	ret	$31, ($1), 1
	.cfi_endproc
	.size	undo_switch_stack, .-undo_switch_stack
743
744/*
745 * The meat of the context switch code.
746 */
747
	.align	4
	.globl	alpha_switch_to
	.type	alpha_switch_to, @function
	.cfi_startproc
alpha_switch_to:
	DO_SWITCH_STACK		/* spill callee-saved + FP state to the old stack */
	call_pal PAL_swpctx	/* PALcode swaps hardware context (PCB in $16) */
	lda	$8, 0x3fff
	UNDO_SWITCH_STACK	/* reload callee-saved + FP state from the new stack */
	bic	$sp, $8, $8	/* recompute thread_info for the new thread */
	mov	$17, $0		/* return 2nd arg -- presumably the 'prev' task;
				   verify against the switch_to() caller */
	ret
	.cfi_endproc
	.size	alpha_switch_to, .-alpha_switch_to
762
763/*
764 * New processes begin life here.
765 */
766
	.globl	ret_from_fork
	.align	4
	.ent	ret_from_fork
ret_from_fork:
	lda	$26, ret_from_sys_call	/* schedule_tail() will "return" there */
	mov	$17, $16		/* forward 2nd arg as schedule_tail()'s arg */
	jmp	$31, schedule_tail	/* tail call -- $26 already set above */
.end ret_from_fork
775
776/*
777 * ... and new kernel threads - here
778 */
	.align	4
	.globl	ret_from_kernel_thread
	.ent	ret_from_kernel_thread
ret_from_kernel_thread:
	mov	$17, $16		/* forward 2nd arg to schedule_tail() */
	jsr	$26, schedule_tail
	mov	$9, $27			/* $9 = thread function -- presumably set up
					   by copy_thread(); TODO confirm */
	mov	$10, $16		/* $10 = its argument */
	jsr	$26, ($9)
	br	$31, ret_to_user	/* thread fn returned: take the user-exit path */
.end ret_from_kernel_thread
790
791
792/*
793 * Special system calls. Most of these are special in that they either
794 * have to play switch_stack games or in some way use the pt_regs struct.
795 */
796
/*
 * fork_like: generate alpha_<name>, a wrapper that pushes a full
 * switch_stack frame around sys_<name> (presumably snapshotted for
 * the child by the fork machinery -- confirm against copy_thread).
 * Only $26 needs reloading afterwards: the jsr clobbered it, while
 * the other saved registers were never modified here, so the frame
 * is simply discarded instead of a full undo_switch_stack.
 */
.macro fork_like name
	.align	4
	.globl	alpha_\name
	.ent	alpha_\name
alpha_\name:
	.prologue 0
	bsr	$1, do_switch_stack	/* links via $1 so $26 can be saved */
	jsr	$26, sys_\name
	ldq	$26, 56($sp)		/* reload ra from its switch_stack slot */
	lda	$sp, SWITCH_STACK_SIZE($sp)	/* pop the frame */
	ret
.end alpha_\name
.endm

fork_like fork
fork_like vfork
fork_like clone
814
	.align	4
	.globl	sys_sigreturn
	.ent	sys_sigreturn
sys_sigreturn:
	.prologue 0
	lda	$9, ret_from_straced
	cmpult	$26, $9, $9	/* $9 = 0 iff we came from the strace path
				   (return address == ret_from_straced) */
	lda	$sp, -SWITCH_STACK_SIZE($sp)	/* switch_stack frame for do_sigreturn */
	jsr	$26, do_sigreturn
	bne	$9, 1f
	jsr	$26, syscall_trace_leave	/* traced entry: report syscall exit */
1:	br	$1, undo_switch_stack	/* reload regs from the frame and pop it */
	br	ret_from_sys_call
.end sys_sigreturn
829
	.align	4
	.globl	sys_rt_sigreturn
	.ent	sys_rt_sigreturn
sys_rt_sigreturn:
	.prologue 0
	lda	$9, ret_from_straced
	cmpult	$26, $9, $9	/* $9 = 0 iff we came from the strace path
				   (return address == ret_from_straced) */
	lda	$sp, -SWITCH_STACK_SIZE($sp)	/* switch_stack frame for do_rt_sigreturn */
	jsr	$26, do_rt_sigreturn
	bne	$9, 1f
	jsr	$26, syscall_trace_leave	/* traced entry: report syscall exit */
1:	br	$1, undo_switch_stack	/* reload regs from the frame and pop it */
	br	ret_from_sys_call
.end sys_rt_sigreturn
844
	.align	4
	.globl	alpha_ni_syscall
	.ent	alpha_ni_syscall
alpha_ni_syscall:
	.prologue 0
	/* Special because it also implements overflow handling via
	   syscall number 0.  And if you recall, zero is a special
	   trigger for "not an error".  Store large non-zero there.  */
	lda	$0, -ENOSYS
	unop				/* filler no-op */
	stq	$0, 0($sp)		/* overwrite the saved v0 slot in pt_regs */
	ret
.end alpha_ni_syscall