/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/ia64/kernel/entry.S
 *
 * Kernel entry points.
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999, 2002-2003
 *	Asit Mallick <Asit.K.Mallick@intel.com>
 *	Don Dugger <Don.Dugger@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 */
/*
 * ia64_switch_to now places the correct virtual mapping in TR2 for the
 * kernel stack. This allows us to handle interrupts without changing
 * to physical mode.
 *
 * Jonathan Nicklin <nicklin@missioncriticallinux.com>
 * Patrick O'Rourke <orourke@missioncriticallinux.com>
 * 11/07/2000
 */
/*
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *                    pv_ops.
 */
/*
 * Global (preserved) predicate usage on syscall entry/exit path:
 *
 *	pKStk:		See entry.h.
 *	pUStk:		See entry.h.
 *	pSys:		See entry.h.
 *	pNonSys:	!pSys
 */


#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/export.h>

#include "minstate.h"

	/*
	 * execve() is special because in case of success, we need to
	 * set up a null register window frame.
	 */
ENTRY(ia64_execve)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0
	mov loc0=rp
	.body
	mov out0=in0			// filename
	;;				// stop bit between alloc and call
	mov out1=in1			// argv
	mov out2=in2			// envp
	br.call.sptk.many rp=sys_execve
.ret0:
	cmp4.ge p6,p7=r8,r0
	mov ar.pfs=loc1			// restore ar.pfs
	sxt4 r8=r8			// return 64-bit result
	;;
	stf.spill [sp]=f0
	mov rp=loc0
(p6)	mov ar.pfs=r0			// clear ar.pfs on success
(p7)	br.ret.sptk.many rp

	/*
	 * In theory, we'd have to zap this state only to prevent leaking of
	 * security-sensitive state (e.g., if current->mm->dumpable is zero).
	 * However, this executes in less than 20 cycles even on Itanium, so
	 * it's not worth optimizing for.
	 */
	mov ar.unat=0;		mov ar.lc=0
	mov r4=0;		mov f2=f0;		mov b1=r0
	mov r5=0;		mov f3=f0;		mov b2=r0
	mov r6=0;		mov f4=f0;		mov b3=r0
	mov r7=0;		mov f5=f0;		mov b4=r0
	ldf.fill f12=[sp];	mov f13=f0;		mov b5=r0
	ldf.fill f14=[sp];	ldf.fill f15=[sp];	mov f16=f0
	ldf.fill f17=[sp];	ldf.fill f18=[sp];	mov f19=f0
	ldf.fill f20=[sp];	ldf.fill f21=[sp];	mov f22=f0
	ldf.fill f23=[sp];	ldf.fill f24=[sp];	mov f25=f0
	ldf.fill f26=[sp];	ldf.fill f27=[sp];	mov f28=f0
	ldf.fill f29=[sp];	ldf.fill f30=[sp];	mov f31=f0
	br.ret.sptk.many rp
END(ia64_execve)
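
/*
 * Illustrative sketch (added by the editor, not from the original source):
 * the return path above behaves roughly like
 *
 *	long ret = sys_execve(filename, argv, envp);
 *	if ((int)ret < 0)
 *		return (long)(int)ret;	// failure: sign-extend errno, keep frame
 *	// success: clear ar.pfs (null register frame), then zap preserved state
 */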

/*
 * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
 *	      u64 tls)
 */
GLOBAL_ENTRY(sys_clone2)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	mov loc0=rp
	mov loc1=r16			// save ar.pfs across ia64_clone
	.body
	mov out0=in0
	mov out1=in1
	mov out2=in2
	mov out3=in3
	mov out4=in4
	mov out5=in5
	br.call.sptk.many rp=ia64_clone
.ret1:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone2)

/*
 * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
 *	Deprecated.  Use sys_clone2() instead.
 */
GLOBAL_ENTRY(sys_clone)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc r16=ar.pfs,8,2,6,0
	DO_SAVE_SWITCH_STACK
	mov loc0=rp
	mov loc1=r16			// save ar.pfs across ia64_clone
	.body
	mov out0=in0
	mov out1=in1
	mov out2=16			// stacksize (compensates for 16-byte scratch area)
	mov out3=in2
	mov out4=in3
	mov out5=in4
	br.call.sptk.many rp=ia64_clone
.ret2:	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(sys_clone)
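
/*
 * Editor's note (background, not from the original file): the ia64 software
 * conventions keep a 16-byte scratch area at the top of the memory stack,
 * just below sp.  sys_clone() has no ustack_size argument, so a nominal
 * stacksize of 16 is passed to ia64_clone() above to account for that
 * scratch area when the child's initial stack pointer is computed.
 */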

/*
 * prev_task <- ia64_switch_to(struct task_struct *next)
 *	With Ingo's new scheduler, interrupts are disabled when this routine gets
 *	called.  The code starting at .map relies on this.  The rest of the code
 *	doesn't care about the interrupt masking status.
 */
GLOBAL_ENTRY(ia64_switch_to)
	.prologue
	alloc r16=ar.pfs,1,0,0,0
	DO_SAVE_SWITCH_STACK
	.body

	adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
	movl r25=init_task
	mov r27=IA64_KR(CURRENT_STACK)
	adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
	dep r20=0,in0,61,3		// physical address of "next"
	;;
	st8 [r22]=sp			// save kernel stack pointer of old task
	shr.u r26=r20,IA64_GRANULE_SHIFT
	cmp.eq p7,p6=r25,in0
	;;
	/*
	 * If we've already mapped this task's page, we can skip doing it again.
	 */
(p6)	cmp.eq p7,p6=r26,r27
(p6)	br.cond.dpnt .map
	;;
.done:
	ld8 sp=[r21]			// load kernel stack pointer of new task
	MOV_TO_KR(CURRENT, in0, r8, r9)	// update "current" application register
	mov r8=r13			// return pointer to previously running task
	mov r13=in0			// set "current" pointer
	;;
	DO_LOAD_SWITCH_STACK

#ifdef CONFIG_SMP
	sync.i				// ensure "fc"s done by this CPU are visible on other CPUs
#endif
	br.ret.sptk.many rp		// boogie on out in new context

.map:
	RSM_PSR_IC(r25)			// interrupts (psr.i) are already disabled here
	movl r25=PAGE_KERNEL
	;;
	srlz.d
	or r23=r25,r20			// construct PA | page properties
	mov r25=IA64_GRANULE_SHIFT<<2
	;;
	MOV_TO_ITIR(p0, r25, r8)
	MOV_TO_IFA(in0, r8)		// VA of next task...
	;;
	mov r25=IA64_TR_CURRENT_STACK
	MOV_TO_KR(CURRENT_STACK, r26, r8, r9)	// remember last page we mapped...
	;;
	itr.d dtr[r25]=r23		// wire in new mapping...
	SSM_PSR_IC_AND_SRLZ_D(r8, r9)	// reenable the psr.ic bit
	br.cond.sptk .done
END(ia64_switch_to)
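
/*
 * Illustrative sketch (added by the editor, not from the original source):
 * the mapping decision above is roughly
 *
 *	if (next != &init_task && granule(next) != IA64_KR(CURRENT_STACK)) {
 *		// .map: pin next's stack granule with a translation register
 *		itr_d_insert(IA64_TR_CURRENT_STACK,
 *			     va(next), pa(next) | PAGE_KERNEL);
 *	}
 *	// .done: switch sp, "current" (r13), and the switch-stack state
 *
 * granule() and itr_d_insert() are hypothetical names for the shift and
 * itr.d operations performed inline above.
 */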

/*
 * Note that interrupts are enabled during save_switch_stack and load_switch_stack.  This
 * means that we may get an interrupt with "sp" pointing to the new kernel stack while
 * ar.bspstore is still pointing to the old kernel backing store area.  Since ar.rsc,
 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
 * problem.  Also, we don't need to specify unwind information for preserved registers
 * that are not modified in save_switch_stack as the right unwind information is already
 * specified at the call-site of save_switch_stack.
 */

/*
 * save_switch_stack:
 *	- r16 holds ar.pfs
 *	- b7 holds address to return to
 *	- rp (b0) holds return address to save
 */
GLOBAL_ENTRY(save_switch_stack)
	.prologue
	.altrp b7
	flushrs			// flush dirty regs to backing store (must be first in insn group)
	.save @priunat,r17
	mov r17=ar.unat		// preserve caller's
	.body
#ifdef CONFIG_ITANIUM
	adds r2=16+128,sp
	adds r3=16+64,sp
	adds r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,16		// spill r4
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl.nt1 [r2],128
	lfetch.fault.excl.nt1 [r3],128
	;;
	lfetch.fault.excl [r2]
	lfetch.fault.excl [r3]
	adds r15=SW(R5)+16,sp
#else
	add r2=16+3*128,sp
	add r3=16,sp
	add r14=SW(R4)+16,sp
	;;
	st8.spill [r14]=r4,SW(R6)-SW(R4)	// spill r4 and prefetch offset 0x1c0
	lfetch.fault.excl.nt1 [r3],128		// prefetch offset 0x010
	;;
	lfetch.fault.excl.nt1 [r3],128		// prefetch offset 0x090
	lfetch.fault.excl.nt1 [r2],128		// prefetch offset 0x190
	;;
	lfetch.fault.excl.nt1 [r3]		// prefetch offset 0x110
	lfetch.fault.excl.nt1 [r2]		// prefetch offset 0x210
	adds r15=SW(R5)+16,sp
#endif
	;;
	st8.spill [r15]=r5,SW(R7)-SW(R5)	// spill r5
	mov.m ar.rsc=0			// put RSE in mode: enforced lazy, little endian, pl 0
	add r2=SW(F2)+16,sp		// r2 = &sw->f2
	;;
	st8.spill [r14]=r6,SW(B0)-SW(R6)	// spill r6
	mov.m r18=ar.fpsr		// preserve fpsr
	add r3=SW(F3)+16,sp		// r3 = &sw->f3
	;;
	stf.spill [r2]=f2,32
	mov.m r19=ar.rnat
	mov r21=b0

	stf.spill [r3]=f3,32
	st8.spill [r15]=r7,SW(B2)-SW(R7)	// spill r7
	mov r22=b1
	;;
	// since we're done with the spills, read and save ar.unat:
	mov.m r29=ar.unat
	mov.m r20=ar.bspstore
	mov r23=b2
	stf.spill [r2]=f4,32
	stf.spill [r3]=f5,32
	mov r24=b3
	;;
	st8 [r14]=r21,SW(B1)-SW(B0)	// save b0
	st8 [r15]=r23,SW(B3)-SW(B2)	// save b2
	mov r25=b4
	mov r26=b5
	;;
	st8 [r14]=r22,SW(B4)-SW(B1)	// save b1
	st8 [r15]=r24,SW(AR_PFS)-SW(B3)	// save b3
	mov r21=ar.lc			// I-unit
	stf.spill [r2]=f12,32
	stf.spill [r3]=f13,32
	;;
	st8 [r14]=r25,SW(B5)-SW(B4)	// save b4
	st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS)	// save ar.pfs
	stf.spill [r2]=f14,32
	stf.spill [r3]=f15,32
	;;
	st8 [r14]=r26			// save b5
	st8 [r15]=r21			// save ar.lc
	stf.spill [r2]=f16,32
	stf.spill [r3]=f17,32
	;;
	stf.spill [r2]=f18,32
	stf.spill [r3]=f19,32
	;;
	stf.spill [r2]=f20,32
	stf.spill [r3]=f21,32
	;;
	stf.spill [r2]=f22,32
	stf.spill [r3]=f23,32
	;;
	stf.spill [r2]=f24,32
	stf.spill [r3]=f25,32
	;;
	stf.spill [r2]=f26,32
	stf.spill [r3]=f27,32
	;;
	stf.spill [r2]=f28,32
	stf.spill [r3]=f29,32
	;;
	stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
	stf.spill [r3]=f31,SW(PR)-SW(F31)
	add r14=SW(CALLER_UNAT)+16,sp
	;;
	st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT)		// save ar.unat
	st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT)	// save caller_unat
	mov r21=pr
	;;
	st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT)	// save ar.rnat
	st8 [r3]=r21					// save predicate registers
	;;
	st8 [r2]=r20					// save ar.bspstore
	st8 [r14]=r18					// save fpsr
	mov ar.rsc=3			// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(save_switch_stack)
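
/*
 * Editor's note (explanatory, not from the original source): st8.spill
 * records the NaT bit of each spilled integer register in ar.unat, which is
 * why ar.unat is read into r29 only after the last r4-r7 spill above and
 * saved as sw->ar_unat, while the caller's incoming ar.unat (r17) is saved
 * separately as sw->caller_unat.
 */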

/*
 * load_switch_stack:
 *	- "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
 *	- b7 holds address to return to
 *	- must not touch r8-r11
 */
GLOBAL_ENTRY(load_switch_stack)
	.prologue
	.altrp b7

	.body
	lfetch.fault.nt1 [sp]
	adds r2=SW(AR_BSPSTORE)+16,sp
	adds r3=SW(AR_UNAT)+16,sp
	mov ar.rsc=0			// put RSE into enforced lazy mode
	adds r14=SW(CALLER_UNAT)+16,sp
	adds r15=SW(AR_FPSR)+16,sp
	;;
	ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE))	// bspstore
	ld8 r29=[r3],(SW(B1)-SW(AR_UNAT))	// unat
	;;
	ld8 r21=[r2],16		// restore b0
	ld8 r22=[r3],16		// restore b1
	;;
	ld8 r23=[r2],16		// restore b2
	ld8 r24=[r3],16		// restore b3
	;;
	ld8 r25=[r2],16		// restore b4
	ld8 r26=[r3],16		// restore b5
	;;
	ld8 r16=[r2],(SW(PR)-SW(AR_PFS))	// ar.pfs
	ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC))	// ar.lc
	;;
	ld8 r28=[r2]		// restore pr
	ld8 r30=[r3]		// restore rnat
	;;
	ld8 r18=[r14],16	// restore caller's unat
	ld8 r19=[r15],24	// restore fpsr
	;;
	ldf.fill f2=[r14],32
	ldf.fill f3=[r15],32
	;;
	ldf.fill f4=[r14],32
	ldf.fill f5=[r15],32
	;;
	ldf.fill f12=[r14],32
	ldf.fill f13=[r15],32
	;;
	ldf.fill f14=[r14],32
	ldf.fill f15=[r15],32
	;;
	ldf.fill f16=[r14],32
	ldf.fill f17=[r15],32
	;;
	ldf.fill f18=[r14],32
	ldf.fill f19=[r15],32
	mov b0=r21
	;;
	ldf.fill f20=[r14],32
	ldf.fill f21=[r15],32
	mov b1=r22
	;;
	ldf.fill f22=[r14],32
	ldf.fill f23=[r15],32
	mov b2=r23
	;;
	mov ar.bspstore=r27
	mov ar.unat=r29		// establish unat holding the NaT bits for r4-r7
	mov b3=r24
	;;
	ldf.fill f24=[r14],32
	ldf.fill f25=[r15],32
	mov b4=r25
	;;
	ldf.fill f26=[r14],32
	ldf.fill f27=[r15],32
	mov b5=r26
	;;
	ldf.fill f28=[r14],32
	ldf.fill f29=[r15],32
	mov ar.pfs=r16
	;;
	ldf.fill f30=[r14],32
	ldf.fill f31=[r15],24
	mov ar.lc=r17
	;;
	ld8.fill r4=[r14],16
	ld8.fill r5=[r15],16
	mov pr=r28,-1
	;;
	ld8.fill r6=[r14],16
	ld8.fill r7=[r15],16

	mov ar.unat=r18		// restore caller's unat
	mov ar.rnat=r30		// must restore after bspstore but before rsc!
	mov ar.fpsr=r19		// restore fpsr
	mov ar.rsc=3		// put RSE back into eager mode, pl 0
	br.cond.sptk.many b7
END(load_switch_stack)
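
/*
 * Editor's note on the ordering constraint flagged above (explanatory, not
 * from the original source): ar.rnat holds the NaT collection for the
 * registers around ar.bspstore, so it is only meaningful once ar.bspstore
 * has been switched to the new backing store, and it can only be written
 * while the RSE is still in enforced-lazy mode (ar.rsc=0); putting ar.rsc
 * back into eager mode first could let the RSE clobber the value being
 * restored.
 */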

	/*
	 * Invoke a system call, but do some tracing before and after the call.
	 * We MUST preserve the current register frame throughout this routine
	 * because some system calls (such as ia64_execve) directly
	 * manipulate ar.pfs.
	 */
GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * We need to preserve the scratch registers f6-f11 in case the system
	 * call is sigreturn.
	 */
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
	cmp.lt p6,p0=r8,r0			// check tracehook
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error		// syscall failed ->
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	ldf.fill f6=[r16],32
	ldf.fill f7=[r17],32
	;;
	ldf.fill f8=[r16],32
	ldf.fill f9=[r17],32
	;;
	ldf.fill f10=[r16]
	ldf.fill f11=[r17]
	// the syscall number may have changed, so re-load it and re-calculate the
	// syscall entry-point:
	adds r15=PT(R15)+16,sp			// r15 = &pt_regs.r15 (syscall #)
	;;
	ld8 r15=[r15]
	mov r3=NR_syscalls - 1
	;;
	adds r15=-1024,r15
	movl r16=sys_call_table
	;;
	shladd r20=r15,3,r16			// r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3
	;;
(p6)	ld8 r20=[r20]				// load address of syscall entry point
(p7)	movl r20=sys_ni_syscall
	;;
	mov b6=r20
	br.call.sptk.many rp=b6			// do the syscall
.strace_check_retval:
	cmp.lt p6,p0=r8,r0			// syscall failed?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp			// r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error		// syscall failed ->
	;;					// avoid RAW on r10
.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8		// store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10		// clear error indication in slot for r10
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3:
(pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
(pUStk)	rsm psr.i				// disable interrupts
	br.cond.sptk ia64_work_pending_syscall_end

strace_error:
	ld8 r3=[r2]				// load pt_regs.r8
	sub r9=0,r8				// negate return value to get errno value
	;;
	cmp.ne p6,p0=r3,r0			// is pt_regs.r8!=0?
	adds r3=16,r2				// r3=&pt_regs.r10
	;;
(p6)	mov r10=-1
(p6)	mov r8=r9
	br.cond.sptk .strace_save_retval
END(ia64_trace_syscall)
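
/*
 * Illustrative sketch (added by the editor, not from the original source):
 * the re-dispatch above corresponds roughly to
 *
 *	long num = regs->r15 - 1024;		// ia64 syscalls start at 1024
 *	syscall_fn_t fn = ((unsigned long)num <= NR_syscalls - 1)
 *			  ? sys_call_table[num] : sys_ni_syscall;
 *	long ret = fn(...);			// args unchanged in the frame
 *
 * syscall_fn_t is a hypothetical typedef; the bounds test uses an unsigned
 * compare (cmp.leu) so that negative numbers are rejected as well.
 */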

	/*
	 * When traced and returning from sigreturn, we invoke syscall_trace but then
	 * go straight to ia64_leave_kernel rather than ia64_leave_syscall.
	 */
GLOBAL_ENTRY(ia64_strace_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
}
.ret4:	br.cond.sptk ia64_leave_kernel
END(ia64_strace_leave_kernel)

ENTRY(call_payload)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0)
	/* call the kernel_thread payload; fn is in r4, arg - in r5 */
	alloc loc1=ar.pfs,0,3,1,0
	mov loc0=rp
	mov loc2=gp
	mov out0=r5		// arg
	ld8 r14 = [r4], 8	// fn.address
	;;
	mov b6 = r14
	ld8 gp = [r4]		// fn.gp
	;;
	br.call.sptk.many rp=b6	// fn(arg)
.ret12:	mov gp=loc2
	mov rp=loc0
	mov ar.pfs=loc1
	/* ... and if it has returned, we are going to userland */
	cmp.ne pKStk,pUStk=r0,r0
	br.ret.sptk.many rp
END(call_payload)
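
/*
 * Editor's note (background, not from the original file): an ia64 function
 * pointer is the address of a two-word function descriptor,
 *
 *	struct fdesc {
 *		unsigned long ip;	// entry point
 *		unsigned long gp;	// global pointer of the target
 *	};
 *
 * which is why call_payload loads fn.address and fn.gp with two ld8s and
 * saves/restores gp around the indirect call.
 */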

GLOBAL_ENTRY(ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
{	/*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle.  This is a workaround.
	 */
	nop.m 0
	nop.i 0
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to() after ia64_clone()->copy_thread().  r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret8:
(pKStk)	br.call.sptk.many rp=call_payload
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]
	;;
	mov r8=0
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2
	;;
	cmp.ne p6,p0=r2,r0
(p6)	br.cond.spnt .strace_check_retval
	;;					// added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
	// fall through
GLOBAL_ENTRY(ia64_ret_from_syscall)
	PT_REGS_UNWIND_INFO(0)
	cmp.ge p6,p7=r8,r0			// syscall executed successfully?
	adds r2=PT(R8)+16,sp			// r2 = &pt_regs.r8
	mov r10=r0				// clear error indication in r10
(p7)	br.cond.spnt handle_syscall_error	// handle potential syscall failure
END(ia64_ret_from_syscall)
	// fall through

/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 *	need to switch to bank 0 and doesn't restore the scratch registers.
 *	To avoid leaking kernel bits, the scratch registers are set to
 *	the following known-to-be-safe values:
 *
 *		  r1: restored (global pointer)
 *		  r2: cleared
 *		  r3: 1 (when returning to user-level)
 *	      r8-r11: restored (syscall return value(s))
 *		 r12: restored (user-level stack pointer)
 *		 r13: restored (user-level thread pointer)
 *		 r14: set to __kernel_syscall_via_epc
 *		 r15: restored (syscall #)
 *	     r16-r17: cleared
 *		 r18: user-level b6
 *		 r19: cleared
 *		 r20: user-level ar.fpsr
 *		 r21: user-level b0
 *		 r22: cleared
 *		 r23: user-level ar.bspstore
 *		 r24: user-level ar.rnat
 *		 r25: user-level ar.unat
 *		 r26: user-level ar.pfs
 *		 r27: user-level ar.rsc
 *		 r28: user-level ip
 *		 r29: user-level psr
 *		 r30: user-level cfm
 *		 r31: user-level pr
 *	      f6-f11: cleared
 *		  pr: restored (user-level pr)
 *		  b0: restored (user-level rp)
 *		  b6: restored
 *		  b7: set to __kernel_syscall_via_epc
 *	     ar.unat: restored (user-level ar.unat)
 *	      ar.pfs: restored (user-level ar.pfs)
 *	      ar.rsc: restored (user-level ar.rsc)
 *	     ar.rnat: restored (user-level ar.rnat)
 *	 ar.bspstore: restored (user-level ar.bspstore)
 *	     ar.fpsr: restored (user-level ar.fpsr)
 *	      ar.ccv: cleared
 *	      ar.csd: cleared
 *	      ar.ssd: cleared
 */
GLOBAL_ENTRY(ia64_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPTION
	RSM_PSR_I(p0, r2, r18)			// disable interrupts
	cmp.eq pLvSys,p0=r0,r0			// pLvSys=1: leave from syscall
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
(pUStk)	mov r21=0			// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPTION */
	RSM_PSR_I(pUStk, r2, r18)
	cmp.eq pLvSys,p0=r0,r0		// pLvSys=1: leave from syscall
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
.global ia64_work_processed_syscall;
ia64_work_processed_syscall:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	adds r2=PT(LOADRS)+16,r12
	MOV_FROM_ITC(pUStk, p9, r22, r19)	// fetch time at leave
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
	adds r3=PT(AR_BSPSTORE)+16,r12		// deferred
	;;
#else
	adds r2=PT(LOADRS)+16,r12
	adds r3=PT(AR_BSPSTORE)+16,r12
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
	nop.i 0
	;;
#endif
	mov r16=ar.bsp				// M2  get existing backing store pointer
	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
(p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
	;;
	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE)	// load ar.bspstore (may be garbage)
(p6)	cmp4.ne.unc p6,p0=r15, r0		// any special work pending?
(p6)	br.cond.spnt .work_pending_syscall
	;;
	// start restoring the state saved on the kernel stack (struct pt_regs):
	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
(pNonSys) break 0		// bug check: we shouldn't be here if pNonSys is TRUE!
	;;
	invala			// M0|1 invalidate ALAT
	RSM_PSR_I_IC(r28, r29, r30)	// M2   turn off interrupts and interruption collection
	cmp.eq p9,p0=r0,r0	// A    set p9 to indicate that we should restore cr.ifs

	ld8 r29=[r2],16		// M0|1 load cr.ipsr
	ld8 r28=[r3],16		// M0|1 load cr.iip
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
	;;
	ld8 r30=[r2],16		// M0|1 load cr.ifs
	ld8 r25=[r3],16		// M0|1 load ar.unat
(pUStk) add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
#else
	mov r22=r0		// A    clear r22
	;;
	ld8 r30=[r2],16		// M0|1 load cr.ifs
	ld8 r25=[r3],16		// M0|1 load ar.unat
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
#endif
	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
	MOV_FROM_PSR(pKStk, r22, r21)	// M2   read PSR now that interrupts are disabled
	nop 0
	;;
	ld8 r21=[r2],PT(AR_RNAT)-PT(B0)	// M0|1 load b0
	ld8 r27=[r3],PT(PR)-PT(AR_RSC)	// M0|1 load ar.rsc
	mov f6=f0			// F    clear f6
	;;
	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT)	// M0|1 load ar.rnat (may be garbage)
	ld8 r31=[r3],PT(R1)-PT(PR)		// M0|1 load predicates
	mov f7=f0				// F    clear f7
	;;
	ld8 r20=[r2],PT(R12)-PT(AR_FPSR)	// M0|1 load ar.fpsr
	ld8.fill r1=[r3],16			// M0|1 load r1
(pUStk) mov r17=1				// A
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) st1 [r15]=r17				// M2|3
#else
(pUStk) st1 [r14]=r17				// M2|3
#endif
	ld8.fill r13=[r3],16			// M0|1
	mov f8=f0				// F    clear f8
	;;
	ld8.fill r12=[r2]			// M0|1 restore r12 (sp)
	ld8.fill r15=[r3]			// M0|1 restore r15
	mov b6=r18				// I0   restore b6

	LOAD_PHYS_STACK_REG_SIZE(r17)
	mov f9=f0				// F    clear f9
(pKStk) br.cond.dpnt.many skip_rbs_switch	// B

	srlz.d				// M0   ensure interruption collection is off (for cover)
	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition
	COVER				// B    add current frame into dirty partition & set cr.ifs
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	mov r19=ar.bsp			// M2   get new backing store pointer
	st8 [r14]=r22			// M	save time at leave
	mov f10=f0			// F    clear f10

	mov r22=r0			// A	clear r22
	movl r14=__kernel_syscall_via_epc // X
	;;
#else
	mov r19=ar.bsp			// M2   get new backing store pointer
	mov f10=f0			// F    clear f10

	nop.m 0
	movl r14=__kernel_syscall_via_epc // X
	;;
#endif
	mov.m ar.csd=r0			// M2   clear ar.csd
	mov.m ar.ccv=r0			// M2   clear ar.ccv
	mov b7=r14			// I0   clear b7 (hint with __kernel_syscall_via_epc)

	mov.m ar.ssd=r0			// M2   clear ar.ssd
	mov f11=f0			// F    clear f11
	br.cond.sptk.many rbs_switch	// B
END(ia64_leave_syscall)

GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work.  We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
	 * is 0.  After extra work processing has been completed, execution
	 * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPTION
	RSM_PSR_I(p0, r17, r31)			// disable interrupts
	cmp.eq p0,pLvSys=r0,r0			// pLvSys=0: leave from kernel
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20]			// r21 <- preempt_count
(pUStk)	mov r21=0			// r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0		// p6 <- pUStk || (preempt_count == 0)
#else
	RSM_PSR_I(pUStk, r17, r31)
	cmp.eq p0,pLvSys=r0,r0		// pLvSys=0: leave from kernel
(pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
#endif
.work_processed_kernel:
	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r17]				// load current_thread_info()->flags
	adds r21=PT(PR)+16,r12
	;;

	lfetch [r21],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	;;
	lfetch [r21]
	ld8 r28=[r2],8		// load b6
	adds r29=PT(R24)+16,r12

	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
	adds r30=PT(AR_CCV)+16,r12
(p6)	and r19=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
	;;
	ld8.fill r24=[r29]
	ld8 r15=[r30]		// load ar.ccv
(p6)	cmp4.ne.unc p6,p0=r19, r0		// any special work pending?
	;;
	ld8 r29=[r2],16		// load b7
	ld8 r30=[r3],16		// load ar.csd
(p6)	br.cond.spnt .work_pending
	;;
	ld8 r31=[r2],16		// load ar.ssd
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
	RSM_PSR_I_IC(r23, r22, r25)	// initiate turning off of interrupt and interruption collection
	invala			// invalidate ALAT
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],PT(F9)-PT(R31)
	adds r3=PT(F10)-PT(F6),r3
	;;
	ldf.fill f9=[r2],PT(F6)-PT(F9)
	ldf.fill f10=[r3],PT(F8)-PT(F10)
	;;
	ldf.fill f6=[r2],PT(F7)-PT(F6)
	;;
	ldf.fill f7=[r2],PT(F11)-PT(F7)
	ldf.fill f8=[r3],32
	;;
	srlz.d	// ensure that inter. collection is off (VHPT is don't care, since text is pinned)
	mov ar.ccv=r15
	;;
	ldf.fill f11=[r2]
	BSW_0(r2, r3, r15)	// switch back to bank 0 (no stop bit required beforehand...)
	;;
(pUStk)	mov r18=IA64_KR(CURRENT)	// M2 (12 cycle read latency)
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	.pred.rel.mutex pUStk,pKStk
	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
	MOV_FROM_ITC(pUStk, p9, r22, r29)	// M  fetch time at leave
	nop.i 0
	;;
#else
	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
	nop.i 0
	nop.i 0
	;;
#endif
	ld8 r29=[r16],16	// load cr.ipsr
	ld8 r28=[r17],16	// load cr.iip
	;;
	ld8 r30=[r16],16	// load cr.ifs
	ld8 r25=[r17],16	// load ar.unat
	;;
	ld8 r26=[r16],16	// load ar.pfs
	ld8 r27=[r17],16	// load ar.rsc
	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
	;;
	ld8 r24=[r16],16	// load ar.rnat (may be garbage)
	ld8 r23=[r17],16	// load ar.bspstore (may be garbage)
	;;
	ld8 r31=[r16],16	// load predicates
	ld8 r21=[r17],16	// load b0
	;;
	ld8 r19=[r16],16	// load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16	// load r1
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
#else
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
#endif
	;;
	ld8 r20=[r16],16	// ar.fpsr
	ld8.fill r15=[r17],16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18	// deferred
#endif
	;;
	ld8.fill r14=[r16],16
	ld8.fill r2=[r17]
(pUStk)	mov r17=1
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	// mmi_ : ld8 st1 shr;;		mmi_ : st8 st1 shr;;
	// mib  : mov add br	->	mib  : ld8 add br
	// bbb_ : br  nop cover;;	mbb_ : mov br  cover;;
	//
	// nothing requires bsp in r16 if the (pKStk) branch is selected.
(pUStk)	st8 [r3]=r22		// save time at leave
(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
	;;
	ld8.fill r3=[r16]	// deferred
	LOAD_PHYS_STACK_REG_SIZE(r17)
(pKStk)	br.cond.dpnt skip_rbs_switch
	mov r16=ar.bsp		// get existing backing store pointer
#else
	ld8.fill r3=[r16]
(pUStk)	st1 [r18]=r17		// restore current->thread.on_ustack
	shr.u r18=r19,16	// get byte size of existing "dirty" partition
	;;
	mov r16=ar.bsp		// get existing backing store pointer
	LOAD_PHYS_STACK_REG_SIZE(r17)
(pKStk)	br.cond.dpnt skip_rbs_switch
#endif

	/*
	 * Restore user backing store.
	 *
	 * NOTE: alloc, loadrs, and cover can't be predicated.
	 */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
	COVER				// add current frame into dirty partition and set cr.ifs
	;;
	mov r19=ar.bsp			// get new backing store pointer
rbs_switch:
	sub r16=r16,r18			// krbs = old bsp - size of dirty partition
	cmp.ne p9,p0=r0,r0		// clear p9 to skip restore of cr.ifs
	;;
	sub r19=r19,r16			// calculate total byte size of dirty partition
	add r18=64,r18			// don't force in0-in7 into memory...
	;;
	shl r19=r19,16			// shift size of dirty partition into loadrs position
	;;
dont_preserve_current_frame:
	/*
	 * To prevent leaking bits between the kernel and user-space,
	 * we must clear the stacked registers in the "invalid" partition here.
	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
	 * 5 registers/cycle on McKinley).
	 */
#	define pRecurse	p6
#	define pReturn	p7
#ifdef CONFIG_ITANIUM
#	define Nregs	10
#else
#	define Nregs	14
#endif
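
/*
 * Illustrative sketch (added by the editor, not from the original source):
 * the code below zeroes in0 bytes worth of stacked registers, Nregs per
 * recursion level, roughly like
 *
 *	void rse_clear_invalid(long nbytes, long depth)	// in0, in1
 *	{
 *		// zero this level's local registers (the "mov locN=0" above)
 *		if (nbytes > Nregs * 8)			// pRecurse
 *			rse_clear_invalid(nbytes - Nregs * 8, depth + 1);
 *		if (depth != 0)				// pReturn
 *			return;				// unwind one level
 *	}
 *
 * Each call advances the register frame by Nregs registers, so the
 * recursion itself walks the whole invalid partition.
 */
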
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9		// RNaTslots <= floor(dirtySize / (64*8))
	sub r17=r17,r18			// r17 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r19			// load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r17
	mov in1=0
	;;
	TEXT_ALIGN(32)
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
	// cycle 0
 { .mii
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
}{ .mfb
	add out1=1,in1			// increment recursion count
	nop.f 0
	nop.b 0				// can't do br.call here because of alloc (WAW on CFM)
	;;
}{ .mfi	// cycle 1
	mov loc1=0
	nop.f 0
	mov loc2=0
}{ .mib
	mov loc3=0
	mov loc4=0
(pRecurse) br.call.sptk.many b0=rse_clear_invalid

}{ .mfi	// cycle 2
	mov loc5=0
	nop.f 0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
}{ .mib
	mov loc6=0
	mov loc7=0
(pReturn) br.ret.sptk.many b0
}
#else /* !CONFIG_ITANIUM */
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0	// if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1			// increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1	// if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0
#endif /* !CONFIG_ITANIUM */
#	undef pRecurse
#	undef pReturn
	;;
	alloc r17=ar.pfs,0,0,0,0	// drop current register frame
	;;
	loadrs
	;;
skip_rbs_switch:
	mov ar.unat=r25		// M2
(pKStk)	extr.u r22=r22,21,1	// I0 extract current value of psr.pp from r22
(pLvSys)mov r19=r0		// A  clear r19 for leave_syscall, no-op otherwise
	;;
(pUStk)	mov ar.bspstore=r23	// M2
(pKStk)	dep r29=r22,r29,21,1	// I0 update ipsr.pp with psr.pp
(pLvSys)mov r16=r0		// A  clear r16 for leave_syscall, no-op otherwise
	;;
	MOV_TO_IPSR(p0, r29, r25)	// M2
	mov ar.pfs=r26		// I0
(pLvSys)mov r17=r0		// A  clear r17 for leave_syscall, no-op otherwise

	MOV_TO_IFS(p9, r30, r25)	// M2
	mov b0=r21		// I0
(pLvSys)mov r18=r0		// A  clear r18 for leave_syscall, no-op otherwise

	mov ar.fpsr=r20		// M2
	MOV_TO_IIP(r28, r25)	// M2
	nop 0
	;;
(pUStk)	mov ar.rnat=r24		// M2 must happen with RSE in lazy mode
	nop 0
(pLvSys)mov r2=r0

	mov ar.rsc=r27		// M2
	mov pr=r31,-1		// I0
	RFI			// B

	/*
	 * On entry:
	 *	r20 = &current->thread_info->pre_count (if CONFIG_PREEMPTION)
	 *	r31 = current->thread_info->flags
	 * On exit:
	 *	p6 = TRUE if work-pending-check needs to be redone
	 *
	 * Interrupts are disabled on entry, reenabled depending on work,
	 * and disabled on exit.
	 */
.work_pending_syscall:
	add r2=-8,r2
	add r3=-8,r3
	;;
	st8 [r2]=r8
	st8 [r3]=r10
.work_pending:
	tbit.z p6,p0=r31,TIF_NEED_RESCHED	// is resched not needed?
(p6)	br.cond.sptk.few .notify
	br.call.spnt.many rp=preempt_schedule_irq
.ret9:	cmp.eq p6,p0=r0,r0	// p6 <- 1 (re-check)
(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel

.notify:
(pUStk)	br.call.spnt.many rp=notify_resume_user
.ret10:	cmp.ne p6,p0=r0,r0	// p6 <- 0 (don't re-check)
(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel

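/*
 * Illustrative control flow for the work loop above (added by the editor,
 * not from the original source):
 *
 *	if (flags & _TIF_NEED_RESCHED) {
 *		preempt_schedule_irq();
 *		recheck = 1;		// .ret9: p6 <- 1
 *	} else {
 *		if (returning_to_user)	// pUStk
 *			notify_resume_user(...);
 *		recheck = 0;		// .ret10: p6 <- 0
 *	}
 *	// then back to ia64_work_processed_syscall / .work_processed_kernel
 */
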
.global ia64_work_pending_syscall_end;
ia64_work_pending_syscall_end:
	adds r2=PT(R8)+16,r12
	adds r3=PT(R10)+16,r12
	;;
	ld8 r8=[r2]
	ld8 r10=[r3]
	br.cond.sptk.many ia64_work_processed_syscall
END(ia64_leave_kernel)

ENTRY(handle_syscall_error)
	/*
	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values which could
	 * lead us to mistake a negative return value for a failed syscall.  Such syscalls
	 * must deposit a non-zero value in pt_regs.r8 to indicate an error.  If
	 * pt_regs.r8 is zero, we assume that the call completed successfully.
	 */
	PT_REGS_UNWIND_INFO(0)
	ld8 r3=[r2]		// load pt_regs.r8
	;;
	cmp.eq p6,p7=r3,r0	// is pt_regs.r8==0?
	;;
(p7)	mov r10=-1
(p7)	sub r8=0,r8		// negate return value to get errno
	br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)
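
/*
 * Illustrative C equivalent of the fixup above (added by the editor, not
 * from the original source):
 *
 *	if (regs->r8 != 0) {	// the syscall flagged an error
 *		r10 = -1;	// error indication
 *		r8 = -r8;	// positive errno value
 *	}
 */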

	/*
	 * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
	 * in case a system call gets restarted.
	 */
GLOBAL_ENTRY(ia64_invoke_schedule_tail)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,1,0
	mov loc0=rp
	mov out0=r8				// Address of previous task
	;;
	br.call.sptk.many rp=schedule_tail
.ret11:	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)

	/*
	 * Setup stack and call do_notify_resume_user(), keeping interrupts
	 * disabled.
	 *
	 * Note that pSys and pNonSys need to be set up by the caller.
	 * We declare 8 input registers so the system call args get preserved,
	 * in case we need to restart a system call.
	 */
GLOBAL_ENTRY(notify_resume_user)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0	// preserve all eight input regs in case of syscall restart!
	mov r9=ar.unat
	mov loc0=rp			// save return address
	mov out0=0			// there is no "oldset"
	adds out1=8,sp			// out1=&sigscratch->ar_pfs
(pSys)	mov out2=1			// out2==1 => we're in a syscall
	;;
(pNonSys) mov out2=0			// out2==0 => not a syscall
	.fframe 16
	.spillsp ar.unat, 16
	st8 [sp]=r9,-16			// allocate space for ar.unat and save it
	st8 [out1]=loc1,-8		// save ar.pfs, out1=&sigscratch
	.body
	br.call.sptk.many rp=do_notify_resume_user
.ret15:	.restore sp
	adds sp=16,sp			// pop scratch stack space
	;;
	ld8 r9=[sp]			// load new unat from sigscratch->scratch_unat
	mov rp=loc0
	;;
	mov ar.unat=r9
	mov ar.pfs=loc1
	br.ret.sptk.many rp
END(notify_resume_user)

ENTRY(sys_rt_sigreturn)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	alloc r2=ar.pfs,8,0,1,0
	.prologue
	PT_REGS_SAVES(16)
	adds sp=-16,sp
	.body
	cmp.eq pNonSys,pSys=r0,r0	// sigreturn isn't a normal syscall...
	;;
	/*
	 * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
	 * syscall-entry path does not save them we save them here instead.  Note: we
	 * don't need to save any other registers that are not saved by the stream-lined
	 * syscall path, because restore_sigcontext() restores them.
	 */
	adds r16=PT(F6)+32,sp
	adds r17=PT(F7)+32,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	adds out0=16,sp			// out0 = &sigscratch
	br.call.sptk.many rp=ia64_rt_sigreturn
.ret19:	.restore sp,0
	adds sp=16,sp
	;;
	ld8 r9=[sp]			// load new ar.unat
	mov.sptk b7=r8,ia64_leave_kernel
	;;
	mov ar.unat=r9
	br.many b7
END(sys_rt_sigreturn)

GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
	.prologue
	/*
	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
	 */
	mov r16=r0
	DO_SAVE_SWITCH_STACK
	br.call.sptk.many rp=ia64_handle_unaligned	// stack frame setup in ivt
.ret21:	.body
	DO_LOAD_SWITCH_STACK
	br.cond.sptk.many rp				// goes to ia64_leave_kernel
END(ia64_prepare_handle_unaligned)

	//
	// unw_init_running(void (*callback)(info, arg), void *arg)
	//
#	define EXTRA_FRAME_SIZE	((UNW_FRAME_INFO_SIZE+15)&~15)

GLOBAL_ENTRY(unw_init_running)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	alloc loc1=ar.pfs,2,3,3,0
	;;
	ld8 loc2=[in0],8
	mov loc0=rp
	mov r16=loc1
	DO_SAVE_SWITCH_STACK
	.body

	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	.fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
	SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
	adds sp=-EXTRA_FRAME_SIZE,sp
	.body
	;;
	adds out0=16,sp				// &info
	mov out1=r13				// current
	adds out2=16+EXTRA_FRAME_SIZE,sp	// &switch_stack
	br.call.sptk.many rp=unw_init_frame_info
1:	adds out0=16,sp				// &info
	mov b6=loc2
	mov loc2=gp				// save gp across indirect function call
	;;
	ld8 gp=[in0]
	mov out1=in1				// arg
	br.call.sptk.many rp=b6			// invoke the callback function
1:	mov gp=loc2				// restore gp

	// For now, we don't allow changing registers from within
	// unw_init_running; if we ever want to allow that, we'd
	// have to do a load_switch_stack here:
	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp

	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(unw_init_running)
EXPORT_SYMBOL(unw_init_running)

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
GLOBAL_ENTRY(_mcount)
	br ftrace_stub
END(_mcount)
EXPORT_SYMBOL(_mcount)

.here:
	br.ret.sptk.many b0

GLOBAL_ENTRY(ftrace_caller)
	alloc out0 = ar.pfs, 8, 0, 4, 0
	mov out3 = r0
	;;
	mov out2 = b0
	add r3 = 0x20, r3
	mov out1 = r1;
	br.call.sptk.many b0 = ftrace_patch_gp
	// this might be called from a module, so we must patch gp
ftrace_patch_gp:
	movl gp=__gp
	mov b0 = r3
	;;
.global ftrace_call;
ftrace_call:
{
	.mlx
	nop.m 0x0
	movl r3 = .here;;
}
	alloc loc0 = ar.pfs, 4, 4, 2, 0
	;;
	mov loc1 = b0
	mov out0 = b0
	mov loc2 = r8
	mov loc3 = r15
	;;
	adds out0 = -MCOUNT_INSN_SIZE, out0
	mov out1 = in2
	mov b6 = r3

	br.call.sptk.many b0 = b6
	;;
	mov ar.pfs = loc0
	mov b0 = loc1
	mov r8 = loc2
	mov r15 = loc3
	br ftrace_stub
	;;
END(ftrace_caller)

#else
GLOBAL_ENTRY(_mcount)
	movl r2 = ftrace_stub
	movl r3 = ftrace_trace_function;;
	ld8 r3 = [r3];;
	ld8 r3 = [r3];;
	cmp.eq p7,p0 = r2, r3
(p7)	br.sptk.many ftrace_stub
	;;

	alloc loc0 = ar.pfs, 4, 4, 2, 0
	;;
	mov loc1 = b0
	mov out0 = b0
	mov loc2 = r8
	mov loc3 = r15
	;;
	adds out0 = -MCOUNT_INSN_SIZE, out0
	mov out1 = in2
	mov b6 = r3

	br.call.sptk.many b0 = b6
	;;
	mov ar.pfs = loc0
	mov b0 = loc1
	mov r8 = loc2
	mov r15 = loc3
	br ftrace_stub
	;;
END(_mcount)
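
/*
 * Editor's note (explanatory, not from the original source): the double ld8
 * above first dereferences the ftrace_trace_function variable to obtain the
 * registered tracer's function descriptor, then loads the descriptor's
 * first word (the entry point); that entry point is compared against
 * ftrace_stub's local entry address to decide whether any tracer is
 * actually installed.
 */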
#endif

GLOBAL_ENTRY(ftrace_stub)
	mov r3 = b0
	movl r2 = _mcount_ret_helper
	;;
	mov b6 = r2
	mov b7 = r3
	br.ret.sptk.many b6

_mcount_ret_helper:
	mov b0 = r42
	mov r1 = r41
	mov ar.pfs = r40
	br b7
END(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

#define __SYSCALL(nr, entry) data8 entry
	.rodata
	.align 8
	.globl sys_call_table
sys_call_table:
#include <asm/syscall_table.h>
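
/*
 * Editor's note (illustrative, not from the original file): each
 * __SYSCALL(nr, entry) line pulled in from <asm/syscall_table.h> expands to
 * "data8 entry", so sys_call_table becomes an array of 8-byte entry-point
 * addresses indexed by (syscall number - 1024), matching the lookup in
 * ia64_trace_syscall above.
 */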
1/*
2 * arch/ia64/kernel/entry.S
3 *
4 * Kernel entry points.
5 *
6 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
7 * David Mosberger-Tang <davidm@hpl.hp.com>
8 * Copyright (C) 1999, 2002-2003
9 * Asit Mallick <Asit.K.Mallick@intel.com>
10 * Don Dugger <Don.Dugger@intel.com>
11 * Suresh Siddha <suresh.b.siddha@intel.com>
12 * Fenghua Yu <fenghua.yu@intel.com>
13 * Copyright (C) 1999 VA Linux Systems
14 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
15 */
16/*
17 * ia64_switch_to now places correct virtual mapping in in TR2 for
18 * kernel stack. This allows us to handle interrupts without changing
19 * to physical mode.
20 *
21 * Jonathan Nicklin <nicklin@missioncriticallinux.com>
22 * Patrick O'Rourke <orourke@missioncriticallinux.com>
23 * 11/07/2000
24 */
25/*
26 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
27 * VA Linux Systems Japan K.K.
28 * pv_ops.
29 */
30/*
31 * Global (preserved) predicate usage on syscall entry/exit path:
32 *
33 * pKStk: See entry.h.
34 * pUStk: See entry.h.
35 * pSys: See entry.h.
36 * pNonSys: !pSys
37 */
38
39
40#include <asm/asmmacro.h>
41#include <asm/cache.h>
42#include <asm/errno.h>
43#include <asm/kregs.h>
44#include <asm/asm-offsets.h>
45#include <asm/pgtable.h>
46#include <asm/percpu.h>
47#include <asm/processor.h>
48#include <asm/thread_info.h>
49#include <asm/unistd.h>
50#include <asm/ftrace.h>
51#include <asm/export.h>
52
53#include "minstate.h"
54
55 /*
56 * execve() is special because in case of success, we need to
57 * setup a null register window frame.
58 */
59ENTRY(ia64_execve)
60 /*
61 * Allocate 8 input registers since ptrace() may clobber them
62 */
63 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
64 alloc loc1=ar.pfs,8,2,3,0
65 mov loc0=rp
66 .body
67 mov out0=in0 // filename
68 ;; // stop bit between alloc and call
69 mov out1=in1 // argv
70 mov out2=in2 // envp
71 br.call.sptk.many rp=sys_execve
72.ret0:
73 cmp4.ge p6,p7=r8,r0
74 mov ar.pfs=loc1 // restore ar.pfs
75 sxt4 r8=r8 // return 64-bit result
76 ;;
77 stf.spill [sp]=f0
78 mov rp=loc0
79(p6) mov ar.pfs=r0 // clear ar.pfs on success
80(p7) br.ret.sptk.many rp
81
82 /*
83 * In theory, we'd have to zap this state only to prevent leaking of
84 * security sensitive state (e.g., if current->mm->dumpable is zero). However,
85 * this executes in less than 20 cycles even on Itanium, so it's not worth
86 * optimizing for...).
87 */
88 mov ar.unat=0; mov ar.lc=0
89 mov r4=0; mov f2=f0; mov b1=r0
90 mov r5=0; mov f3=f0; mov b2=r0
91 mov r6=0; mov f4=f0; mov b3=r0
92 mov r7=0; mov f5=f0; mov b4=r0
93 ldf.fill f12=[sp]; mov f13=f0; mov b5=r0
94 ldf.fill f14=[sp]; ldf.fill f15=[sp]; mov f16=f0
95 ldf.fill f17=[sp]; ldf.fill f18=[sp]; mov f19=f0
96 ldf.fill f20=[sp]; ldf.fill f21=[sp]; mov f22=f0
97 ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
98 ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
99 ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
100 br.ret.sptk.many rp
101END(ia64_execve)
102
103/*
104 * sys_clone2(u64 flags, u64 ustack_base, u64 ustack_size, u64 parent_tidptr, u64 child_tidptr,
105 * u64 tls)
106 */
107GLOBAL_ENTRY(sys_clone2)
108 /*
109 * Allocate 8 input registers since ptrace() may clobber them
110 */
111 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
112 alloc r16=ar.pfs,8,2,6,0
113 DO_SAVE_SWITCH_STACK
114 adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
115 mov loc0=rp
116 mov loc1=r16 // save ar.pfs across do_fork
117 .body
118 mov out1=in1
119 mov out2=in2
120 tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
121 mov out3=in3 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
122 ;;
123(p6) st8 [r2]=in5 // store TLS in r16 for copy_thread()
124 mov out4=in4 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
125 mov out0=in0 // out0 = clone_flags
126 br.call.sptk.many rp=do_fork
127.ret1: .restore sp
128 adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
129 mov ar.pfs=loc1
130 mov rp=loc0
131 br.ret.sptk.many rp
132END(sys_clone2)
133
134/*
135 * sys_clone(u64 flags, u64 ustack_base, u64 parent_tidptr, u64 child_tidptr, u64 tls)
136 * Deprecated. Use sys_clone2() instead.
137 */
138GLOBAL_ENTRY(sys_clone)
139 /*
140 * Allocate 8 input registers since ptrace() may clobber them
141 */
142 .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
143 alloc r16=ar.pfs,8,2,6,0
144 DO_SAVE_SWITCH_STACK
145 adds r2=PT(R16)+IA64_SWITCH_STACK_SIZE+16,sp
146 mov loc0=rp
147 mov loc1=r16 // save ar.pfs across do_fork
148 .body
149 mov out1=in1
150 mov out2=16 // stacksize (compensates for 16-byte scratch area)
151 tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
152 mov out3=in2 // parent_tidptr: valid only w/CLONE_PARENT_SETTID
153 ;;
154(p6) st8 [r2]=in4 // store TLS in r13 (tp)
155 mov out4=in3 // child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
156 mov out0=in0 // out0 = clone_flags
157 br.call.sptk.many rp=do_fork
158.ret2: .restore sp
159 adds sp=IA64_SWITCH_STACK_SIZE,sp // pop the switch stack
160 mov ar.pfs=loc1
161 mov rp=loc0
162 br.ret.sptk.many rp
163END(sys_clone)
164
165/*
166 * prev_task <- ia64_switch_to(struct task_struct *next)
167 * With Ingo's new scheduler, interrupts are disabled when this routine gets
168 * called. The code starting at .map relies on this. The rest of the code
169 * doesn't care about the interrupt masking status.
170 */
171GLOBAL_ENTRY(ia64_switch_to)
172 .prologue
173 alloc r16=ar.pfs,1,0,0,0
174 DO_SAVE_SWITCH_STACK
175 .body
176
177 adds r22=IA64_TASK_THREAD_KSP_OFFSET,r13
178 movl r25=init_task
179 mov r27=IA64_KR(CURRENT_STACK)
180 adds r21=IA64_TASK_THREAD_KSP_OFFSET,in0
181 dep r20=0,in0,61,3 // physical address of "next"
182 ;;
183 st8 [r22]=sp // save kernel stack pointer of old task
184 shr.u r26=r20,IA64_GRANULE_SHIFT
185 cmp.eq p7,p6=r25,in0
186 ;;
187 /*
188 * If we've already mapped this task's page, we can skip doing it again.
189 */
190(p6) cmp.eq p7,p6=r26,r27
191(p6) br.cond.dpnt .map
192 ;;
193.done:
194 ld8 sp=[r21] // load kernel stack pointer of new task
195 MOV_TO_KR(CURRENT, in0, r8, r9) // update "current" application register
196 mov r8=r13 // return pointer to previously running task
197 mov r13=in0 // set "current" pointer
198 ;;
199 DO_LOAD_SWITCH_STACK
200
201#ifdef CONFIG_SMP
202 sync.i // ensure "fc"s done by this CPU are visible on other CPUs
203#endif
204 br.ret.sptk.many rp // boogie on out in new context
205
206.map:
207 RSM_PSR_IC(r25) // interrupts (psr.i) are already disabled here
208 movl r25=PAGE_KERNEL
209 ;;
210 srlz.d
211 or r23=r25,r20 // construct PA | page properties
212 mov r25=IA64_GRANULE_SHIFT<<2
213 ;;
214 MOV_TO_ITIR(p0, r25, r8)
215 MOV_TO_IFA(in0, r8) // VA of next task...
216 ;;
217 mov r25=IA64_TR_CURRENT_STACK
218 MOV_TO_KR(CURRENT_STACK, r26, r8, r9) // remember last page we mapped...
219 ;;
220 itr.d dtr[r25]=r23 // wire in new mapping...
221 SSM_PSR_IC_AND_SRLZ_D(r8, r9) // reenable the psr.ic bit
222 br.cond.sptk .done
223END(ia64_switch_to)
224
225/*
226 * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
227 * means that we may get an interrupt with "sp" pointing to the new kernel stack while
228 * ar.bspstore is still pointing to the old kernel backing store area. Since ar.rsc,
229 * ar.rnat, ar.bsp, and ar.bspstore are all preserved by interrupts, this is not a
230 * problem. Also, we don't need to specify unwind information for preserved registers
231 * that are not modified in save_switch_stack as the right unwind information is already
232 * specified at the call-site of save_switch_stack.
233 */
234
235/*
236 * save_switch_stack:
237 * - r16 holds ar.pfs
238 * - b7 holds address to return to
239 * - rp (b0) holds return address to save
240 */
241GLOBAL_ENTRY(save_switch_stack)
242 .prologue
243 .altrp b7
244 flushrs // flush dirty regs to backing store (must be first in insn group)
245 .save @priunat,r17
246 mov r17=ar.unat // preserve caller's
247 .body
248#ifdef CONFIG_ITANIUM
249 adds r2=16+128,sp
250 adds r3=16+64,sp
251 adds r14=SW(R4)+16,sp
252 ;;
253 st8.spill [r14]=r4,16 // spill r4
254 lfetch.fault.excl.nt1 [r3],128
255 ;;
256 lfetch.fault.excl.nt1 [r2],128
257 lfetch.fault.excl.nt1 [r3],128
258 ;;
259 lfetch.fault.excl [r2]
260 lfetch.fault.excl [r3]
261 adds r15=SW(R5)+16,sp
262#else
263 add r2=16+3*128,sp
264 add r3=16,sp
265 add r14=SW(R4)+16,sp
266 ;;
267 st8.spill [r14]=r4,SW(R6)-SW(R4) // spill r4 and prefetch offset 0x1c0
268 lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x010
269 ;;
270 lfetch.fault.excl.nt1 [r3],128 // prefetch offset 0x090
271 lfetch.fault.excl.nt1 [r2],128 // prefetch offset 0x190
272 ;;
273 lfetch.fault.excl.nt1 [r3] // prefetch offset 0x110
274 lfetch.fault.excl.nt1 [r2] // prefetch offset 0x210
275 adds r15=SW(R5)+16,sp
276#endif
277 ;;
278 st8.spill [r15]=r5,SW(R7)-SW(R5) // spill r5
279 mov.m ar.rsc=0 // put RSE in mode: enforced lazy, little endian, pl 0
280 add r2=SW(F2)+16,sp // r2 = &sw->f2
281 ;;
282 st8.spill [r14]=r6,SW(B0)-SW(R6) // spill r6
283 mov.m r18=ar.fpsr // preserve fpsr
284 add r3=SW(F3)+16,sp // r3 = &sw->f3
285 ;;
286 stf.spill [r2]=f2,32
287 mov.m r19=ar.rnat
288 mov r21=b0
289
290 stf.spill [r3]=f3,32
291 st8.spill [r15]=r7,SW(B2)-SW(R7) // spill r7
292 mov r22=b1
293 ;;
294 // since we're done with the spills, read and save ar.unat:
295 mov.m r29=ar.unat
296 mov.m r20=ar.bspstore
297 mov r23=b2
298 stf.spill [r2]=f4,32
299 stf.spill [r3]=f5,32
300 mov r24=b3
301 ;;
302 st8 [r14]=r21,SW(B1)-SW(B0) // save b0
303 st8 [r15]=r23,SW(B3)-SW(B2) // save b2
304 mov r25=b4
305 mov r26=b5
306 ;;
307 st8 [r14]=r22,SW(B4)-SW(B1) // save b1
308 st8 [r15]=r24,SW(AR_PFS)-SW(B3) // save b3
309 mov r21=ar.lc // I-unit
310 stf.spill [r2]=f12,32
311 stf.spill [r3]=f13,32
312 ;;
313 st8 [r14]=r25,SW(B5)-SW(B4) // save b4
314 st8 [r15]=r16,SW(AR_LC)-SW(AR_PFS) // save ar.pfs
315 stf.spill [r2]=f14,32
316 stf.spill [r3]=f15,32
317 ;;
318 st8 [r14]=r26 // save b5
319 st8 [r15]=r21 // save ar.lc
320 stf.spill [r2]=f16,32
321 stf.spill [r3]=f17,32
322 ;;
323 stf.spill [r2]=f18,32
324 stf.spill [r3]=f19,32
325 ;;
326 stf.spill [r2]=f20,32
327 stf.spill [r3]=f21,32
328 ;;
329 stf.spill [r2]=f22,32
330 stf.spill [r3]=f23,32
331 ;;
332 stf.spill [r2]=f24,32
333 stf.spill [r3]=f25,32
334 ;;
335 stf.spill [r2]=f26,32
336 stf.spill [r3]=f27,32
337 ;;
338 stf.spill [r2]=f28,32
339 stf.spill [r3]=f29,32
340 ;;
341 stf.spill [r2]=f30,SW(AR_UNAT)-SW(F30)
342 stf.spill [r3]=f31,SW(PR)-SW(F31)
343 add r14=SW(CALLER_UNAT)+16,sp
344 ;;
345 st8 [r2]=r29,SW(AR_RNAT)-SW(AR_UNAT) // save ar.unat
346 st8 [r14]=r17,SW(AR_FPSR)-SW(CALLER_UNAT) // save caller_unat
347 mov r21=pr
348 ;;
349 st8 [r2]=r19,SW(AR_BSPSTORE)-SW(AR_RNAT) // save ar.rnat
350 st8 [r3]=r21 // save predicate registers
351 ;;
352 st8 [r2]=r20 // save ar.bspstore
353 st8 [r14]=r18 // save fpsr
354 mov ar.rsc=3 // put RSE back into eager mode, pl 0
355 br.cond.sptk.many b7
356END(save_switch_stack)
357
358/*
359 * load_switch_stack:
360 * - "invala" MUST be done at call site (normally in DO_LOAD_SWITCH_STACK)
361 * - b7 holds address to return to
362 * - must not touch r8-r11
363 */
364GLOBAL_ENTRY(load_switch_stack)
365 .prologue
366 .altrp b7
367
368 .body
369 lfetch.fault.nt1 [sp]
370 adds r2=SW(AR_BSPSTORE)+16,sp
371 adds r3=SW(AR_UNAT)+16,sp
372 mov ar.rsc=0 // put RSE into enforced lazy mode
373 adds r14=SW(CALLER_UNAT)+16,sp
374 adds r15=SW(AR_FPSR)+16,sp
375 ;;
376 ld8 r27=[r2],(SW(B0)-SW(AR_BSPSTORE)) // bspstore
377 ld8 r29=[r3],(SW(B1)-SW(AR_UNAT)) // unat
378 ;;
379 ld8 r21=[r2],16 // restore b0
380 ld8 r22=[r3],16 // restore b1
381 ;;
382 ld8 r23=[r2],16 // restore b2
383 ld8 r24=[r3],16 // restore b3
384 ;;
385 ld8 r25=[r2],16 // restore b4
386 ld8 r26=[r3],16 // restore b5
387 ;;
388 ld8 r16=[r2],(SW(PR)-SW(AR_PFS)) // ar.pfs
389 ld8 r17=[r3],(SW(AR_RNAT)-SW(AR_LC)) // ar.lc
390 ;;
391 ld8 r28=[r2] // restore pr
392 ld8 r30=[r3] // restore rnat
393 ;;
394 ld8 r18=[r14],16 // restore caller's unat
395 ld8 r19=[r15],24 // restore fpsr
396 ;;
397 ldf.fill f2=[r14],32
398 ldf.fill f3=[r15],32
399 ;;
400 ldf.fill f4=[r14],32
401 ldf.fill f5=[r15],32
402 ;;
403 ldf.fill f12=[r14],32
404 ldf.fill f13=[r15],32
405 ;;
406 ldf.fill f14=[r14],32
407 ldf.fill f15=[r15],32
408 ;;
409 ldf.fill f16=[r14],32
410 ldf.fill f17=[r15],32
411 ;;
412 ldf.fill f18=[r14],32
413 ldf.fill f19=[r15],32
414 mov b0=r21
415 ;;
416 ldf.fill f20=[r14],32
417 ldf.fill f21=[r15],32
418 mov b1=r22
419 ;;
420 ldf.fill f22=[r14],32
421 ldf.fill f23=[r15],32
422 mov b2=r23
423 ;;
424 mov ar.bspstore=r27
425 mov ar.unat=r29 // establish unat holding the NaT bits for r4-r7
426 mov b3=r24
427 ;;
428 ldf.fill f24=[r14],32
429 ldf.fill f25=[r15],32
430 mov b4=r25
431 ;;
432 ldf.fill f26=[r14],32
433 ldf.fill f27=[r15],32
434 mov b5=r26
435 ;;
436 ldf.fill f28=[r14],32
437 ldf.fill f29=[r15],32
438 mov ar.pfs=r16
439 ;;
440 ldf.fill f30=[r14],32
441 ldf.fill f31=[r15],24
442 mov ar.lc=r17
443 ;;
444 ld8.fill r4=[r14],16
445 ld8.fill r5=[r15],16
446 mov pr=r28,-1
447 ;;
448 ld8.fill r6=[r14],16
449 ld8.fill r7=[r15],16
450
451 mov ar.unat=r18 // restore caller's unat
452 mov ar.rnat=r30 // must restore after bspstore but before rsc!
453 mov ar.fpsr=r19 // restore fpsr
454 mov ar.rsc=3 // put RSE back into eager mode, pl 0
455 br.cond.sptk.many b7
456END(load_switch_stack)
457
GLOBAL_ENTRY(prefetch_stack)
	add r14 = -IA64_SWITCH_STACK_SIZE, sp
	add r15 = IA64_TASK_THREAD_KSP_OFFSET, in0
	;;
	ld8 r16 = [r15] // load next's stack pointer
	lfetch.fault.excl [r14], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault.excl [r14], 128
	lfetch.fault [r16], 128
	;;
	lfetch.fault [r16], 128
	br.ret.sptk.many rp
END(prefetch_stack)

	/*
	 * Invoke a system call, but do some tracing before and after the call.
	 * We MUST preserve the current register frame throughout this routine
	 * because some system calls (such as ia64_execve) directly
	 * manipulate ar.pfs.
	 */
GLOBAL_ENTRY(ia64_trace_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * We need to preserve the scratch registers f6-f11 in case the system
	 * call is sigreturn.
	 */
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	br.call.sptk.many rp=syscall_trace_enter // give parent a chance to catch syscall args
	cmp.lt p6,p0=r8,r0 // a negative return from syscall_trace_enter() aborts the syscall
	adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error // syscall failed ->
	adds r16=PT(F6)+16,sp
	adds r17=PT(F7)+16,sp
	;;
	ldf.fill f6=[r16],32
	ldf.fill f7=[r17],32
	;;
	ldf.fill f8=[r16],32
	ldf.fill f9=[r17],32
	;;
	ldf.fill f10=[r16]
	ldf.fill f11=[r17]
	// the syscall number may have changed, so re-load it and re-calculate the
	// syscall entry-point:
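	// (ia64 syscall numbers start at 1024, hence the -1024 bias below
	// before indexing sys_call_table)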
	adds r15=PT(R15)+16,sp // r15 = &pt_regs.r15 (syscall #)
	;;
	ld8 r15=[r15]
	mov r3=NR_syscalls - 1
	;;
	adds r15=-1024,r15
	movl r16=sys_call_table
	;;
	shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
	cmp.leu p6,p7=r15,r3
	;;
(p6)	ld8 r20=[r20] // load address of syscall entry point
(p7)	movl r20=sys_ni_syscall
	;;
	mov b6=r20
	br.call.sptk.many rp=b6 // do the syscall
.strace_check_retval:
	cmp.lt p6,p0=r8,r0 // syscall failed?
	adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
	adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
	mov r10=0
(p6)	br.cond.sptk strace_error // syscall failed ->
	;; // avoid RAW on r10
.strace_save_retval:
.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
.ret3:
(pUStk)	cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
(pUStk)	rsm psr.i // disable interrupts
	br.cond.sptk ia64_work_pending_syscall_end

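	/*
	 * An ia64 syscall reports failure by returning r10 == -1 with the
	 * positive errno value in r8.  As in handle_syscall_error below,
	 * the error is only flagged if pt_regs.r8 is non-zero.
	 */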
strace_error:
	ld8 r3=[r2] // load pt_regs.r8
	sub r9=0,r8 // negate return value to get errno value
	;;
	cmp.ne p6,p0=r3,r0 // is pt_regs.r8!=0?
	adds r3=16,r2 // r3=&pt_regs.r10
	;;
(p6)	mov r10=-1
(p6)	mov r8=r9
	br.cond.sptk .strace_save_retval
END(ia64_trace_syscall)

	/*
	 * When traced and returning from sigreturn, we invoke syscall_trace_leave
	 * but then go straight to ia64_leave_kernel rather than ia64_leave_syscall.
	 */
GLOBAL_ENTRY(ia64_strace_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
{ /*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle. This is a workaround.
	 */
	nop.m 0
	nop.i 0
	br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
}
.ret4:	br.cond.sptk ia64_leave_kernel
END(ia64_strace_leave_kernel)

ENTRY(call_payload)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0)
	/* call the kernel_thread payload; fn is in r4, arg in r5 */
	alloc loc1=ar.pfs,0,3,1,0
	mov loc0=rp
	mov loc2=gp
	mov out0=r5 // arg
	ld8 r14 = [r4], 8 // fn.address
	;;
	mov b6 = r14
	ld8 gp = [r4] // fn.gp
	;;
	br.call.sptk.many rp=b6 // fn(arg)
.ret12:	mov gp=loc2
	mov rp=loc0
	mov ar.pfs=loc1
	/* ... and if it has returned, we are going to userland */
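	// r0 != r0 is always false, so this clears pKStk and sets pUStk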
	cmp.ne pKStk,pUStk=r0,r0
	br.ret.sptk.many rp
END(call_payload)

GLOBAL_ENTRY(ia64_ret_from_clone)
	PT_REGS_UNWIND_INFO(0)
{ /*
	 * Some versions of gas generate bad unwind info if the first instruction of a
	 * procedure doesn't go into the first slot of a bundle. This is a workaround.
	 */
	nop.m 0
	nop.i 0
	/*
	 * We need to call schedule_tail() to complete the scheduling process.
	 * Called by ia64_switch_to() after do_fork()->copy_thread(). r8 contains the
	 * address of the previously executing task.
	 */
	br.call.sptk.many rp=ia64_invoke_schedule_tail
}
.ret8:
(pKStk)	br.call.sptk.many rp=call_payload
	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
	ld4 r2=[r2]
	;;
	mov r8=0
	and r2=_TIF_SYSCALL_TRACEAUDIT,r2
	;;
	cmp.ne p6,p0=r2,r0
(p6)	br.cond.spnt .strace_check_retval
	;; // added stop bits to prevent r8 dependency
END(ia64_ret_from_clone)
	// fall through
GLOBAL_ENTRY(ia64_ret_from_syscall)
	PT_REGS_UNWIND_INFO(0)
	cmp.ge p6,p7=r8,r0 // syscall executed successfully?
	adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
	mov r10=r0 // clear error indication in r10
(p7)	br.cond.spnt handle_syscall_error // handle potential syscall failure
END(ia64_ret_from_syscall)
	// fall through

/*
 * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
 * need to switch to bank 0 and doesn't restore the scratch registers.
 * To avoid leaking kernel bits, the scratch registers are set to
 * the following known-to-be-safe values:
 *
 *	r1: restored (global pointer)
 *	r2: cleared
 *	r3: 1 (when returning to user-level)
 *	r8-r11: restored (syscall return value(s))
 *	r12: restored (user-level stack pointer)
 *	r13: restored (user-level thread pointer)
 *	r14: set to __kernel_syscall_via_epc
 *	r15: restored (syscall #)
 *	r16-r17: cleared
 *	r18: user-level b6
 *	r19: cleared
 *	r20: user-level ar.fpsr
 *	r21: user-level b0
 *	r22: cleared
 *	r23: user-level ar.bspstore
 *	r24: user-level ar.rnat
 *	r25: user-level ar.unat
 *	r26: user-level ar.pfs
 *	r27: user-level ar.rsc
 *	r28: user-level ip
 *	r29: user-level psr
 *	r30: user-level cfm
 *	r31: user-level pr
 *	f6-f11: cleared
 *	pr: restored (user-level pr)
 *	b0: restored (user-level rp)
 *	b6: restored
 *	b7: set to __kernel_syscall_via_epc
 *	ar.unat: restored (user-level ar.unat)
 *	ar.pfs: restored (user-level ar.pfs)
 *	ar.rsc: restored (user-level ar.rsc)
 *	ar.rnat: restored (user-level ar.rnat)
 *	ar.bspstore: restored (user-level ar.bspstore)
 *	ar.fpsr: restored (user-level ar.fpsr)
 *	ar.ccv: cleared
 *	ar.csd: cleared
 *	ar.ssd: cleared
 */
GLOBAL_ENTRY(ia64_leave_syscall)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work. We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0. After extra work processing has been completed, execution
	 * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	RSM_PSR_I(p0, r2, r18) // disable interrupts
	cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20] // r21 <- preempt_count
(pUStk)	mov r21=0 // r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
	RSM_PSR_I(pUStk, r2, r18)
	cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pUStk)	cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
#endif
.global ia64_work_processed_syscall;
ia64_work_processed_syscall:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	adds r2=PT(LOADRS)+16,r12
	MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18] // load current_thread_info()->flags
	ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
	adds r3=PT(AR_BSPSTORE)+16,r12 // deferred
	;;
#else
	adds r2=PT(LOADRS)+16,r12
	adds r3=PT(AR_BSPSTORE)+16,r12
	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r18] // load current_thread_info()->flags
	ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
	nop.i 0
	;;
#endif
	mov r16=ar.bsp // M2 get existing backing store pointer
	ld8 r18=[r2],PT(R9)-PT(B6) // load b6
(p6)	and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
	;;
	ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
(p6)	cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
(p6)	br.cond.spnt .work_pending_syscall
	;;
	// start restoring the state saved on the kernel stack (struct pt_regs):
	ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
	ld8 r11=[r3],PT(CR_IIP)-PT(R11)
(pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE!
	;;
	invala // M0|1 invalidate ALAT
	RSM_PSR_I_IC(r28, r29, r30) // M2 turn off interrupts and interruption collection
	cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs

	ld8 r29=[r2],16 // M0|1 load cr.ipsr
	ld8 r28=[r3],16 // M0|1 load cr.iip
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
	;;
	ld8 r30=[r2],16 // M0|1 load cr.ifs
	ld8 r25=[r3],16 // M0|1 load ar.unat
(pUStk)	add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
#else
	mov r22=r0 // A clear r22
	;;
	ld8 r30=[r2],16 // M0|1 load cr.ifs
	ld8 r25=[r3],16 // M0|1 load ar.unat
(pUStk)	add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
	;;
#endif
	ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
	MOV_FROM_PSR(pKStk, r22, r21) // M2 read PSR now that interrupts are disabled
	nop 0
	;;
	ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
	ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
	mov f6=f0 // F clear f6
	;;
	ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
	ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates
	mov f7=f0 // F clear f7
	;;
	ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
	ld8.fill r1=[r3],16 // M0|1 load r1
(pUStk)	mov r17=1 // A
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	st1 [r15]=r17 // M2|3
#else
(pUStk)	st1 [r14]=r17 // M2|3
#endif
	ld8.fill r13=[r3],16 // M0|1
	mov f8=f0 // F clear f8
	;;
	ld8.fill r12=[r2] // M0|1 restore r12 (sp)
	ld8.fill r15=[r3] // M0|1 restore r15
	mov b6=r18 // I0 restore b6

	LOAD_PHYS_STACK_REG_SIZE(r17)
	mov f9=f0 // F clear f9
(pKStk)	br.cond.dpnt.many skip_rbs_switch // B

	srlz.d // M0 ensure interruption collection is off (for cover)
	shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
	COVER // B add current frame into dirty partition & set cr.ifs
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	mov r19=ar.bsp // M2 get new backing store pointer
	st8 [r14]=r22 // M save time at leave
	mov f10=f0 // F clear f10

	mov r22=r0 // A clear r22
	movl r14=__kernel_syscall_via_epc // X
	;;
#else
	mov r19=ar.bsp // M2 get new backing store pointer
	mov f10=f0 // F clear f10

	nop.m 0
	movl r14=__kernel_syscall_via_epc // X
	;;
#endif
	mov.m ar.csd=r0 // M2 clear ar.csd
	mov.m ar.ccv=r0 // M2 clear ar.ccv
	mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc)

	mov.m ar.ssd=r0 // M2 clear ar.ssd
	mov f11=f0 // F clear f11
	br.cond.sptk.many rbs_switch // B
END(ia64_leave_syscall)

GLOBAL_ENTRY(ia64_leave_kernel)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * work.need_resched etc. mustn't get changed by this CPU before it returns to
	 * user- or fsys-mode, hence we disable interrupts early on.
	 *
	 * p6 controls whether current_thread_info()->flags needs to be checked for
	 * extra work. We always check for extra work when returning to user-level.
	 * With CONFIG_PREEMPT, we also check for extra work when the preempt_count
	 * is 0. After extra work processing has been completed, execution
	 * resumes at .work_processed_kernel with p6 set to 1 if the extra-work-check
	 * needs to be redone.
	 */
#ifdef CONFIG_PREEMPT
	RSM_PSR_I(p0, r17, r31) // disable interrupts
	cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
	;;
	.pred.rel.mutex pUStk,pKStk
(pKStk)	ld4 r21=[r20] // r21 <- preempt_count
(pUStk)	mov r21=0 // r21 <- 0
	;;
	cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
#else
	RSM_PSR_I(pUStk, r17, r31)
	cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
(pUStk)	cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
#endif
.work_processed_kernel:
	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
	;;
(p6)	ld4 r31=[r17] // load current_thread_info()->flags
	adds r21=PT(PR)+16,r12
	;;

	lfetch [r21],PT(CR_IPSR)-PT(PR)
	adds r2=PT(B6)+16,r12
	adds r3=PT(R16)+16,r12
	;;
	lfetch [r21]
	ld8 r28=[r2],8 // load b6
	adds r29=PT(R24)+16,r12

	ld8.fill r16=[r3],PT(AR_CSD)-PT(R16)
	adds r30=PT(AR_CCV)+16,r12
(p6)	and r19=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
	;;
	ld8.fill r24=[r29]
	ld8 r15=[r30] // load ar.ccv
(p6)	cmp4.ne.unc p6,p0=r19, r0 // any special work pending?
	;;
	ld8 r29=[r2],16 // load b7
	ld8 r30=[r3],16 // load ar.csd
(p6)	br.cond.spnt .work_pending
	;;
	ld8 r31=[r2],16 // load ar.ssd
	ld8.fill r8=[r3],16
	;;
	ld8.fill r9=[r2],16
	ld8.fill r10=[r3],PT(R17)-PT(R10)
	;;
	ld8.fill r11=[r2],PT(R18)-PT(R11)
	ld8.fill r17=[r3],16
	;;
	ld8.fill r18=[r2],16
	ld8.fill r19=[r3],16
	;;
	ld8.fill r20=[r2],16
	ld8.fill r21=[r3],16
	mov ar.csd=r30
	mov ar.ssd=r31
	;;
	RSM_PSR_I_IC(r23, r22, r25) // initiate turning off of interrupt and interruption collection
	invala // invalidate ALAT
	;;
	ld8.fill r22=[r2],24
	ld8.fill r23=[r3],24
	mov b6=r28
	;;
	ld8.fill r25=[r2],16
	ld8.fill r26=[r3],16
	mov b7=r29
	;;
	ld8.fill r27=[r2],16
	ld8.fill r28=[r3],16
	;;
	ld8.fill r29=[r2],16
	ld8.fill r30=[r3],24
	;;
	ld8.fill r31=[r2],PT(F9)-PT(R31)
	adds r3=PT(F10)-PT(F6),r3
	;;
	ldf.fill f9=[r2],PT(F6)-PT(F9)
	ldf.fill f10=[r3],PT(F8)-PT(F10)
	;;
	ldf.fill f6=[r2],PT(F7)-PT(F6)
	;;
	ldf.fill f7=[r2],PT(F11)-PT(F7)
	ldf.fill f8=[r3],32
	;;
	srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
	mov ar.ccv=r15
	;;
	ldf.fill f11=[r2]
	BSW_0(r2, r3, r15) // switch back to bank 0 (no stop bit required beforehand...)
	;;
(pUStk)	mov r18=IA64_KR(CURRENT) // M2 (12 cycle read latency)
	adds r16=PT(CR_IPSR)+16,r12
	adds r17=PT(CR_IIP)+16,r12

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	.pred.rel.mutex pUStk,pKStk
	MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
	MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave
	nop.i 0
	;;
#else
	MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
	nop.i 0
	nop.i 0
	;;
#endif
	ld8 r29=[r16],16 // load cr.ipsr
	ld8 r28=[r17],16 // load cr.iip
	;;
	ld8 r30=[r16],16 // load cr.ifs
	ld8 r25=[r17],16 // load ar.unat
	;;
	ld8 r26=[r16],16 // load ar.pfs
	ld8 r27=[r17],16 // load ar.rsc
	cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
	;;
	ld8 r24=[r16],16 // load ar.rnat (may be garbage)
	ld8 r23=[r17],16 // load ar.bspstore (may be garbage)
	;;
	ld8 r31=[r16],16 // load predicates
	ld8 r21=[r17],16 // load b0
	;;
	ld8 r19=[r16],16 // load ar.rsc value for "loadrs"
	ld8.fill r1=[r17],16 // load r1
	;;
	ld8.fill r12=[r16],16
	ld8.fill r13=[r17],16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
#else
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
#endif
	;;
	ld8 r20=[r16],16 // ar.fpsr
	ld8.fill r15=[r17],16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred
#endif
	;;
	ld8.fill r14=[r16],16
	ld8.fill r2=[r17]
(pUStk)	mov r17=1
	;;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	// mmi_ : ld8 st1 shr;;  mmi_ : st8 st1 shr;;
	// mib  : mov add br  -> mib  : ld8 add br
	// bbb_ : br  nop cover;; mbb_ : mov br cover;;
	//
	// nobody needs bsp in r16 if the (pKStk) branch is taken.
(pUStk)	st8 [r3]=r22 // save time at leave
(pUStk)	st1 [r18]=r17 // restore current->thread.on_ustack
	shr.u r18=r19,16 // get byte size of existing "dirty" partition
	;;
	ld8.fill r3=[r16] // deferred
	LOAD_PHYS_STACK_REG_SIZE(r17)
(pKStk)	br.cond.dpnt skip_rbs_switch
	mov r16=ar.bsp // get existing backing store pointer
#else
	ld8.fill r3=[r16]
(pUStk)	st1 [r18]=r17 // restore current->thread.on_ustack
	shr.u r18=r19,16 // get byte size of existing "dirty" partition
	;;
	mov r16=ar.bsp // get existing backing store pointer
	LOAD_PHYS_STACK_REG_SIZE(r17)
(pKStk)	br.cond.dpnt skip_rbs_switch
#endif

	/*
	 * Restore user backing store.
	 *
	 * NOTE: alloc, loadrs, and cover can't be predicated.
	 */
(pNonSys) br.cond.dpnt dont_preserve_current_frame
	COVER // add current frame into dirty partition and set cr.ifs
	;;
	mov r19=ar.bsp // get new backing store pointer
rbs_switch:
	sub r16=r16,r18 // krbs = old bsp - size of dirty partition
	cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
	;;
	sub r19=r19,r16 // calculate total byte size of dirty partition
	add r18=64,r18 // don't force in0-in7 into memory...
	;;
	shl r19=r19,16 // shift size of dirty partition into loadrs position
	;;
dont_preserve_current_frame:
	/*
	 * To prevent leaking bits between the kernel and user-space,
	 * we must clear the stacked registers in the "invalid" partition here.
	 * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
	 * 5 registers/cycle on McKinley).
	 */
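	/*
	 * How the loop below works: each iteration allocates Nregs stacked
	 * registers as locals and zeroes them; the predicated br.call
	 * "recursion" opens a fresh register frame each time around, out0
	 * counts the bytes still to be cleared and out1 the recursion
	 * depth, so the matching chain of br.ret instructions can unwind
	 * once everything is clear.
	 */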
# define pRecurse p6
# define pReturn p7
#ifdef CONFIG_ITANIUM
# define Nregs 10
#else
# define Nregs 14
#endif
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	shr.u loc1=r18,9 // RNaTslots <= floor(dirtySize / (64*8))
	sub r17=r17,r18 // r17 = (physStackedSize + 8) - dirtySize
	;;
	mov ar.rsc=r19 // load ar.rsc to be used for "loadrs"
	shladd in0=loc1,3,r17
	mov in1=0
	;;
	TEXT_ALIGN(32)
rse_clear_invalid:
#ifdef CONFIG_ITANIUM
	// cycle 0
	{ .mii
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
}{ .mfb
	add out1=1,in1 // increment recursion count
	nop.f 0
	nop.b 0 // can't do br.call here because of alloc (WAW on CFM)
	;;
}{ .mfi // cycle 1
	mov loc1=0
	nop.f 0
	mov loc2=0
}{ .mib
	mov loc3=0
	mov loc4=0
(pRecurse) br.call.sptk.many b0=rse_clear_invalid

}{ .mfi // cycle 2
	mov loc5=0
	nop.f 0
	cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
}{ .mib
	mov loc6=0
	mov loc7=0
(pReturn) br.ret.sptk.many b0
}
#else /* !CONFIG_ITANIUM */
	alloc loc0=ar.pfs,2,Nregs-2,2,0
	cmp.lt pRecurse,p0=Nregs*8,in0 // if more than Nregs regs left to clear, (re)curse
	add out0=-Nregs*8,in0
	add out1=1,in1 // increment recursion count
	mov loc1=0
	mov loc2=0
	;;
	mov loc3=0
	mov loc4=0
	mov loc5=0
	mov loc6=0
	mov loc7=0
(pRecurse) br.call.dptk.few b0=rse_clear_invalid
	;;
	mov loc8=0
	mov loc9=0
	cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
	mov loc10=0
	mov loc11=0
(pReturn) br.ret.dptk.many b0
#endif /* !CONFIG_ITANIUM */
# undef pRecurse
# undef pReturn
	;;
	alloc r17=ar.pfs,0,0,0,0 // drop current register frame
	;;
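	// loadrs pulls the user's dirty partition (whose size was shifted
	// into the ar.rsc.loadrs field above) back in from the kernel
	// backing store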
	loadrs
	;;
skip_rbs_switch:
	mov ar.unat=r25 // M2
(pKStk)	extr.u r22=r22,21,1 // I0 extract current value of psr.pp from r22
(pLvSys)mov r19=r0 // A clear r19 for leave_syscall, no-op otherwise
	;;
(pUStk)	mov ar.bspstore=r23 // M2
(pKStk)	dep r29=r22,r29,21,1 // I0 update ipsr.pp with psr.pp
(pLvSys)mov r16=r0 // A clear r16 for leave_syscall, no-op otherwise
	;;
	MOV_TO_IPSR(p0, r29, r25) // M2
	mov ar.pfs=r26 // I0
(pLvSys)mov r17=r0 // A clear r17 for leave_syscall, no-op otherwise

	MOV_TO_IFS(p9, r30, r25) // M2
	mov b0=r21 // I0
(pLvSys)mov r18=r0 // A clear r18 for leave_syscall, no-op otherwise

	mov ar.fpsr=r20 // M2
	MOV_TO_IIP(r28, r25) // M2
	nop 0
	;;
(pUStk)	mov ar.rnat=r24 // M2 must happen with RSE in lazy mode
	nop 0
(pLvSys)mov r2=r0

	mov ar.rsc=r27 // M2
	mov pr=r31,-1 // I0
	RFI // B

	/*
	 * On entry:
	 *	r20 = &current->thread_info->preempt_count (if CONFIG_PREEMPT)
	 *	r31 = current->thread_info->flags
	 * On exit:
	 *	p6 = TRUE if work-pending-check needs to be redone
	 *
	 * Interrupts are disabled on entry, re-enabled depending on the work
	 * to be done, and disabled again on exit.
	 */
.work_pending_syscall:
	add r2=-8,r2
	add r3=-8,r3
	;;
	st8 [r2]=r8
	st8 [r3]=r10
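	// r8/r10 are saved into pt_regs above so the syscall return values
	// survive the calls below; ia64_work_pending_syscall_end reloads them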
.work_pending:
	tbit.z p6,p0=r31,TIF_NEED_RESCHED // is resched not needed?
(p6)	br.cond.sptk.few .notify
	br.call.spnt.many rp=preempt_schedule_irq
.ret9:	cmp.eq p6,p0=r0,r0 // p6 <- 1 (re-check)
(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel

.notify:
(pUStk)	br.call.spnt.many rp=notify_resume_user
.ret10:	cmp.ne p6,p0=r0,r0 // p6 <- 0 (don't re-check)
(pLvSys)br.cond.sptk.few ia64_work_pending_syscall_end
	br.cond.sptk.many .work_processed_kernel

.global ia64_work_pending_syscall_end;
ia64_work_pending_syscall_end:
	adds r2=PT(R8)+16,r12
	adds r3=PT(R10)+16,r12
	;;
	ld8 r8=[r2]
	ld8 r10=[r3]
	br.cond.sptk.many ia64_work_processed_syscall
END(ia64_leave_kernel)

ENTRY(handle_syscall_error)
	/*
	 * Some system calls (e.g., ptrace, mmap) can return arbitrary values
	 * which could lead us to mistake a negative return value for a failed
	 * syscall. Such syscalls must deposit a non-zero value in pt_regs.r8
	 * to indicate an error. If pt_regs.r8 is zero, we assume that the
	 * call completed successfully.
	 */
	PT_REGS_UNWIND_INFO(0)
	ld8 r3=[r2] // load pt_regs.r8
	;;
	cmp.eq p6,p7=r3,r0 // is pt_regs.r8==0?
	;;
(p7)	mov r10=-1
(p7)	sub r8=0,r8 // negate return value to get errno
	br.cond.sptk ia64_leave_syscall
END(handle_syscall_error)

	/*
	 * Invoke schedule_tail(task) while preserving in0-in7, which may be needed
	 * in case a system call gets restarted.
	 */
GLOBAL_ENTRY(ia64_invoke_schedule_tail)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,1,0
	mov loc0=rp
	mov out0=r8 // Address of previous task
	;;
	br.call.sptk.many rp=schedule_tail
.ret11:	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)

	/*
	 * Setup stack and call do_notify_resume_user(), keeping interrupts
	 * disabled.
	 *
	 * Note that pSys and pNonSys need to be set up by the caller.
	 * We declare 8 input registers so the system call args get preserved,
	 * in case we need to restart a system call.
	 */
GLOBAL_ENTRY(notify_resume_user)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
	alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
	mov r9=ar.unat
	mov loc0=rp // save return address
	mov out0=0 // there is no "oldset"
	adds out1=8,sp // out1=&sigscratch->ar_pfs
(pSys)	mov out2=1 // out2==1 => we're in a syscall
	;;
(pNonSys) mov out2=0 // out2==0 => not a syscall
	.fframe 16
	.spillsp ar.unat, 16
	st8 [sp]=r9,-16 // allocate space for ar.unat and save it
	st8 [out1]=loc1,-8 // save ar.pfs, out1=&sigscratch
	.body
	br.call.sptk.many rp=do_notify_resume_user
.ret15:	.restore sp
	adds sp=16,sp // pop scratch stack space
	;;
	ld8 r9=[sp] // load new unat from sigscratch->scratch_unat
	mov rp=loc0
	;;
	mov ar.unat=r9
	mov ar.pfs=loc1
	br.ret.sptk.many rp
END(notify_resume_user)

ENTRY(sys_rt_sigreturn)
	PT_REGS_UNWIND_INFO(0)
	/*
	 * Allocate 8 input registers since ptrace() may clobber them
	 */
	alloc r2=ar.pfs,8,0,1,0
	.prologue
	PT_REGS_SAVES(16)
	adds sp=-16,sp
	.body
	cmp.eq pNonSys,pSys=r0,r0 // sigreturn isn't a normal syscall...
	;;
	/*
	 * leave_kernel() restores f6-f11 from pt_regs, but since the streamlined
	 * syscall-entry path does not save them we save them here instead. Note: we
	 * don't need to save any other registers that are not saved by the
	 * streamlined syscall path, because restore_sigcontext() restores them.
	 */
	adds r16=PT(F6)+32,sp
	adds r17=PT(F7)+32,sp
	;;
	stf.spill [r16]=f6,32
	stf.spill [r17]=f7,32
	;;
	stf.spill [r16]=f8,32
	stf.spill [r17]=f9,32
	;;
	stf.spill [r16]=f10
	stf.spill [r17]=f11
	adds out0=16,sp // out0 = &sigscratch
	br.call.sptk.many rp=ia64_rt_sigreturn
.ret19:	.restore sp,0
	adds sp=16,sp
	;;
	ld8 r9=[sp] // load new ar.unat
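	// ia64_rt_sigreturn() returned the continuation point in r8; the
	// second operand of mov.sptk is just a static hint that it is
	// normally ia64_leave_kernel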
	mov.sptk b7=r8,ia64_leave_kernel
	;;
	mov ar.unat=r9
	br.many b7
END(sys_rt_sigreturn)

GLOBAL_ENTRY(ia64_prepare_handle_unaligned)
	.prologue
	/*
	 * r16 = fake ar.pfs, we simply need to make sure privilege is still 0
	 */
	mov r16=r0
	DO_SAVE_SWITCH_STACK
	br.call.sptk.many rp=ia64_handle_unaligned // stack frame setup in ivt
.ret21:	.body
	DO_LOAD_SWITCH_STACK
	br.cond.sptk.many rp // goes to ia64_leave_kernel
END(ia64_prepare_handle_unaligned)

	//
	// unw_init_running(void (*callback)(info, arg), void *arg)
	//
# define EXTRA_FRAME_SIZE ((UNW_FRAME_INFO_SIZE+15)&~15)
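	// EXTRA_FRAME_SIZE rounds the unwind frame-info size up to the next
	// multiple of 16, keeping sp 16-byte aligned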

GLOBAL_ENTRY(unw_init_running)
	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	alloc loc1=ar.pfs,2,3,3,0
	;;
	ld8 loc2=[in0],8
	mov loc0=rp
	mov r16=loc1
	DO_SAVE_SWITCH_STACK
	.body

	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
	.fframe IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE
	SWITCH_STACK_SAVES(EXTRA_FRAME_SIZE)
	adds sp=-EXTRA_FRAME_SIZE,sp
	.body
	;;
	adds out0=16,sp // &info
	mov out1=r13 // current
	adds out2=16+EXTRA_FRAME_SIZE,sp // &switch_stack
	br.call.sptk.many rp=unw_init_frame_info
1:	adds out0=16,sp // &info
	mov b6=loc2
	mov loc2=gp // save gp across indirect function call
	;;
	ld8 gp=[in0]
	mov out1=in1 // arg
	br.call.sptk.many rp=b6 // invoke the callback function
1:	mov gp=loc2 // restore gp

	// For now, we don't allow changing registers from within
	// unw_init_running; if we ever want to allow that, we'd
	// have to do a load_switch_stack here:
	.restore sp
	adds sp=IA64_SWITCH_STACK_SIZE+EXTRA_FRAME_SIZE,sp

	mov ar.pfs=loc1
	mov rp=loc0
	br.ret.sptk.many rp
END(unw_init_running)
EXPORT_SYMBOL(unw_init_running)

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
GLOBAL_ENTRY(_mcount)
	br ftrace_stub
END(_mcount)
EXPORT_SYMBOL(_mcount)

.here:
	br.ret.sptk.many b0

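/*
 * With CONFIG_DYNAMIC_FTRACE, the bundle at the global ftrace_call label
 * below is expected to be patched at run time so that r3 names the active
 * tracer; unpatched, the "movl r3 = .here" makes the subsequent indirect
 * call return immediately.
 */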
GLOBAL_ENTRY(ftrace_caller)
	alloc out0 = ar.pfs, 8, 0, 4, 0
	mov out3 = r0
	;;
	mov out2 = b0
	add r3 = 0x20, r3
	mov out1 = r1;
	br.call.sptk.many b0 = ftrace_patch_gp
	// this may be called from a module, so we must patch gp
ftrace_patch_gp:
	movl gp=__gp
	mov b0 = r3
	;;
.global ftrace_call;
ftrace_call:
{
	.mlx
	nop.m 0x0
	movl r3 = .here;;
}
	alloc loc0 = ar.pfs, 4, 4, 2, 0
	;;
	mov loc1 = b0
	mov out0 = b0
	mov loc2 = r8
	mov loc3 = r15
	;;
	adds out0 = -MCOUNT_INSN_SIZE, out0
	mov out1 = in2
	mov b6 = r3

	br.call.sptk.many b0 = b6
	;;
	mov ar.pfs = loc0
	mov b0 = loc1
	mov r8 = loc2
	mov r15 = loc3
	br ftrace_stub
	;;
END(ftrace_caller)

#else
GLOBAL_ENTRY(_mcount)
	movl r2 = ftrace_stub
	movl r3 = ftrace_trace_function;;
	ld8 r3 = [r3];;
	ld8 r3 = [r3];;
	cmp.eq p7,p0 = r2, r3
(p7)	br.sptk.many ftrace_stub
	;;

	alloc loc0 = ar.pfs, 4, 4, 2, 0
	;;
	mov loc1 = b0
	mov out0 = b0
	mov loc2 = r8
	mov loc3 = r15
	;;
	adds out0 = -MCOUNT_INSN_SIZE, out0
	mov out1 = in2
	mov b6 = r3

	br.call.sptk.many b0 = b6
	;;
	mov ar.pfs = loc0
	mov b0 = loc1
	mov r8 = loc2
	mov r15 = loc3
	br ftrace_stub
	;;
END(_mcount)
#endif

GLOBAL_ENTRY(ftrace_stub)
	mov r3 = b0
	movl r2 = _mcount_ret_helper
	;;
	mov b6 = r2
	mov b7 = r3
	br.ret.sptk.many b6

_mcount_ret_helper:
	mov b0 = r42
	mov r1 = r41
	mov ar.pfs = r40
	br b7
END(ftrace_stub)

#endif /* CONFIG_FUNCTION_TRACER */

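/*
 * The ia64 system call table: entry i is syscall number 1024+i; callers
 * subtract the 1024 bias before indexing (see ia64_trace_syscall above).
 */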
	.rodata
	.align 8
	.globl sys_call_table
sys_call_table:
	data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S.
	data8 sys_exit // 1025
	data8 sys_read
	data8 sys_write
	data8 sys_open
	data8 sys_close
	data8 sys_creat // 1030
	data8 sys_link
	data8 sys_unlink
	data8 ia64_execve
	data8 sys_chdir
	data8 sys_fchdir // 1035
	data8 sys_utimes
	data8 sys_mknod
	data8 sys_chmod
	data8 sys_chown
	data8 sys_lseek // 1040
	data8 sys_getpid
	data8 sys_getppid
	data8 sys_mount
	data8 sys_umount
	data8 sys_setuid // 1045
	data8 sys_getuid
	data8 sys_geteuid
	data8 sys_ptrace
	data8 sys_access
	data8 sys_sync // 1050
	data8 sys_fsync
	data8 sys_fdatasync
	data8 sys_kill
	data8 sys_rename
	data8 sys_mkdir // 1055
	data8 sys_rmdir
	data8 sys_dup
	data8 sys_ia64_pipe
	data8 sys_times
	data8 ia64_brk // 1060
	data8 sys_setgid
	data8 sys_getgid
	data8 sys_getegid
	data8 sys_acct
	data8 sys_ioctl // 1065
	data8 sys_fcntl
	data8 sys_umask
	data8 sys_chroot
	data8 sys_ustat
	data8 sys_dup2 // 1070
	data8 sys_setreuid
	data8 sys_setregid
	data8 sys_getresuid
	data8 sys_setresuid
	data8 sys_getresgid // 1075
	data8 sys_setresgid
	data8 sys_getgroups
	data8 sys_setgroups
	data8 sys_getpgid
	data8 sys_setpgid // 1080
	data8 sys_setsid
	data8 sys_getsid
	data8 sys_sethostname
	data8 sys_setrlimit
	data8 sys_getrlimit // 1085
	data8 sys_getrusage
	data8 sys_gettimeofday
	data8 sys_settimeofday
	data8 sys_select
	data8 sys_poll // 1090
	data8 sys_symlink
	data8 sys_readlink
	data8 sys_uselib
	data8 sys_swapon
	data8 sys_swapoff // 1095
	data8 sys_reboot
	data8 sys_truncate
	data8 sys_ftruncate
	data8 sys_fchmod
	data8 sys_fchown // 1100
	data8 ia64_getpriority
	data8 sys_setpriority
	data8 sys_statfs
	data8 sys_fstatfs
	data8 sys_gettid // 1105
	data8 sys_semget
	data8 sys_semop
	data8 sys_semctl
	data8 sys_msgget
	data8 sys_msgsnd // 1110
	data8 sys_msgrcv
	data8 sys_msgctl
	data8 sys_shmget
	data8 sys_shmat
	data8 sys_shmdt // 1115
	data8 sys_shmctl
	data8 sys_syslog
	data8 sys_setitimer
	data8 sys_getitimer
	data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */
	data8 sys_ni_syscall /* was: ia64_oldlstat */
	data8 sys_ni_syscall /* was: ia64_oldfstat */
	data8 sys_vhangup
	data8 sys_lchown
	data8 sys_remap_file_pages // 1125
	data8 sys_wait4
	data8 sys_sysinfo
	data8 sys_clone
	data8 sys_setdomainname
	data8 sys_newuname // 1130
	data8 sys_adjtimex
	data8 sys_ni_syscall /* was: ia64_create_module */
	data8 sys_init_module
	data8 sys_delete_module
	data8 sys_ni_syscall // 1135 /* was: sys_get_kernel_syms */
	data8 sys_ni_syscall /* was: sys_query_module */
	data8 sys_quotactl
	data8 sys_bdflush
	data8 sys_sysfs
	data8 sys_personality // 1140
	data8 sys_ni_syscall // sys_afs_syscall
	data8 sys_setfsuid
	data8 sys_setfsgid
	data8 sys_getdents
	data8 sys_flock // 1145
	data8 sys_readv
	data8 sys_writev
	data8 sys_pread64
	data8 sys_pwrite64
	data8 sys_sysctl // 1150
	data8 sys_mmap
	data8 sys_munmap
	data8 sys_mlock
	data8 sys_mlockall
	data8 sys_mprotect // 1155
	data8 ia64_mremap
	data8 sys_msync
	data8 sys_munlock
	data8 sys_munlockall
	data8 sys_sched_getparam // 1160
	data8 sys_sched_setparam
	data8 sys_sched_getscheduler
	data8 sys_sched_setscheduler
	data8 sys_sched_yield
	data8 sys_sched_get_priority_max // 1165
	data8 sys_sched_get_priority_min
	data8 sys_sched_rr_get_interval
	data8 sys_nanosleep
	data8 sys_ni_syscall // old nfsservctl
	data8 sys_prctl // 1170
	data8 sys_getpagesize
	data8 sys_mmap2
	data8 sys_pciconfig_read
	data8 sys_pciconfig_write
	data8 sys_perfmonctl // 1175
	data8 sys_sigaltstack
	data8 sys_rt_sigaction
	data8 sys_rt_sigpending
	data8 sys_rt_sigprocmask
	data8 sys_rt_sigqueueinfo // 1180
	data8 sys_rt_sigreturn
	data8 sys_rt_sigsuspend
	data8 sys_rt_sigtimedwait
	data8 sys_getcwd
	data8 sys_capget // 1185
	data8 sys_capset
	data8 sys_sendfile64
	data8 sys_ni_syscall // sys_getpmsg (STREAMS)
	data8 sys_ni_syscall // sys_putpmsg (STREAMS)
	data8 sys_socket // 1190
	data8 sys_bind
	data8 sys_connect
	data8 sys_listen
	data8 sys_accept
	data8 sys_getsockname // 1195
	data8 sys_getpeername
	data8 sys_socketpair
	data8 sys_send
	data8 sys_sendto
	data8 sys_recv // 1200
	data8 sys_recvfrom
	data8 sys_shutdown
	data8 sys_setsockopt
	data8 sys_getsockopt
	data8 sys_sendmsg // 1205
	data8 sys_recvmsg
	data8 sys_pivot_root
	data8 sys_mincore
	data8 sys_madvise
	data8 sys_newstat // 1210
	data8 sys_newlstat
	data8 sys_newfstat
	data8 sys_clone2
	data8 sys_getdents64
	data8 sys_getunwind // 1215
	data8 sys_readahead
	data8 sys_setxattr
	data8 sys_lsetxattr
	data8 sys_fsetxattr
	data8 sys_getxattr // 1220
	data8 sys_lgetxattr
	data8 sys_fgetxattr
	data8 sys_listxattr
	data8 sys_llistxattr
	data8 sys_flistxattr // 1225
	data8 sys_removexattr
	data8 sys_lremovexattr
	data8 sys_fremovexattr
	data8 sys_tkill
	data8 sys_futex // 1230
	data8 sys_sched_setaffinity
	data8 sys_sched_getaffinity
	data8 sys_set_tid_address
	data8 sys_fadvise64_64
	data8 sys_tgkill // 1235
	data8 sys_exit_group
	data8 sys_lookup_dcookie
	data8 sys_io_setup
	data8 sys_io_destroy
	data8 sys_io_getevents // 1240
	data8 sys_io_submit
	data8 sys_io_cancel
	data8 sys_epoll_create
	data8 sys_epoll_ctl
	data8 sys_epoll_wait // 1245
	data8 sys_restart_syscall
	data8 sys_semtimedop
	data8 sys_timer_create
	data8 sys_timer_settime
	data8 sys_timer_gettime // 1250
	data8 sys_timer_getoverrun
	data8 sys_timer_delete
	data8 sys_clock_settime
	data8 sys_clock_gettime
	data8 sys_clock_getres // 1255
	data8 sys_clock_nanosleep
	data8 sys_fstatfs64
	data8 sys_statfs64
	data8 sys_mbind
	data8 sys_get_mempolicy // 1260
	data8 sys_set_mempolicy
	data8 sys_mq_open
	data8 sys_mq_unlink
	data8 sys_mq_timedsend
	data8 sys_mq_timedreceive // 1265
	data8 sys_mq_notify
	data8 sys_mq_getsetattr
	data8 sys_kexec_load
	data8 sys_ni_syscall // reserved for vserver
	data8 sys_waitid // 1270
	data8 sys_add_key
	data8 sys_request_key
	data8 sys_keyctl
	data8 sys_ioprio_set
	data8 sys_ioprio_get // 1275
	data8 sys_move_pages
	data8 sys_inotify_init
	data8 sys_inotify_add_watch
	data8 sys_inotify_rm_watch
	data8 sys_migrate_pages // 1280
	data8 sys_openat
	data8 sys_mkdirat
	data8 sys_mknodat
	data8 sys_fchownat
	data8 sys_futimesat // 1285
	data8 sys_newfstatat
	data8 sys_unlinkat
	data8 sys_renameat
	data8 sys_linkat
	data8 sys_symlinkat // 1290
	data8 sys_readlinkat
	data8 sys_fchmodat
	data8 sys_faccessat
	data8 sys_pselect6
	data8 sys_ppoll // 1295
	data8 sys_unshare
	data8 sys_splice
	data8 sys_set_robust_list
	data8 sys_get_robust_list
	data8 sys_sync_file_range // 1300
	data8 sys_tee
	data8 sys_vmsplice
	data8 sys_fallocate
	data8 sys_getcpu
	data8 sys_epoll_pwait // 1305
	data8 sys_utimensat
	data8 sys_signalfd
	data8 sys_ni_syscall
	data8 sys_eventfd
	data8 sys_timerfd_create // 1310
	data8 sys_timerfd_settime
	data8 sys_timerfd_gettime
	data8 sys_signalfd4
	data8 sys_eventfd2
	data8 sys_epoll_create1 // 1315
	data8 sys_dup3
	data8 sys_pipe2
	data8 sys_inotify_init1
	data8 sys_preadv
	data8 sys_pwritev // 1320
	data8 sys_rt_tgsigqueueinfo
	data8 sys_recvmmsg
	data8 sys_fanotify_init
	data8 sys_fanotify_mark
	data8 sys_prlimit64 // 1325
	data8 sys_name_to_handle_at
	data8 sys_open_by_handle_at
	data8 sys_clock_adjtime
	data8 sys_syncfs
	data8 sys_setns // 1330
	data8 sys_sendmmsg
	data8 sys_process_vm_readv
	data8 sys_process_vm_writev
	data8 sys_accept4
	data8 sys_finit_module // 1335
	data8 sys_sched_setattr
	data8 sys_sched_getattr
	data8 sys_renameat2
	data8 sys_getrandom
	data8 sys_memfd_create // 1340
	data8 sys_bpf
	data8 sys_execveat
	data8 sys_userfaultfd
	data8 sys_membarrier
	data8 sys_kcmp // 1345
	data8 sys_mlock2
	data8 sys_copy_file_range
	data8 sys_preadv2
	data8 sys_pwritev2

	.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
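	// (.org cannot move the location counter backwards, so the build
	// fails if the table ever outgrows NR_syscalls; a short table is
	// simply padded out)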