// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 2000 Hewlett-Packard Co, Linuxcare Inc.
 * Copyright (C) 2000 Matthew Wilcox <matthew@wil.cx>
 * Copyright (C) 2000 David Huggins-Daines <dhd@debian.org>
 * Copyright (C) 2008-2016 Helge Deller <deller@gmx.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/personality.h>
#include <linux/regset.h>
#include <linux/security.h>
#include <linux/seccomp.h>
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/audit.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>

/* PSW bits we allow the debugger to modify */
#define USER_PSW_BITS	(PSW_N | PSW_B | PSW_V | PSW_CB)

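/*
 * Defining CREATE_TRACE_POINTS before pulling in the syscall trace
 * events header instantiates the trace_sys_enter()/trace_sys_exit()
 * tracepoints that are used further down in this file.
 */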
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * These are our native regset flavors.
 */
enum parisc_regset {
	REGSET_GENERAL,
	REGSET_FP
};

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* make sure the trap bits are not set */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

/*
 * The following functions are called by ptrace_resume() when
 * enabling or disabling single/block tracing.
 */
void user_disable_single_step(struct task_struct *task)
{
	ptrace_disable(task);
}

void user_enable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_BLOCKSTEP);
	set_tsk_thread_flag(task, TIF_SINGLESTEP);

	if (pa_psw(task)->n) {
		struct siginfo si;

		/* Nullified, just crank over the queue. */
		task_regs(task)->iaoq[0] = task_regs(task)->iaoq[1];
		task_regs(task)->iasq[0] = task_regs(task)->iasq[1];
		task_regs(task)->iaoq[1] = task_regs(task)->iaoq[0] + 4;
		pa_psw(task)->n = 0;
		pa_psw(task)->x = 0;
		pa_psw(task)->y = 0;
		pa_psw(task)->z = 0;
		pa_psw(task)->b = 0;
		ptrace_disable(task);
		/* Don't wake up the task, but let the
		   parent know something happened. */
		si.si_code = TRAP_TRACE;
		si.si_addr = (void __user *) (task_regs(task)->iaoq[0] & ~3);
		si.si_signo = SIGTRAP;
		si.si_errno = 0;
		force_sig_info(SIGTRAP, &si, task);
		/* notify_parent(task, SIGCHLD); */
		return;
	}

	/* Enable recovery counter traps.  The recovery counter
	 * itself will be set to zero on a task switch.  If the
	 * task is suspended on a syscall then the syscall return
	 * path will overwrite the recovery counter with a suitable
	 * value such that it traps once back in user space.  We
	 * disable interrupts in the task's PSW here also, to avoid
	 * interrupts while the recovery counter is decrementing.
	 */
	pa_psw(task)->r = 1;
	pa_psw(task)->t = 0;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

void user_enable_block_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	set_tsk_thread_flag(task, TIF_BLOCKSTEP);

	/* Enable taken branch trap. */
	pa_psw(task)->r = 0;
	pa_psw(task)->t = 1;
	pa_psw(task)->h = 0;
	pa_psw(task)->l = 0;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long __user *datap = (unsigned long __user *)data;
	unsigned long tmp;
	long ret = -EIO;

	switch (request) {

	/* Read the word at location addr in the USER area.  For ptraced
	   processes, the kernel saves all regs on a syscall. */
	case PTRACE_PEEKUSR:
		if ((addr & (sizeof(unsigned long)-1)) ||
		    addr >= sizeof(struct pt_regs))
			break;
		tmp = *(unsigned long *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, datap);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			data &= USER_PSW_BITS;
			task_regs(child)->gr[0] &= ~USER_PSW_BITS;
			task_regs(child)->gr[0] |= data;
			ret = 0;
			break;
		}

		if ((addr & (sizeof(unsigned long)-1)) ||
		    addr >= sizeof(struct pt_regs))
			break;
		if ((addr >= PT_GR1 && addr <= PT_GR31) ||
				addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
				(addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
				addr == PT_SAR) {
			*(unsigned long *) ((char *) task_regs(child) + addr) = data;
			ret = 0;
		}
		break;

	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_GENERAL,
					   0, sizeof(struct user_regs_struct),
					   datap);

	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_GENERAL,
					     0, sizeof(struct user_regs_struct),
					     datap);

	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
		return copy_regset_to_user(child,
					   task_user_regset_view(current),
					   REGSET_FP,
					   0, sizeof(struct user_fp_struct),
					   datap);

	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
		return copy_regset_from_user(child,
					     task_user_regset_view(current),
					     REGSET_FP,
					     0, sizeof(struct user_fp_struct),
					     datap);

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}


#ifdef CONFIG_COMPAT

/* This function is needed to translate 32 bit pt_regs offsets into
 * 64 bit pt_regs offsets.  For example, a 32 bit gdb under a 64 bit kernel
 * will request offset 12 if it wants gr3, but the lower 32 bits of
 * the 64 bit kernel's view of gr3 will be at offset 28 (3*8 + 4).
 * This code relies on a 32 bit pt_regs being comprised of 32 bit values
 * except for the fp registers which (a) are 64 bits, and (b) follow
 * the gr registers at the start of pt_regs.  The 32 bit pt_regs should
 * be half the size of the 64 bit pt_regs, plus 32*4 to allow for fr[]
 * being 64 bit in both cases.
 */

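/*
 * Two worked examples of the mapping below, derived from the layout
 * described above: gr3 sits at 32 bit offset 3*4 = 12 and maps to
 * 12*2 + 4 = 28, the low word of the 64 bit gr3; fr4 sits at 32 bit
 * offset 32*4 + 4*8 = 160 and maps to 160 + 32*4 = 288, which is fr4
 * in the 64 bit pt_regs.
 */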
static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
{
	if (offset < 0)
		return sizeof(struct pt_regs);
	else if (offset <= 32*4)	/* gr[0..31] */
		return offset * 2 + 4;
	else if (offset <= 32*4+32*8)	/* gr[0..31] + fr[0..31] */
		return offset + 32*4;
	else if (offset < sizeof(struct pt_regs)/2 + 32*4)
		return offset * 2 + 4 - 32*8;
	else
		return sizeof(struct pt_regs);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t addr, compat_ulong_t data)
{
	compat_uint_t tmp;
	long ret = -EIO;

	switch (request) {

	case PTRACE_PEEKUSR:
		if (addr & (sizeof(compat_uint_t)-1))
			break;
		addr = translate_usr_offset(addr);
		if (addr >= sizeof(struct pt_regs))
			break;

		tmp = *(compat_uint_t *) ((char *) task_regs(child) + addr);
		ret = put_user(tmp, (compat_uint_t *) (unsigned long) data);
		break;

	/* Write the word at location addr in the USER area.  This will need
	   to change when the kernel no longer saves all regs on a syscall.
	   FIXME.  There is a problem at the moment in that r3-r18 are only
	   saved if the process is ptraced on syscall entry, and even then
	   those values are overwritten by actual register values on syscall
	   exit. */
	case PTRACE_POKEUSR:
		/* Some register values written here may be ignored in
		 * entry.S:syscall_restore_rfi; e.g. iaoq is written with
		 * r31/r31+4, and not with the values in pt_regs.
		 */
		if (addr == PT_PSW) {
			/* Since PT_PSW==0, it is valid for 32 bit processes
			 * under 64 bit kernels as well.
			 */
			ret = arch_ptrace(child, request, addr, data);
		} else {
			if (addr & (sizeof(compat_uint_t)-1))
				break;
			addr = translate_usr_offset(addr);
			if (addr >= sizeof(struct pt_regs))
				break;
			if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
				/* Special case, fp regs are 64 bits anyway */
				*(__u64 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
			else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
					addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4 ||
					addr == PT_SAR+4) {
				/* Zero the top 32 bits */
				*(__u32 *) ((char *) task_regs(child) + addr - 4) = 0;
				*(__u32 *) ((char *) task_regs(child) + addr) = data;
				ret = 0;
			}
		}
		break;

	default:
		ret = compat_ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
#endif

long do_syscall_trace_enter(struct pt_regs *regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		regs->gr[20] = -1UL;
		goto out;
	}

	/* Do the secure computing check after ptrace. */
	if (secure_computing(NULL) == -1)
		return -1;

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gr[20]);
#endif

#ifdef CONFIG_64BIT
	if (!is_compat_task())
		audit_syscall_entry(regs->gr[20], regs->gr[26], regs->gr[25],
				    regs->gr[24], regs->gr[23]);
	else
#endif
		audit_syscall_entry(regs->gr[20] & 0xffffffff,
			regs->gr[26] & 0xffffffff,
			regs->gr[25] & 0xffffffff,
			regs->gr[24] & 0xffffffff,
			regs->gr[23] & 0xffffffff);

out:
	/*
	 * Sign extend the syscall number to 64bit since it may have been
	 * modified by a compat ptrace call
	 */
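	/*
	 * Note: the assembly caller is expected to pick up this (possibly
	 * modified, possibly -1) value as the syscall number to dispatch,
	 * which is how the "skip the system call" case above takes effect.
	 */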
	return (int) ((u32) regs->gr[20]);
}

void do_syscall_trace_exit(struct pt_regs *regs)
{
	int stepping = test_thread_flag(TIF_SINGLESTEP) ||
		test_thread_flag(TIF_BLOCKSTEP);

	audit_syscall_exit(regs);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gr[20]);
#endif

	if (stepping || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, stepping);
}


/*
 * regset functions.
 */

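/*
 * Each get/set handler below receives 'pos' and 'count' in bytes and
 * is handed either a kernel buffer (kbuf, used e.g. when writing core
 * dumps) or a user buffer (ubuf, used by ptrace).  Space past the last
 * register is zero filled on reads and ignored on writes, which is what
 * the trailing copyout_zero/copyin_ignore calls implement.
 */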
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	__u64 *k = kbuf;
	__u64 __user *u = ubuf;
	__u64 reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NFPREG; --count)
			*k++ = regs->fr[pos++];
	else
		for (; count > 0 && pos < ELF_NFPREG; --count)
			if (__put_user(regs->fr[pos++], u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NFPREG * sizeof(reg), -1);
}

static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const __u64 *k = kbuf;
	const __u64 __user *u = ubuf;
	__u64 reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NFPREG; --count)
			regs->fr[pos++] = *k++;
	else
		for (; count > 0 && pos < ELF_NFPREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs->fr[pos++] = reg;
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NFPREG * sizeof(reg), -1);
}

#define RI(reg) (offsetof(struct user_regs_struct,reg) / sizeof(long))

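/*
 * RI() gives the index of a user_regs_struct member when the struct is
 * viewed as an array of longs, so e.g. RI(gr[5]) == RI(gr[0]) + 5.
 * get_reg()/set_reg() below use these indices to map regset positions
 * onto pt_regs fields or control registers.
 */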
static unsigned long get_reg(struct pt_regs *regs, int num)
{
	switch (num) {
	case RI(gr[0]) ... RI(gr[31]):	return regs->gr[num - RI(gr[0])];
	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]):		return regs->iasq[0];
	case RI(iasq[1]):		return regs->iasq[1];
	case RI(iaoq[0]):		return regs->iaoq[0];
	case RI(iaoq[1]):		return regs->iaoq[1];
	case RI(sar):			return regs->sar;
	case RI(iir):			return regs->iir;
	case RI(isr):			return regs->isr;
	case RI(ior):			return regs->ior;
	case RI(ipsw):			return regs->ipsw;
	case RI(cr27):			return regs->cr27;
	case RI(cr0):			return mfctl(0);
	case RI(cr24):			return mfctl(24);
	case RI(cr25):			return mfctl(25);
	case RI(cr26):			return mfctl(26);
	case RI(cr28):			return mfctl(28);
	case RI(cr29):			return mfctl(29);
	case RI(cr30):			return mfctl(30);
	case RI(cr31):			return mfctl(31);
	case RI(cr8):			return mfctl(8);
	case RI(cr9):			return mfctl(9);
	case RI(cr12):			return mfctl(12);
	case RI(cr13):			return mfctl(13);
	case RI(cr10):			return mfctl(10);
	case RI(cr15):			return mfctl(15);
	default:			return 0;
	}
}

static void set_reg(struct pt_regs *regs, int num, unsigned long val)
{
	switch (num) {
	case RI(gr[0]): /*
			 * PSW is in gr[0].
			 * Allow writing to Nullify, Divide-step-correction,
			 * and carry/borrow bits.
			 * BEWARE, if you set N, and then single step, it won't
			 * stop on the nullified instruction.
			 */
			val &= USER_PSW_BITS;
			regs->gr[0] &= ~USER_PSW_BITS;
			regs->gr[0] |= val;
			return;
	case RI(gr[1]) ... RI(gr[31]):
			regs->gr[num - RI(gr[0])] = val;
			return;
	case RI(iaoq[0]):
	case RI(iaoq[1]):
			regs->iaoq[num - RI(iaoq[0])] = val;
			return;
	case RI(sar):	regs->sar = val;
			return;
	default:	return;
#if 0
	/* do not allow to change any of the following registers (yet) */
	case RI(sr[0]) ... RI(sr[7]):	return regs->sr[num - RI(sr[0])];
	case RI(iasq[0]):		return regs->iasq[0];
	case RI(iasq[1]):		return regs->iasq[1];
	case RI(iir):			return regs->iir;
	case RI(isr):			return regs->isr;
	case RI(ior):			return regs->ior;
	case RI(ipsw):			return regs->ipsw;
	case RI(cr27):			return regs->cr27;
	case cr0, cr24, cr25, cr26, cr27, cr28, cr29, cr30, cr31;
	case cr8, cr9, cr12, cr13, cr10, cr15;
#endif
	}
}

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	unsigned long *k = kbuf;
	unsigned long __user *u = ubuf;
	unsigned long reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			*k++ = get_reg(regs, pos++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count)
			if (__put_user(get_reg(regs, pos++), u++))
				return -EFAULT;
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NGREG * sizeof(reg), -1);
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const unsigned long *k = kbuf;
	const unsigned long __user *u = ubuf;
	unsigned long reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			set_reg(regs, pos++, *k++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			set_reg(regs, pos++, reg);
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NGREG * sizeof(reg), -1);
}

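/*
 * The regset descriptions below back PTRACE_GETREGSET/SETREGSET and
 * ELF core dumps, and are also what the PTRACE_GETREGS style requests
 * in arch_ptrace() above are routed through via
 * copy_regset_to_user()/copy_regset_from_user().
 */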
static const struct user_regset native_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(long), .align = sizeof(long),
		.get = gpr_get, .set = gpr_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.get = fpr_get, .set = fpr_set
	}
};

static const struct user_regset_view user_parisc_native_view = {
	.name = "parisc", .e_machine = ELF_ARCH, .ei_osabi = ELFOSABI_LINUX,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

#ifdef CONFIG_64BIT
#include <linux/compat.h>

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			*k++ = get_reg(regs, pos++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count)
			if (__put_user((compat_ulong_t) get_reg(regs, pos++), u++))
				return -EFAULT;

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					ELF_NGREG * sizeof(reg), -1);
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_regs(target);
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	pos /= sizeof(reg);
	count /= sizeof(reg);

	if (kbuf)
		for (; count > 0 && pos < ELF_NGREG; --count)
			set_reg(regs, pos++, *k++);
	else
		for (; count > 0 && pos < ELF_NGREG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			set_reg(regs, pos++, reg);
		}

	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 ELF_NGREG * sizeof(reg), -1);
}

/*
 * These are the regset flavors matching the 32bit native set.
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(__u64), .align = sizeof(__u64),
		.get = fpr_get, .set = fpr_set
	}
};

static const struct user_regset_view user_parisc_compat_view = {
	.name = "parisc", .e_machine = EM_PARISC, .ei_osabi = ELFOSABI_LINUX,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
#endif	/* CONFIG_64BIT */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	BUILD_BUG_ON(sizeof(struct user_regs_struct)/sizeof(long) != ELF_NGREG);
	BUILD_BUG_ON(sizeof(struct user_fp_struct)/sizeof(__u64) != ELF_NFPREG);
#ifdef CONFIG_64BIT
	if (is_compat_task())
		return &user_parisc_compat_view;
#endif
	return &user_parisc_native_view;
}