// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2000, 2006
 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *            Gerhard Tonn (ton@de.ibm.com)
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include <asm/vdso.h>
#include "compat_linux.h"
#include "compat_ptrace.h"
#include "entry.h"

typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	struct sigcontext32 sc;
	_sigregs32 sregs;
	int signo;
	_sigregs_ext32 sregs_ext;
	__u16 svc_insn;		/* Offset of svc_insn is NOT fixed! */
} sigframe32;

typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	__u16 svc_insn;
	compat_siginfo_t info;
	struct ucontext32 uc;
} rt_sigframe32;
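
/*
 * Note on the frame layouts: in sigframe32 the svc_insn slot follows the
 * variable-size sregs_ext area, so its offset depends on whether space
 * for vector registers is allocated (hence the "NOT fixed" comment above),
 * while rt_sigframe32 keeps svc_insn at a fixed offset right behind the
 * callee save area.
 */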

/* Store registers needed to create the signal frame */
static void store_sigregs(void)
{
	save_access_regs(current->thread.acrs);
	save_fpu_regs();
}

/* Load registers after signal return */
static void load_sigregs(void)
{
	restore_access_regs(current->thread.acrs);
}
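
/*
 * store_sigregs() is called before a frame is built so that the access
 * registers and the FPU/vector state in current->thread are up to date.
 * load_sigregs() only reloads the access registers; the floating-point
 * and vector contents are expected to be reloaded from current->thread.fpu
 * on the way back to user space.
 */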

static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
	_sigregs32 user_sregs;
	int i;

	user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
	user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
	user_sregs.regs.psw.mask |= PSW32_USER_BITS;
	user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
		(__u32)(regs->psw.mask & PSW_MASK_BA);
	for (i = 0; i < NUM_GPRS; i++)
		user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
	       sizeof(user_sregs.regs.acrs));
	fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);
	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
		return -EFAULT;
	return 0;
}

static int restore_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
	_sigregs32 user_sregs;
	int i;

	/* Always make any pending restarted system call return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
		return -EFAULT;

	if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
		return -EINVAL;

	/* Test the floating-point-control word. */
	if (test_fp_ctl(user_sregs.fpregs.fpc))
		return -EINVAL;

	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
		(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
	/* Check for invalid user address space control. */
	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
		regs->psw.mask = PSW_ASC_PRIMARY |
			(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
	for (i = 0; i < NUM_GPRS; i++)
		regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(current->thread.acrs));
	fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);

	clear_pt_regs_flag(regs, PIF_SYSCALL);	/* No longer in a system call */
	return 0;
}

static int save_sigregs_ext32(struct pt_regs *regs,
			      _sigregs_ext32 __user *sregs_ext)
{
	__u32 gprs_high[NUM_GPRS];
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	/* Save high gprs to signal stack */
	for (i = 0; i < NUM_GPRS; i++)
		gprs_high[i] = regs->gprs[i] >> 32;
	if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high,
			   sizeof(sregs_ext->gprs_high)))
		return -EFAULT;

	/* Save vector registers to signal stack */
	if (MACHINE_HAS_VX) {
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
				   sizeof(sregs_ext->vxrs_low)) ||
		    __copy_to_user(&sregs_ext->vxrs_high,
				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
				   sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
	}
	return 0;
}
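
/*
 * Layout note: on machines with the vector facility the first 16 vector
 * registers overlap the floating-point registers, whose contents are
 * already saved in _sigregs32.  vxrs_low therefore stores only the
 * rightmost halves of V0-V15, and vxrs_high stores V16-V31 in full.
 */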

static int restore_sigregs_ext32(struct pt_regs *regs,
				 _sigregs_ext32 __user *sregs_ext)
{
	__u32 gprs_high[NUM_GPRS];
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	/* Restore high gprs from signal stack */
	if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
			     sizeof(sregs_ext->gprs_high)))
		return -EFAULT;
	for (i = 0; i < NUM_GPRS; i++)
		*(__u32 *)&regs->gprs[i] = gprs_high[i];

	/* Restore vector registers from signal stack */
	if (MACHINE_HAS_VX) {
		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
				     sizeof(sregs_ext->vxrs_low)) ||
		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
				     &sregs_ext->vxrs_high,
				     sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i];
	}
	return 0;
}

COMPAT_SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = task_pt_regs(current);
	sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
	sigset_t set;

	if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask))
		goto badframe;
	set_current_blocked(&set);
	save_fpu_regs();
	if (restore_sigregs32(regs, &frame->sregs))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
		goto badframe;
	load_sigregs();
	return regs->gprs[2];
badframe:
	force_sig(SIGSEGV);
	return 0;
}

COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = task_pt_regs(current);
	rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
	sigset_t set;

	if (get_compat_sigset(&set, &frame->uc.uc_sigmask))
		goto badframe;
	set_current_blocked(&set);
	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;
	save_fpu_regs();
	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
		goto badframe;
	load_sigregs();
	return regs->gprs[2];
badframe:
	force_sig(SIGSEGV);
	return 0;
}
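
/*
 * Both sigreturn handlers return regs->gprs[2] instead of 0 so that the
 * system call exit path writes back the %r2 value that was just restored
 * from the signal frame rather than clobbering it with a return code.
 */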

/*
 * Set up a signal frame.
 */


/*
 * Determine which stack to use..
 */
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = (unsigned long) A(regs->gprs[15]);

	/* Overflow on alternate signal stack gives SIGSEGV. */
	if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
		return (void __user *) -1UL;

	/* This is the X/Open sanctioned signal stack switching. */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (!sas_ss_flags(sp))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

	return (void __user *)((sp - frame_size) & -8ul);
}
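
/*
 * The resulting frame address is rounded down to an 8-byte boundary;
 * -1UL serves as an error cookie that the callers check before touching
 * user memory.
 */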

static int setup_frame32(struct ksignal *ksig, sigset_t *set,
			 struct pt_regs *regs)
{
	int sig = ksig->sig;
	sigframe32 __user *frame;
	unsigned long restorer;
	size_t frame_size;

	/*
	 * gprs_high are always present for 31-bit compat tasks.
	 * The space for vector registers is only allocated if
	 * the machine supports it
	 */
	frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
	if (!MACHINE_HAS_VX)
		frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
			      sizeof(frame->sregs_ext.vxrs_high);
	frame = get_sigframe(&ksig->ka, regs, frame_size);
	if (frame == (void __user *) -1UL)
		return -EFAULT;

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
		return -EFAULT;

	/* Create struct sigcontext32 on the signal stack */
	if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask,
			      set, sizeof(compat_sigset_t)))
		return -EFAULT;
	if (__put_user(ptr_to_compat(&frame->sregs), &frame->sc.sregs))
		return -EFAULT;

	/* Store registers needed to create the signal frame */
	store_sigregs();

	/* Create _sigregs32 on the signal stack */
	if (save_sigregs32(regs, &frame->sregs))
		return -EFAULT;

	/* Place signal number on stack to allow backtrace from handler. */
	if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
		return -EFAULT;

	/* Create _sigregs_ext32 on the signal stack */
	if (save_sigregs_ext32(regs, &frame->sregs_ext))
		return -EFAULT;

	/* Set up to return from userspace. If provided, use a stub
	   already in userspace. */
	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		restorer = (unsigned long __force)
			ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
	} else {
		restorer = VDSO32_SYMBOL(current, sigreturn);
	}

	/* Set up registers for signal handler */
	regs->gprs[14] = restorer;
	regs->gprs[15] = (__force __u64) frame;
	/* Force 31 bit amode and default user address space control. */
	regs->psw.mask = PSW_MASK_BA |
		(PSW_USER_BITS & PSW_MASK_ASC) |
		(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__force __u64) ksig->ka.sa.sa_handler;

	regs->gprs[2] = sig;
	regs->gprs[3] = (__force __u64) &frame->sc;

	/* We forgot to include these in the sigcontext.
	   To avoid breaking binary compatibility, they are passed as args. */
	if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
	    sig == SIGTRAP || sig == SIGFPE) {
		/* set extra registers only for synchronous signals */
		regs->gprs[4] = regs->int_code & 127;
		regs->gprs[5] = regs->int_parm_long;
		regs->gprs[6] = current->thread.last_break;
	}

	return 0;
}

static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
			    struct pt_regs *regs)
{
	rt_sigframe32 __user *frame;
	unsigned long restorer;
	size_t frame_size;
	u32 uc_flags;

	frame_size = sizeof(*frame) -
		     sizeof(frame->uc.uc_mcontext_ext.__reserved);
	/*
	 * gprs_high are always present for 31-bit compat tasks.
	 * The space for vector registers is only allocated if
	 * the machine supports it
	 */
	uc_flags = UC_GPRS_HIGH;
	if (MACHINE_HAS_VX) {
		uc_flags |= UC_VXRS;
	} else
		frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
			      sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
	frame = get_sigframe(&ksig->ka, regs, frame_size);
	if (frame == (void __user *) -1UL)
		return -EFAULT;

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
		return -EFAULT;

	/* Set up to return from userspace. If provided, use a stub
	   already in userspace. */
	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		restorer = (unsigned long __force)
			ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
	} else {
		restorer = VDSO32_SYMBOL(current, rt_sigreturn);
	}

	/* Create siginfo on the signal stack */
	if (copy_siginfo_to_user32(&frame->info, &ksig->info))
		return -EFAULT;

	/* Store registers needed to create the signal frame */
	store_sigregs();

	/* Create ucontext on the signal stack. */
	if (__put_user(uc_flags, &frame->uc.uc_flags) ||
	    __put_user(0, &frame->uc.uc_link) ||
	    __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
	    save_sigregs32(regs, &frame->uc.uc_mcontext) ||
	    put_compat_sigset(&frame->uc.uc_sigmask, set, sizeof(compat_sigset_t)) ||
	    save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->gprs[14] = restorer;
	regs->gprs[15] = (__force __u64) frame;
	/* Force 31 bit amode and default user address space control. */
	regs->psw.mask = PSW_MASK_BA |
		(PSW_USER_BITS & PSW_MASK_ASC) |
		(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__u64 __force) ksig->ka.sa.sa_handler;

	regs->gprs[2] = ksig->sig;
	regs->gprs[3] = (__force __u64) &frame->info;
	regs->gprs[4] = (__force __u64) &frame->uc;
	regs->gprs[5] = current->thread.last_break;
	return 0;
}
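
/*
 * Register conventions set up by setup_rt_frame32() above: %r2 holds the
 * signal number, %r3 the siginfo pointer, %r4 the ucontext pointer, %r5
 * the last breaking-event address, %r14 the restorer and %r15 the new
 * stack frame.
 */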

/*
 * OK, we're invoking a handler
 */

void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		     struct pt_regs *regs)
{
	int ret;

	/* Set up the stack frame */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame32(ksig, oldset, regs);
	else
		ret = setup_frame32(ksig, oldset, regs);

	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLE_STEP));
}

/*
 * Copyright IBM Corp. 2000, 2006
 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *            Gerhard Tonn (ton@de.ibm.com)
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/lowcore.h>
#include <asm/switch_to.h>
#include "compat_linux.h"
#include "compat_ptrace.h"
#include "entry.h"

typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	struct sigcontext32 sc;
	_sigregs32 sregs;
	int signo;
	_sigregs_ext32 sregs_ext;
	__u16 svc_insn;		/* Offset of svc_insn is NOT fixed! */
} sigframe32;

typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	__u16 svc_insn;
	compat_siginfo_t info;
	struct ucontext32 uc;
} rt_sigframe32;

static inline void sigset_to_sigset32(unsigned long *set64,
				      compat_sigset_word *set32)
{
	set32[0] = (compat_sigset_word) set64[0];
	set32[1] = (compat_sigset_word)(set64[0] >> 32);
}

static inline void sigset32_to_sigset(compat_sigset_word *set32,
				      unsigned long *set64)
{
	set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
}
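
/*
 * A 31-bit compat sigset consists of two 32-bit words that together hold
 * the single 64-bit word of the native sigset, so the helpers above simply
 * split or recombine that word.
 */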

int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	/* If you change siginfo_t structure, please be sure
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.
	   This routine must convert siginfo from 64bit to 32bit as well
	   at the same time. */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	if (from->si_code < 0)
		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		switch (from->si_code >> 16) {
		case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_int, &to->si_int);
			/* fallthrough */
		case __SI_KILL >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			break;
		case __SI_FAULT >> 16:
			err |= __put_user((unsigned long) from->si_addr,
					  &to->si_addr);
			break;
		case __SI_POLL >> 16:
			err |= __put_user(from->si_band, &to->si_band);
			err |= __put_user(from->si_fd, &to->si_fd);
			break;
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		default:
			break;
		}
	}
	return err ? -EFAULT : 0;
}

int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
	int err;
	u32 tmp;

	err = __get_user(to->si_signo, &from->si_signo);
	err |= __get_user(to->si_errno, &from->si_errno);
	err |= __get_user(to->si_code, &from->si_code);

	if (to->si_code < 0)
		err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		switch (to->si_code >> 16) {
		case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
		case __SI_MESGQ >> 16:
			err |= __get_user(to->si_int, &from->si_int);
			/* fallthrough */
		case __SI_KILL >> 16:
			err |= __get_user(to->si_pid, &from->si_pid);
			err |= __get_user(to->si_uid, &from->si_uid);
			break;
		case __SI_CHLD >> 16:
			err |= __get_user(to->si_pid, &from->si_pid);
			err |= __get_user(to->si_uid, &from->si_uid);
			err |= __get_user(to->si_utime, &from->si_utime);
			err |= __get_user(to->si_stime, &from->si_stime);
			err |= __get_user(to->si_status, &from->si_status);
			break;
		case __SI_FAULT >> 16:
			err |= __get_user(tmp, &from->si_addr);
			to->si_addr = (void __force __user *)
				(u64) (tmp & PSW32_ADDR_INSN);
			break;
		case __SI_POLL >> 16:
			err |= __get_user(to->si_band, &from->si_band);
			err |= __get_user(to->si_fd, &from->si_fd);
			break;
		case __SI_TIMER >> 16:
			err |= __get_user(to->si_tid, &from->si_tid);
			err |= __get_user(to->si_overrun, &from->si_overrun);
			err |= __get_user(to->si_int, &from->si_int);
			break;
		default:
			break;
		}
	}
	return err ? -EFAULT : 0;
}
166/* Store registers needed to create the signal frame */
167static void store_sigregs(void)
168{
169 save_access_regs(current->thread.acrs);
170 save_fpu_regs();
171}
172
173/* Load registers after signal return */
174static void load_sigregs(void)
175{
176 restore_access_regs(current->thread.acrs);
177}
178
179static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
180{
181 _sigregs32 user_sregs;
182 int i;
183
184 user_sregs.regs.psw.mask = (__u32)(regs->psw.mask >> 32);
185 user_sregs.regs.psw.mask &= PSW32_MASK_USER | PSW32_MASK_RI;
186 user_sregs.regs.psw.mask |= PSW32_USER_BITS;
187 user_sregs.regs.psw.addr = (__u32) regs->psw.addr |
188 (__u32)(regs->psw.mask & PSW_MASK_BA);
189 for (i = 0; i < NUM_GPRS; i++)
190 user_sregs.regs.gprs[i] = (__u32) regs->gprs[i];
191 memcpy(&user_sregs.regs.acrs, current->thread.acrs,
192 sizeof(user_sregs.regs.acrs));
193 fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, ¤t->thread.fpu);
194 if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32)))
195 return -EFAULT;
196 return 0;
197}
198
static int restore_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
	_sigregs32 user_sregs;
	int i;

	/* Always make any pending restarted system call return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (__copy_from_user(&user_sregs, &sregs->regs, sizeof(user_sregs)))
		return -EFAULT;

	if (!is_ri_task(current) && (user_sregs.regs.psw.mask & PSW32_MASK_RI))
		return -EINVAL;

	/* Test the floating-point-control word. */
	if (test_fp_ctl(user_sregs.fpregs.fpc))
		return -EINVAL;

	/* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */
	regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) |
		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 |
		(__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 |
		(__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
	/* Check for invalid user address space control. */
	if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
		regs->psw.mask = PSW_ASC_PRIMARY |
			(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_INSN);
	for (i = 0; i < NUM_GPRS; i++)
		regs->gprs[i] = (__u64) user_sregs.regs.gprs[i];
	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(current->thread.acrs));
	fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu);

	clear_pt_regs_flag(regs, PIF_SYSCALL);	/* No longer in a system call */
	return 0;
}

static int save_sigregs_ext32(struct pt_regs *regs,
			      _sigregs_ext32 __user *sregs_ext)
{
	__u32 gprs_high[NUM_GPRS];
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	/* Save high gprs to signal stack */
	for (i = 0; i < NUM_GPRS; i++)
		gprs_high[i] = regs->gprs[i] >> 32;
	if (__copy_to_user(&sregs_ext->gprs_high, &gprs_high,
			   sizeof(sregs_ext->gprs_high)))
		return -EFAULT;

	/* Save vector registers to signal stack */
	if (MACHINE_HAS_VX) {
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			vxrs[i] = *((__u64 *)(current->thread.fpu.vxrs + i) + 1);
		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
				   sizeof(sregs_ext->vxrs_low)) ||
		    __copy_to_user(&sregs_ext->vxrs_high,
				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
				   sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
	}
	return 0;
}

static int restore_sigregs_ext32(struct pt_regs *regs,
				 _sigregs_ext32 __user *sregs_ext)
{
	__u32 gprs_high[NUM_GPRS];
	__u64 vxrs[__NUM_VXRS_LOW];
	int i;

	/* Restore high gprs from signal stack */
	if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
			     sizeof(sregs_ext->gprs_high)))
		return -EFAULT;
	for (i = 0; i < NUM_GPRS; i++)
		*(__u32 *)&regs->gprs[i] = gprs_high[i];

	/* Restore vector registers from signal stack */
	if (MACHINE_HAS_VX) {
		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
				     sizeof(sregs_ext->vxrs_low)) ||
		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
				     &sregs_ext->vxrs_high,
				     sizeof(sregs_ext->vxrs_high)))
			return -EFAULT;
		for (i = 0; i < __NUM_VXRS_LOW; i++)
			*((__u64 *)(current->thread.fpu.vxrs + i) + 1) = vxrs[i];
	}
	return 0;
}

COMPAT_SYSCALL_DEFINE0(sigreturn)
{
	struct pt_regs *regs = task_pt_regs(current);
	sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
	compat_sigset_t cset;
	sigset_t set;

	if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
		goto badframe;
	sigset32_to_sigset(cset.sig, set.sig);
	set_current_blocked(&set);
	save_fpu_regs();
	if (restore_sigregs32(regs, &frame->sregs))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->sregs_ext))
		goto badframe;
	load_sigregs();
	return regs->gprs[2];
badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = task_pt_regs(current);
	rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
	compat_sigset_t cset;
	sigset_t set;

	if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
		goto badframe;
	sigset32_to_sigset(cset.sig, set.sig);
	set_current_blocked(&set);
	if (compat_restore_altstack(&frame->uc.uc_stack))
		goto badframe;
	save_fpu_regs();
	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
		goto badframe;
	if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
		goto badframe;
	load_sigregs();
	return regs->gprs[2];
badframe:
	force_sig(SIGSEGV, current);
	return 0;
}

/*
 * Set up a signal frame.
 */


/*
 * Determine which stack to use..
 */
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = (unsigned long) A(regs->gprs[15]);

	/* Overflow on alternate signal stack gives SIGSEGV. */
	if (on_sig_stack(sp) && !on_sig_stack((sp - frame_size) & -8UL))
		return (void __user *) -1UL;

	/* This is the X/Open sanctioned signal stack switching. */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (!sas_ss_flags(sp))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

	return (void __user *)((sp - frame_size) & -8ul);
}

static int setup_frame32(struct ksignal *ksig, sigset_t *set,
			 struct pt_regs *regs)
{
	int sig = ksig->sig;
	sigframe32 __user *frame;
	struct sigcontext32 sc;
	unsigned long restorer;
	size_t frame_size;

	/*
	 * gprs_high are always present for 31-bit compat tasks.
	 * The space for vector registers is only allocated if
	 * the machine supports it
	 */
	frame_size = sizeof(*frame) - sizeof(frame->sregs_ext.__reserved);
	if (!MACHINE_HAS_VX)
		frame_size -= sizeof(frame->sregs_ext.vxrs_low) +
			      sizeof(frame->sregs_ext.vxrs_high);
	frame = get_sigframe(&ksig->ka, regs, frame_size);
	if (frame == (void __user *) -1UL)
		return -EFAULT;

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
		return -EFAULT;

	/* Create struct sigcontext32 on the signal stack */
	sigset_to_sigset32(set->sig, sc.oldmask);
	sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
	if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
		return -EFAULT;

	/* Store registers needed to create the signal frame */
	store_sigregs();

	/* Create _sigregs32 on the signal stack */
	if (save_sigregs32(regs, &frame->sregs))
		return -EFAULT;

	/* Place signal number on stack to allow backtrace from handler. */
	if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
		return -EFAULT;

	/* Create _sigregs_ext32 on the signal stack */
	if (save_sigregs_ext32(regs, &frame->sregs_ext))
		return -EFAULT;

	/* Set up to return from userspace. If provided, use a stub
	   already in userspace. */
	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		restorer = (unsigned long __force)
			ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
	} else {
		/* Signal frames without vector registers are short! */
		__u16 __user *svc = (void __user *) frame + frame_size - 2;
		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
			return -EFAULT;
		restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
	}
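
	/*
	 * The sigreturn svc is written at the very end of the frame because
	 * the frame is shorter when no vector register space is allocated.
	 * Newer versions of this code point the restorer at a vDSO stub
	 * instead of patching an instruction onto the stack.
	 */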

	/* Set up registers for signal handler */
	regs->gprs[14] = restorer;
	regs->gprs[15] = (__force __u64) frame;
	/* Force 31 bit amode and default user address space control. */
	regs->psw.mask = PSW_MASK_BA |
		(PSW_USER_BITS & PSW_MASK_ASC) |
		(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__force __u64) ksig->ka.sa.sa_handler;

	regs->gprs[2] = sig;
	regs->gprs[3] = (__force __u64) &frame->sc;

	/* We forgot to include these in the sigcontext.
	   To avoid breaking binary compatibility, they are passed as args. */
	if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
	    sig == SIGTRAP || sig == SIGFPE) {
		/* set extra registers only for synchronous signals */
		regs->gprs[4] = regs->int_code & 127;
		regs->gprs[5] = regs->int_parm_long;
		regs->gprs[6] = current->thread.last_break;
	}

	return 0;
}

static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
			    struct pt_regs *regs)
{
	compat_sigset_t cset;
	rt_sigframe32 __user *frame;
	unsigned long restorer;
	size_t frame_size;
	u32 uc_flags;

	frame_size = sizeof(*frame) -
		     sizeof(frame->uc.uc_mcontext_ext.__reserved);
	/*
	 * gprs_high are always present for 31-bit compat tasks.
	 * The space for vector registers is only allocated if
	 * the machine supports it
	 */
	uc_flags = UC_GPRS_HIGH;
	if (MACHINE_HAS_VX) {
		uc_flags |= UC_VXRS;
	} else
		frame_size -= sizeof(frame->uc.uc_mcontext_ext.vxrs_low) +
			      sizeof(frame->uc.uc_mcontext_ext.vxrs_high);
	frame = get_sigframe(&ksig->ka, regs, frame_size);
	if (frame == (void __user *) -1UL)
		return -EFAULT;

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __force __user *) frame))
		return -EFAULT;

	/* Set up to return from userspace. If provided, use a stub
	   already in userspace. */
	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		restorer = (unsigned long __force)
			ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
	} else {
		__u16 __user *svc = &frame->svc_insn;
		if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
			return -EFAULT;
		restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
	}
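
	/*
	 * Whichever branch was taken, "restorer" is loaded into %r14 below,
	 * so a normal return from the signal handler branches into the
	 * rt_sigreturn stub (or the user-supplied SA_RESTORER trampoline).
	 */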

	/* Create siginfo on the signal stack */
	if (copy_siginfo_to_user32(&frame->info, &ksig->info))
		return -EFAULT;

	/* Store registers needed to create the signal frame */
	store_sigregs();

	/* Create ucontext on the signal stack. */
	sigset_to_sigset32(set->sig, cset.sig);
	if (__put_user(uc_flags, &frame->uc.uc_flags) ||
	    __put_user(0, &frame->uc.uc_link) ||
	    __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
	    save_sigregs32(regs, &frame->uc.uc_mcontext) ||
	    __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
	    save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->gprs[14] = restorer;
	regs->gprs[15] = (__force __u64) frame;
	/* Force 31 bit amode and default user address space control. */
	regs->psw.mask = PSW_MASK_BA |
		(PSW_USER_BITS & PSW_MASK_ASC) |
		(regs->psw.mask & ~PSW_MASK_ASC);
	regs->psw.addr = (__u64 __force) ksig->ka.sa.sa_handler;

	regs->gprs[2] = ksig->sig;
	regs->gprs[3] = (__force __u64) &frame->info;
	regs->gprs[4] = (__force __u64) &frame->uc;
	regs->gprs[5] = current->thread.last_break;
	return 0;
}

/*
 * OK, we're invoking a handler
 */

void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		     struct pt_regs *regs)
{
	int ret;

	/* Set up the stack frame */
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame32(ksig, oldset, regs);
	else
		ret = setup_frame32(ksig, oldset, regs);

	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLE_STEP));
}
