1/*
2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Copyright (C) 2001 IBM
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 *
10 * Derived from "arch/i386/kernel/signal.c"
11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/kernel.h>
24#include <linux/signal.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
27#include <linux/ptrace.h>
28#include <linux/ratelimit.h>
29#ifdef CONFIG_PPC64
30#include <linux/syscalls.h>
31#include <linux/compat.h>
32#else
33#include <linux/wait.h>
34#include <linux/unistd.h>
35#include <linux/stddef.h>
36#include <linux/tty.h>
37#include <linux/binfmts.h>
38#endif
39
40#include <linux/uaccess.h>
41#include <asm/cacheflush.h>
42#include <asm/syscalls.h>
43#include <asm/sigcontext.h>
44#include <asm/vdso.h>
45#include <asm/switch_to.h>
46#include <asm/tm.h>
47#include <asm/asm-prototypes.h>
48#ifdef CONFIG_PPC64
49#include "ppc32.h"
50#include <asm/unistd.h>
51#else
52#include <asm/ucontext.h>
53#include <asm/pgtable.h>
54#endif
55
56#include "signal.h"
57
58
59#ifdef CONFIG_PPC64
60#define sys_rt_sigreturn compat_sys_rt_sigreturn
61#define sys_swapcontext compat_sys_swapcontext
62#define sys_sigreturn compat_sys_sigreturn
63
64#define old_sigaction old_sigaction32
65#define sigcontext sigcontext32
66#define mcontext mcontext32
67#define ucontext ucontext32
68
69#define __save_altstack __compat_save_altstack
70
71/*
72 * Userspace code may pass a ucontext which doesn't include VSX added
73 * at the end. We need to check for this case.
74 */
75#define UCONTEXTSIZEWITHOUTVSX \
76 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
77
78/*
79 * Returning 0 means we return to userspace via
80 * ret_from_except and thus restore all user
81 * registers from *regs. This is what we need
82 * to do when a signal has been delivered.
83 */
84
85#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
86#undef __SIGNAL_FRAMESIZE
87#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
88#undef ELF_NVRREG
89#define ELF_NVRREG ELF_NVRREG32
90
91/*
92 * Functions for flipping sigsets (thanks to brain dead generic
93 * implementation that makes things simple for little endian only)
94 */
95static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
96{
97 return put_compat_sigset(uset, set, sizeof(*uset));
98}
99
100static inline int get_sigset_t(sigset_t *set,
101 const compat_sigset_t __user *uset)
102{
103 return get_compat_sigset(set, uset);
104}
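/*
 * Illustrative sketch (not kernel code): on a big-endian 64-bit kernel the
 * two 32-bit halves of each sigset_t word sit "high half first" in memory,
 * so a plain memcpy into a compat_sigset_t would swap them. The generic
 * get/put_compat_sigset() helpers split/join each word instead, roughly:
 *
 *	sigset_t	s = { .sig[0] = 0x100000002ULL };
 *	compat_sigset_t	c;
 *	c.sig[0] = s.sig[0] & 0xffffffffUL;	// 0x2: signals 1-32 first
 *	c.sig[1] = s.sig[0] >> 32;		// 0x1: signals 33-64 next
 */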
105
106#define to_user_ptr(p) ptr_to_compat(p)
107#define from_user_ptr(p) compat_ptr(p)
108
109static inline int save_general_regs(struct pt_regs *regs,
110 struct mcontext __user *frame)
111{
112 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
113 int i;
 114 /* Force user to always see softe as 1 (interrupts enabled) */
115 elf_greg_t64 softe = 0x1;
116
117 WARN_ON(!FULL_REGS(regs));
118
 119 for (i = 0; i <= PT_RESULT; i++) {
 120 if (i == 14 && !FULL_REGS(regs))
 121 i = 32;
 122 if (i == PT_SOFTE) {
 123 if (__put_user((unsigned int)softe, &frame->mc_gregs[i]))
124 return -EFAULT;
125 else
126 continue;
127 }
128 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
129 return -EFAULT;
130 }
131 return 0;
132}
133
134static inline int restore_general_regs(struct pt_regs *regs,
135 struct mcontext __user *sr)
136{
137 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
138 int i;
139
140 for (i = 0; i <= PT_RESULT; i++) {
141 if ((i == PT_MSR) || (i == PT_SOFTE))
142 continue;
143 if (__get_user(gregs[i], &sr->mc_gregs[i]))
144 return -EFAULT;
145 }
146 return 0;
147}
148
149#else /* CONFIG_PPC64 */
150
151#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
152
153static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
154{
155 return copy_to_user(uset, set, sizeof(*uset));
156}
157
158static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
159{
160 return copy_from_user(set, uset, sizeof(*uset));
161}
162
163#define to_user_ptr(p) ((unsigned long)(p))
164#define from_user_ptr(p) ((void __user *)(p))
165
166static inline int save_general_regs(struct pt_regs *regs,
167 struct mcontext __user *frame)
168{
169 WARN_ON(!FULL_REGS(regs));
170 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
171}
172
173static inline int restore_general_regs(struct pt_regs *regs,
174 struct mcontext __user *sr)
175{
176 /* copy up to but not including MSR */
177 if (__copy_from_user(regs, &sr->mc_gregs,
178 PT_MSR * sizeof(elf_greg_t)))
179 return -EFAULT;
180 /* copy from orig_r3 (the word after the MSR) up to the end */
 181 if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
182 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
183 return -EFAULT;
184 return 0;
185}
186#endif
187
188/*
189 * When we have signals to deliver, we set up on the
190 * user stack, going down from the original stack pointer:
191 * an ABI gap of 56 words
192 * an mcontext struct
193 * a sigcontext struct
194 * a gap of __SIGNAL_FRAMESIZE bytes
195 *
196 * Each of these things must be a multiple of 16 bytes in size. The following
 197 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
198 *
199 */
200struct sigframe {
201 struct sigcontext sctx; /* the sigcontext */
202 struct mcontext mctx; /* all the register values */
203#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
204 struct sigcontext sctx_transact;
205 struct mcontext mctx_transact;
206#endif
207 /*
208 * Programs using the rs6000/xcoff abi can save up to 19 gp
209 * regs and 18 fp regs below sp before decrementing it.
210 */
211 int abigap[56];
212};
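/*
 * Illustrative sketch of the frame built by handle_signal32() below
 * (lowest address first; exact byte offsets depend on the ABI sizes):
 *
 *	new r1 -> [ back chain: old r1        ]  \
 *	          [ ...                       ]  / __SIGNAL_FRAMESIZE gap
 *	frame  -> [ struct sigcontext sctx    ]
 *	          [ struct mcontext  mctx     ]  registers + trampoline
 *	          [ TM copies, if configured  ]
 *	          [ int abigap[56]            ]  xcoff/rs6000 save area
 *	old r1 -> [ caller's stack frame      ]
 *
 * The rt variant below is the same idea with an rt_sigframe and a
 * __SIGNAL_FRAMESIZE + 16 byte gap instead.
 */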
213
214/* We use the mc_pad field for the signal return trampoline. */
215#define tramp mc_pad
216
217/*
218 * When we have rt signals to deliver, we set up on the
219 * user stack, going down from the original stack pointer:
220 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
221 * a gap of __SIGNAL_FRAMESIZE+16 bytes
222 * (the +16 is to get the siginfo and ucontext in the same
223 * positions as in older kernels).
224 *
225 * Each of these things must be a multiple of 16 bytes in size.
226 *
227 */
228struct rt_sigframe {
229#ifdef CONFIG_PPC64
230 compat_siginfo_t info;
231#else
232 struct siginfo info;
233#endif
234 struct ucontext uc;
235#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
236 struct ucontext uc_transact;
237#endif
238 /*
239 * Programs using the rs6000/xcoff abi can save up to 19 gp
240 * regs and 18 fp regs below sp before decrementing it.
241 */
242 int abigap[56];
243};
244
245#ifdef CONFIG_VSX
246unsigned long copy_fpr_to_user(void __user *to,
247 struct task_struct *task)
248{
249 u64 buf[ELF_NFPREG];
250 int i;
251
 252 /* copy the FPRs (and fpscr) from the thread_struct to a local buffer, then write to userspace */
253 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
254 buf[i] = task->thread.TS_FPR(i);
255 buf[i] = task->thread.fp_state.fpscr;
256 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
257}
258
259unsigned long copy_fpr_from_user(struct task_struct *task,
260 void __user *from)
261{
262 u64 buf[ELF_NFPREG];
263 int i;
264
265 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
266 return 1;
267 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
268 task->thread.TS_FPR(i) = buf[i];
269 task->thread.fp_state.fpscr = buf[i];
270
271 return 0;
272}
273
274unsigned long copy_vsx_to_user(void __user *to,
275 struct task_struct *task)
276{
277 u64 buf[ELF_NVSRHALFREG];
278 int i;
279
 280 /* copy the VSX-only halves of VSR 0-31 from the thread_struct to a local buffer, then write to userspace */
281 for (i = 0; i < ELF_NVSRHALFREG; i++)
282 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
283 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
284}
285
286unsigned long copy_vsx_from_user(struct task_struct *task,
287 void __user *from)
288{
289 u64 buf[ELF_NVSRHALFREG];
290 int i;
291
292 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
293 return 1;
294 for (i = 0; i < ELF_NVSRHALFREG ; i++)
295 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
296 return 0;
297}
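/*
 * Illustrative sketch (assumes the usual thread_struct layout, with
 * TS_FPROFFSET/TS_VSRLOWOFFSET indexing the two doublewords of each
 * fp_state.fpr[] entry): a full 128-bit VSR 0-31 is split across the two
 * copy helpers above, e.g. for register i:
 *
 *	u64 fpr_half = task->thread.fp_state.fpr[i][TS_FPROFFSET];	// moved by copy_fpr_*_user()
 *	u64 vsx_half = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];	// moved by copy_vsx_*_user()
 */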
298
299#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
300unsigned long copy_ckfpr_to_user(void __user *to,
301 struct task_struct *task)
302{
303 u64 buf[ELF_NFPREG];
304 int i;
305
 306 /* copy the checkpointed FPRs (and fpscr) from the thread_struct to a local buffer, then write to userspace */
307 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
308 buf[i] = task->thread.TS_CKFPR(i);
309 buf[i] = task->thread.ckfp_state.fpscr;
310 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
311}
312
313unsigned long copy_ckfpr_from_user(struct task_struct *task,
314 void __user *from)
315{
316 u64 buf[ELF_NFPREG];
317 int i;
318
319 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
320 return 1;
321 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
322 task->thread.TS_CKFPR(i) = buf[i];
323 task->thread.ckfp_state.fpscr = buf[i];
324
325 return 0;
326}
327
328unsigned long copy_ckvsx_to_user(void __user *to,
329 struct task_struct *task)
330{
331 u64 buf[ELF_NVSRHALFREG];
332 int i;
333
 334 /* copy the checkpointed VSX-only halves of VSR 0-31 from the thread_struct to a local buffer, then write to userspace */
335 for (i = 0; i < ELF_NVSRHALFREG; i++)
336 buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
337 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
338}
339
340unsigned long copy_ckvsx_from_user(struct task_struct *task,
341 void __user *from)
342{
343 u64 buf[ELF_NVSRHALFREG];
344 int i;
345
346 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
347 return 1;
348 for (i = 0; i < ELF_NVSRHALFREG ; i++)
349 task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
350 return 0;
351}
352#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
353#else
354inline unsigned long copy_fpr_to_user(void __user *to,
355 struct task_struct *task)
356{
357 return __copy_to_user(to, task->thread.fp_state.fpr,
358 ELF_NFPREG * sizeof(double));
359}
360
361inline unsigned long copy_fpr_from_user(struct task_struct *task,
362 void __user *from)
363{
364 return __copy_from_user(task->thread.fp_state.fpr, from,
365 ELF_NFPREG * sizeof(double));
366}
367
368#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
369inline unsigned long copy_ckfpr_to_user(void __user *to,
370 struct task_struct *task)
371{
372 return __copy_to_user(to, task->thread.ckfp_state.fpr,
373 ELF_NFPREG * sizeof(double));
374}
375
376inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
377 void __user *from)
378{
379 return __copy_from_user(task->thread.ckfp_state.fpr, from,
380 ELF_NFPREG * sizeof(double));
381}
382#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
383#endif
384
385/*
386 * Save the current user registers on the user stack.
387 * We only save the altivec/spe registers if the process has used
388 * altivec/spe instructions at some point.
389 */
390static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
391 struct mcontext __user *tm_frame, int sigret,
392 int ctx_has_vsx_region)
393{
394 unsigned long msr = regs->msr;
395
396 /* Make sure floating point registers are stored in regs */
397 flush_fp_to_thread(current);
398
399 /* save general registers */
400 if (save_general_regs(regs, frame))
401 return 1;
402
403#ifdef CONFIG_ALTIVEC
404 /* save altivec registers */
405 if (current->thread.used_vr) {
406 flush_altivec_to_thread(current);
 407 if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
408 ELF_NVRREG * sizeof(vector128)))
409 return 1;
410 /* set MSR_VEC in the saved MSR value to indicate that
411 frame->mc_vregs contains valid data */
412 msr |= MSR_VEC;
413 }
414 /* else assert((regs->msr & MSR_VEC) == 0) */
415
416 /* We always copy to/from vrsave, it's 0 if we don't have or don't
417 * use altivec. Since VSCR only contains 32 bits saved in the least
418 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
419 * most significant bits of that same vector. --BenH
420 * Note that the current VRSAVE value is in the SPR at this point.
421 */
422 if (cpu_has_feature(CPU_FTR_ALTIVEC))
423 current->thread.vrsave = mfspr(SPRN_VRSAVE);
424 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
425 return 1;
426#endif /* CONFIG_ALTIVEC */
427 if (copy_fpr_to_user(&frame->mc_fregs, current))
428 return 1;
429
430 /*
431 * Clear the MSR VSX bit to indicate there is no valid state attached
432 * to this context, except in the specific case below where we set it.
433 */
434 msr &= ~MSR_VSX;
435#ifdef CONFIG_VSX
436 /*
437 * Copy VSR 0-31 upper half from thread_struct to local
438 * buffer, then write that to userspace. Also set MSR_VSX in
439 * the saved MSR value to indicate that frame->mc_vregs
440 * contains valid data
441 */
442 if (current->thread.used_vsr && ctx_has_vsx_region) {
443 flush_vsx_to_thread(current);
444 if (copy_vsx_to_user(&frame->mc_vsregs, current))
445 return 1;
446 msr |= MSR_VSX;
447 }
448#endif /* CONFIG_VSX */
449#ifdef CONFIG_SPE
450 /* save spe registers */
451 if (current->thread.used_spe) {
452 flush_spe_to_thread(current);
453 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
454 ELF_NEVRREG * sizeof(u32)))
455 return 1;
456 /* set MSR_SPE in the saved MSR value to indicate that
457 frame->mc_vregs contains valid data */
458 msr |= MSR_SPE;
459 }
460 /* else assert((regs->msr & MSR_SPE) == 0) */
461
462 /* We always copy to/from spefscr */
463 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
464 return 1;
465#endif /* CONFIG_SPE */
466
467 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
468 return 1;
 469 /* We need to write 0 to the MSR top 32 bits in the tm frame so that we
470 * can check it on the restore to see if TM is active
471 */
472 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
473 return 1;
474
475 if (sigret) {
476 /* Set up the sigreturn trampoline: li r0,sigret; sc */
477 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
478 || __put_user(0x44000002UL, &frame->tramp[1]))
479 return 1;
480 flush_icache_range((unsigned long) &frame->tramp[0],
481 (unsigned long) &frame->tramp[2]);
482 }
483
484 return 0;
485}
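/*
 * Worked example of the trampoline written above (instruction encodings
 * per the Power ISA): with sigret == __NR_sigreturn the two words are
 *
 *	0x38000000 | sigret	->	li	r0,sigret	(addi r0,0,sigret)
 *	0x44000002		->	sc
 *
 * so when the handler returns through LR it executes "li r0,NR; sc" and
 * re-enters the kernel via the corresponding sigreturn syscall.
 */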
486
487#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
488/*
489 * Save the current user registers on the user stack.
490 * We only save the altivec/spe registers if the process has used
491 * altivec/spe instructions at some point.
492 * We also save the transactional registers to a second ucontext in the
493 * frame.
494 *
495 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
496 */
497static int save_tm_user_regs(struct pt_regs *regs,
498 struct mcontext __user *frame,
499 struct mcontext __user *tm_frame, int sigret)
500{
501 unsigned long msr = regs->msr;
502
503 WARN_ON(tm_suspend_disabled);
504
505 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
506 * just indicates to userland that we were doing a transaction, but we
507 * don't want to return in transactional state. This also ensures
508 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
509 */
510 regs->msr &= ~MSR_TS_MASK;
511
512 /* Save both sets of general registers */
 513 if (save_general_regs(&current->thread.ckpt_regs, frame)
514 || save_general_regs(regs, tm_frame))
515 return 1;
516
517 /* Stash the top half of the 64bit MSR into the 32bit MSR word
518 * of the transactional mcontext. This way we have a backward-compatible
519 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
520 * also look at what type of transaction (T or S) was active at the
521 * time of the signal.
522 */
523 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
524 return 1;
525
526#ifdef CONFIG_ALTIVEC
527 /* save altivec registers */
528 if (current->thread.used_vr) {
 529 if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
530 ELF_NVRREG * sizeof(vector128)))
531 return 1;
532 if (msr & MSR_VEC) {
533 if (__copy_to_user(&tm_frame->mc_vregs,
 534 &current->thread.vr_state,
535 ELF_NVRREG * sizeof(vector128)))
536 return 1;
537 } else {
538 if (__copy_to_user(&tm_frame->mc_vregs,
 539 &current->thread.ckvr_state,
540 ELF_NVRREG * sizeof(vector128)))
541 return 1;
542 }
543
544 /* set MSR_VEC in the saved MSR value to indicate that
545 * frame->mc_vregs contains valid data
546 */
547 msr |= MSR_VEC;
548 }
549
550 /* We always copy to/from vrsave, it's 0 if we don't have or don't
551 * use altivec. Since VSCR only contains 32 bits saved in the least
552 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
553 * most significant bits of that same vector. --BenH
554 */
555 if (cpu_has_feature(CPU_FTR_ALTIVEC))
556 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
557 if (__put_user(current->thread.ckvrsave,
558 (u32 __user *)&frame->mc_vregs[32]))
559 return 1;
560 if (msr & MSR_VEC) {
561 if (__put_user(current->thread.vrsave,
562 (u32 __user *)&tm_frame->mc_vregs[32]))
563 return 1;
564 } else {
565 if (__put_user(current->thread.ckvrsave,
566 (u32 __user *)&tm_frame->mc_vregs[32]))
567 return 1;
568 }
569#endif /* CONFIG_ALTIVEC */
570
571 if (copy_ckfpr_to_user(&frame->mc_fregs, current))
572 return 1;
573 if (msr & MSR_FP) {
574 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
575 return 1;
576 } else {
577 if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
578 return 1;
579 }
580
581#ifdef CONFIG_VSX
582 /*
583 * Copy VSR 0-31 upper half from thread_struct to local
584 * buffer, then write that to userspace. Also set MSR_VSX in
585 * the saved MSR value to indicate that frame->mc_vregs
586 * contains valid data
587 */
588 if (current->thread.used_vsr) {
589 if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
590 return 1;
591 if (msr & MSR_VSX) {
592 if (copy_vsx_to_user(&tm_frame->mc_vsregs,
593 current))
594 return 1;
595 } else {
596 if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
597 return 1;
598 }
599
600 msr |= MSR_VSX;
601 }
602#endif /* CONFIG_VSX */
603#ifdef CONFIG_SPE
604 /* SPE regs are not checkpointed with TM, so this section is
605 * simply the same as in save_user_regs().
606 */
607 if (current->thread.used_spe) {
608 flush_spe_to_thread(current);
609 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
610 ELF_NEVRREG * sizeof(u32)))
611 return 1;
612 /* set MSR_SPE in the saved MSR value to indicate that
613 * frame->mc_vregs contains valid data */
614 msr |= MSR_SPE;
615 }
616
617 /* We always copy to/from spefscr */
618 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
619 return 1;
620#endif /* CONFIG_SPE */
621
622 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
623 return 1;
624 if (sigret) {
625 /* Set up the sigreturn trampoline: li r0,sigret; sc */
626 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
627 || __put_user(0x44000002UL, &frame->tramp[1]))
628 return 1;
629 flush_icache_range((unsigned long) &frame->tramp[0],
630 (unsigned long) &frame->tramp[2]);
631 }
632
633 return 0;
634}
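/*
 * Summary sketch of what save_tm_user_regs() above puts where (no new ABI
 * implied, just a restatement of the code):
 *
 *	frame    (uc.uc_mcontext)          : checkpointed GPRs/FPRs/VRs, i.e.
 *	                                     the pre-transaction state;
 *	tm_frame (uc_transact.uc_mcontext) : the live transactional state for
 *	                                     each facility the MSR marks as in
 *	                                     use, otherwise the checkpointed
 *	                                     copy again, plus the top 32 MSR
 *	                                     bits in its PT_MSR slot.
 */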
635#endif
636
637/*
638 * Restore the current user register values from the user stack,
639 * (except for MSR).
640 */
641static long restore_user_regs(struct pt_regs *regs,
642 struct mcontext __user *sr, int sig)
643{
644 long err;
645 unsigned int save_r2 = 0;
646 unsigned long msr;
647#ifdef CONFIG_VSX
648 int i;
649#endif
650
651 /*
652 * restore general registers but not including MSR or SOFTE. Also
653 * take care of keeping r2 (TLS) intact if not a signal
654 */
655 if (!sig)
656 save_r2 = (unsigned int)regs->gpr[2];
657 err = restore_general_regs(regs, sr);
658 regs->trap = 0;
659 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
660 if (!sig)
661 regs->gpr[2] = (unsigned long) save_r2;
662 if (err)
663 return 1;
664
665 /* if doing signal return, restore the previous little-endian mode */
666 if (sig)
667 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
668
669#ifdef CONFIG_ALTIVEC
670 /*
671 * Force the process to reload the altivec registers from
672 * current->thread when it next does altivec instructions
673 */
674 regs->msr &= ~MSR_VEC;
675 if (msr & MSR_VEC) {
676 /* restore altivec registers from the stack */
 677 if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
678 sizeof(sr->mc_vregs)))
679 return 1;
680 current->thread.used_vr = true;
681 } else if (current->thread.used_vr)
 682 memset(&current->thread.vr_state, 0,
683 ELF_NVRREG * sizeof(vector128));
684
685 /* Always get VRSAVE back */
686 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
687 return 1;
688 if (cpu_has_feature(CPU_FTR_ALTIVEC))
689 mtspr(SPRN_VRSAVE, current->thread.vrsave);
690#endif /* CONFIG_ALTIVEC */
691 if (copy_fpr_from_user(current, &sr->mc_fregs))
692 return 1;
693
694#ifdef CONFIG_VSX
695 /*
696 * Force the process to reload the VSX registers from
697 * current->thread when it next does VSX instruction.
698 */
699 regs->msr &= ~MSR_VSX;
700 if (msr & MSR_VSX) {
701 /*
 702 * Restore the VSX-only register halves from the stack to a local
 703 * buffer, then write this out to the thread_struct
704 */
705 if (copy_vsx_from_user(current, &sr->mc_vsregs))
706 return 1;
707 current->thread.used_vsr = true;
708 } else if (current->thread.used_vsr)
709 for (i = 0; i < 32 ; i++)
710 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
711#endif /* CONFIG_VSX */
712 /*
713 * force the process to reload the FP registers from
714 * current->thread when it next does FP instructions
715 */
716 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
717
718#ifdef CONFIG_SPE
719 /* force the process to reload the spe registers from
720 current->thread when it next does spe instructions */
721 regs->msr &= ~MSR_SPE;
722 if (msr & MSR_SPE) {
723 /* restore spe registers from the stack */
724 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
725 ELF_NEVRREG * sizeof(u32)))
726 return 1;
727 current->thread.used_spe = true;
728 } else if (current->thread.used_spe)
729 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
730
731 /* Always get SPEFSCR back */
732 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
733 return 1;
734#endif /* CONFIG_SPE */
735
736 return 0;
737}
738
739#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
740/*
741 * Restore the current user register values from the user stack, except for
742 * MSR, and recheckpoint the original checkpointed register state for processes
743 * in transactions.
744 */
745static long restore_tm_user_regs(struct pt_regs *regs,
746 struct mcontext __user *sr,
747 struct mcontext __user *tm_sr)
748{
749 long err;
750 unsigned long msr, msr_hi;
751#ifdef CONFIG_VSX
752 int i;
753#endif
754
755 if (tm_suspend_disabled)
756 return 1;
757 /*
758 * restore general registers but not including MSR or SOFTE. Also
759 * take care of keeping r2 (TLS) intact if not a signal.
760 * See comment in signal_64.c:restore_tm_sigcontexts();
761 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
762 * were set by the signal delivery.
763 */
764 err = restore_general_regs(regs, tm_sr);
 765 err |= restore_general_regs(&current->thread.ckpt_regs, sr);
766
767 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
768
769 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
770 if (err)
771 return 1;
772
773 /* Restore the previous little-endian mode */
774 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
775
776#ifdef CONFIG_ALTIVEC
777 regs->msr &= ~MSR_VEC;
778 if (msr & MSR_VEC) {
779 /* restore altivec registers from the stack */
 780 if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
781 sizeof(sr->mc_vregs)) ||
 782 __copy_from_user(&current->thread.vr_state,
783 &tm_sr->mc_vregs,
784 sizeof(sr->mc_vregs)))
785 return 1;
786 current->thread.used_vr = true;
787 } else if (current->thread.used_vr) {
 788 memset(&current->thread.vr_state, 0,
 789 ELF_NVRREG * sizeof(vector128));
 790 memset(&current->thread.ckvr_state, 0,
791 ELF_NVRREG * sizeof(vector128));
792 }
793
794 /* Always get VRSAVE back */
795 if (__get_user(current->thread.ckvrsave,
796 (u32 __user *)&sr->mc_vregs[32]) ||
797 __get_user(current->thread.vrsave,
798 (u32 __user *)&tm_sr->mc_vregs[32]))
799 return 1;
800 if (cpu_has_feature(CPU_FTR_ALTIVEC))
801 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
802#endif /* CONFIG_ALTIVEC */
803
804 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
805
806 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
807 copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
808 return 1;
809
810#ifdef CONFIG_VSX
811 regs->msr &= ~MSR_VSX;
812 if (msr & MSR_VSX) {
813 /*
 814 * Restore the VSX-only register halves from the stack to a local
 815 * buffer, then write this out to the thread_struct
816 */
817 if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
818 copy_ckvsx_from_user(current, &sr->mc_vsregs))
819 return 1;
820 current->thread.used_vsr = true;
821 } else if (current->thread.used_vsr)
822 for (i = 0; i < 32 ; i++) {
823 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
824 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
825 }
826#endif /* CONFIG_VSX */
827
828#ifdef CONFIG_SPE
829 /* SPE regs are not checkpointed with TM, so this section is
830 * simply the same as in restore_user_regs().
831 */
832 regs->msr &= ~MSR_SPE;
833 if (msr & MSR_SPE) {
834 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
835 ELF_NEVRREG * sizeof(u32)))
836 return 1;
837 current->thread.used_spe = true;
838 } else if (current->thread.used_spe)
839 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
840
841 /* Always get SPEFSCR back */
842 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
843 + ELF_NEVRREG))
844 return 1;
845#endif /* CONFIG_SPE */
846
847 /* Get the top half of the MSR from the user context */
848 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
849 return 1;
850 msr_hi <<= 32;
851 /* If TM bits are set to the reserved value, it's an invalid context */
852 if (MSR_TM_RESV(msr_hi))
853 return 1;
854 /* Pull in the MSR TM bits from the user context */
855 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
856 /* Now, recheckpoint. This loads up all of the checkpointed (older)
857 * registers, including FP and V[S]Rs. After recheckpointing, the
858 * transactional versions should be loaded.
859 */
860 tm_enable();
861 /* Make sure the transaction is marked as failed */
862 current->thread.tm_texasr |= TEXASR_FS;
863 /* This loads the checkpointed FP/VEC state, if used */
 864 tm_recheckpoint(&current->thread);
865
866 /* This loads the speculative FP/VEC state, if used */
867 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
868 if (msr & MSR_FP) {
 869 load_fp_state(&current->thread.fp_state);
870 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
871 }
872#ifdef CONFIG_ALTIVEC
873 if (msr & MSR_VEC) {
 874 load_vr_state(&current->thread.vr_state);
875 regs->msr |= MSR_VEC;
876 }
877#endif
878
879 return 0;
880}
881#endif
882
883#ifdef CONFIG_PPC64
884
885#define copy_siginfo_to_user copy_siginfo_to_user32
886
887#endif /* CONFIG_PPC64 */
888
889/*
890 * Set up a signal frame for a "real-time" signal handler
891 * (one which gets siginfo).
892 */
893int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
894 struct task_struct *tsk)
895{
896 struct rt_sigframe __user *rt_sf;
897 struct mcontext __user *frame;
898 struct mcontext __user *tm_frame = NULL;
899 void __user *addr;
900 unsigned long newsp = 0;
901 int sigret;
902 unsigned long tramp;
903 struct pt_regs *regs = tsk->thread.regs;
904
905 BUG_ON(tsk != current);
906
907 /* Set up Signal Frame */
908 /* Put a Real Time Context onto stack */
909 rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
910 addr = rt_sf;
911 if (unlikely(rt_sf == NULL))
912 goto badframe;
913
914 /* Put the siginfo & fill in most of the ucontext */
915 if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
916 || __put_user(0, &rt_sf->uc.uc_flags)
917 || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
918 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
919 &rt_sf->uc.uc_regs)
920 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
921 goto badframe;
922
923 /* Save user registers on the stack */
924 frame = &rt_sf->uc.uc_mcontext;
925 addr = frame;
926 if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
927 sigret = 0;
928 tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
929 } else {
930 sigret = __NR_rt_sigreturn;
931 tramp = (unsigned long) frame->tramp;
932 }
933
934#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
935 tm_frame = &rt_sf->uc_transact.uc_mcontext;
936 if (MSR_TM_ACTIVE(regs->msr)) {
937 if (__put_user((unsigned long)&rt_sf->uc_transact,
938 &rt_sf->uc.uc_link) ||
939 __put_user((unsigned long)tm_frame,
940 &rt_sf->uc_transact.uc_regs))
941 goto badframe;
942 if (save_tm_user_regs(regs, frame, tm_frame, sigret))
943 goto badframe;
944 }
945 else
946#endif
947 {
948 if (__put_user(0, &rt_sf->uc.uc_link))
949 goto badframe;
950 if (save_user_regs(regs, frame, tm_frame, sigret, 1))
951 goto badframe;
952 }
953 regs->link = tramp;
954
955 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
956
957 /* create a stack frame for the caller of the handler */
958 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
959 addr = (void __user *)regs->gpr[1];
960 if (put_user(regs->gpr[1], (u32 __user *)newsp))
961 goto badframe;
962
963 /* Fill registers for signal handler */
964 regs->gpr[1] = newsp;
965 regs->gpr[3] = ksig->sig;
966 regs->gpr[4] = (unsigned long) &rt_sf->info;
967 regs->gpr[5] = (unsigned long) &rt_sf->uc;
968 regs->gpr[6] = (unsigned long) rt_sf;
969 regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
970 /* enter the signal handler in native-endian mode */
971 regs->msr &= ~MSR_LE;
972 regs->msr |= (MSR_KERNEL & MSR_LE);
973 return 0;
974
975badframe:
976 if (show_unhandled_signals)
977 printk_ratelimited(KERN_INFO
978 "%s[%d]: bad frame in handle_rt_signal32: "
979 "%p nip %08lx lr %08lx\n",
980 tsk->comm, tsk->pid,
981 addr, regs->nip, regs->link);
982
983 return 1;
984}
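/*
 * Illustrative userspace view of the register setup above (hypothetical
 * handler, not kernel code): an SA_SIGINFO handler is entered as
 *
 *	void handler(int sig, siginfo_t *info, void *ucontext);
 *
 * with r3 = sig, r4 = info, r5 = ucontext, r6 = the whole rt_sigframe, and
 * LR pointing at the vDSO or stack trampoline, so a plain return ends up
 * in sys_rt_sigreturn().
 */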
985
986static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
987{
988 sigset_t set;
989 struct mcontext __user *mcp;
990
991 if (get_sigset_t(&set, &ucp->uc_sigmask))
992 return -EFAULT;
993#ifdef CONFIG_PPC64
994 {
995 u32 cmcp;
996
997 if (__get_user(cmcp, &ucp->uc_regs))
998 return -EFAULT;
999 mcp = (struct mcontext __user *)(u64)cmcp;
1000 /* no need to check access_ok(mcp), since mcp < 4GB */
1001 }
1002#else
1003 if (__get_user(mcp, &ucp->uc_regs))
1004 return -EFAULT;
1005 if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
1006 return -EFAULT;
1007#endif
1008 set_current_blocked(&set);
1009 if (restore_user_regs(regs, mcp, sig))
1010 return -EFAULT;
1011
1012 return 0;
1013}
1014
1015#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1016static int do_setcontext_tm(struct ucontext __user *ucp,
1017 struct ucontext __user *tm_ucp,
1018 struct pt_regs *regs)
1019{
1020 sigset_t set;
1021 struct mcontext __user *mcp;
1022 struct mcontext __user *tm_mcp;
1023 u32 cmcp;
1024 u32 tm_cmcp;
1025
1026 if (get_sigset_t(&set, &ucp->uc_sigmask))
1027 return -EFAULT;
1028
1029 if (__get_user(cmcp, &ucp->uc_regs) ||
1030 __get_user(tm_cmcp, &tm_ucp->uc_regs))
1031 return -EFAULT;
1032 mcp = (struct mcontext __user *)(u64)cmcp;
1033 tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
1034 /* no need to check access_ok(mcp), since mcp < 4GB */
1035
1036 set_current_blocked(&set);
1037 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1038 return -EFAULT;
1039
1040 return 0;
1041}
1042#endif
1043
1044long sys_swapcontext(struct ucontext __user *old_ctx,
1045 struct ucontext __user *new_ctx,
1046 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
1047{
1048 unsigned char tmp __maybe_unused;
1049 int ctx_has_vsx_region = 0;
1050
1051#ifdef CONFIG_PPC64
1052 unsigned long new_msr = 0;
1053
1054 if (new_ctx) {
1055 struct mcontext __user *mcp;
1056 u32 cmcp;
1057
1058 /*
1059 * Get pointer to the real mcontext. No need for
1060 * access_ok since we are dealing with compat
1061 * pointers.
1062 */
1063 if (__get_user(cmcp, &new_ctx->uc_regs))
1064 return -EFAULT;
1065 mcp = (struct mcontext __user *)(u64)cmcp;
1066 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1067 return -EFAULT;
1068 }
1069 /*
1070 * Check that the context is not smaller than the original
1071 * size (with VMX but without VSX)
1072 */
1073 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1074 return -EINVAL;
1075 /*
1076 * If the new context state sets the MSR VSX bits but
1077 * it doesn't provide VSX state.
1078 */
1079 if ((ctx_size < sizeof(struct ucontext)) &&
1080 (new_msr & MSR_VSX))
1081 return -EINVAL;
1082 /* Does the context have enough room to store VSX data? */
1083 if (ctx_size >= sizeof(struct ucontext))
1084 ctx_has_vsx_region = 1;
1085#else
1086 /* Context size is for future use. Right now, we only make sure
1087 * we are passed something we understand
1088 */
1089 if (ctx_size < sizeof(struct ucontext))
1090 return -EINVAL;
1091#endif
1092 if (old_ctx != NULL) {
1093 struct mcontext __user *mctx;
1094
1095 /*
1096 * old_ctx might not be 16-byte aligned, in which
1097 * case old_ctx->uc_mcontext won't be either.
1098 * Because we have the old_ctx->uc_pad2 field
1099 * before old_ctx->uc_mcontext, we need to round down
1100 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1101 */
1102 mctx = (struct mcontext __user *)
1103 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1104 if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
1105 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
 1106 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
1107 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1108 return -EFAULT;
1109 }
1110 if (new_ctx == NULL)
1111 return 0;
1112 if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
1113 || __get_user(tmp, (u8 __user *) new_ctx)
1114 || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
1115 return -EFAULT;
1116
1117 /*
1118 * If we get a fault copying the context into the kernel's
1119 * image of the user's registers, we can't just return -EFAULT
1120 * because the user's registers will be corrupted. For instance
1121 * the NIP value may have been updated but not some of the
1122 * other registers. Given that we have done the access_ok
1123 * and successfully read the first and last bytes of the region
1124 * above, this should only happen in an out-of-memory situation
1125 * or if another thread unmaps the region containing the context.
1126 * We kill the task with a SIGSEGV in this situation.
1127 */
1128 if (do_setcontext(new_ctx, regs, 0))
1129 do_exit(SIGSEGV);
1130
1131 set_thread_flag(TIF_RESTOREALL);
1132 return 0;
1133}
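/*
 * Illustrative sketch of the calling convention (hypothetical direct use
 * from userspace; whether libc routes its swapcontext() through this
 * syscall is not assumed here):
 *
 *	ucontext_t old, new;
 *	// "new" must describe at least a VSX-less context; a smaller
 *	// ctx_size is rejected with -EINVAL.
 *	syscall(__NR_swapcontext, &old, &new, sizeof(new));
 *	// old_ctx == NULL skips the save, new_ctx == NULL skips the restore.
 */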
1134
1135long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1136 struct pt_regs *regs)
1137{
1138 struct rt_sigframe __user *rt_sf;
1139#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1140 struct ucontext __user *uc_transact;
1141 unsigned long msr_hi;
1142 unsigned long tmp;
1143 int tm_restore = 0;
1144#endif
1145 /* Always make any pending restarted system calls return -EINTR */
1146 current->restart_block.fn = do_no_restart_syscall;
1147
1148 rt_sf = (struct rt_sigframe __user *)
1149 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1150 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1151 goto bad;
1152
1153#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1154 /*
1155 * If there is a transactional state then throw it away.
1156 * The purpose of a sigreturn is to destroy all traces of the
 1157 * signal frame; this includes any transactional state created
 1158 * within it. We only check for suspended as we can never be
 1159 * active in the kernel itself; if we somehow were, there is nothing
 1160 * better to do than go ahead and hit the TM Bad Thing later.
1161 * The cause is not important as there will never be a
1162 * recheckpoint so it's not user visible.
1163 */
1164 if (MSR_TM_SUSPENDED(mfmsr()))
1165 tm_reclaim_current(0);
1166
1167 if (__get_user(tmp, &rt_sf->uc.uc_link))
1168 goto bad;
1169 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1170 if (uc_transact) {
1171 u32 cmcp;
1172 struct mcontext __user *mcp;
1173
1174 if (__get_user(cmcp, &uc_transact->uc_regs))
1175 return -EFAULT;
1176 mcp = (struct mcontext __user *)(u64)cmcp;
1177 /* The top 32 bits of the MSR are stashed in the transactional
1178 * ucontext. */
1179 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1180 goto bad;
1181
1182 if (MSR_TM_ACTIVE(msr_hi<<32)) {
 1183 /* We only recheckpoint on return if we're
 1184 * in a transaction.
1185 */
1186 tm_restore = 1;
1187 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1188 goto bad;
1189 }
1190 }
1191 if (!tm_restore)
1192 /* Fall through, for non-TM restore */
1193#endif
1194 if (do_setcontext(&rt_sf->uc, regs, 1))
1195 goto bad;
1196
1197 /*
1198 * It's not clear whether or why it is desirable to save the
1199 * sigaltstack setting on signal delivery and restore it on
1200 * signal return. But other architectures do this and we have
1201 * always done it up until now so it is probably better not to
1202 * change it. -- paulus
1203 */
1204#ifdef CONFIG_PPC64
1205 if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1206 goto bad;
1207#else
1208 if (restore_altstack(&rt_sf->uc.uc_stack))
1209 goto bad;
1210#endif
1211 set_thread_flag(TIF_RESTOREALL);
1212 return 0;
1213
1214 bad:
1215 if (show_unhandled_signals)
1216 printk_ratelimited(KERN_INFO
1217 "%s[%d]: bad frame in sys_rt_sigreturn: "
1218 "%p nip %08lx lr %08lx\n",
1219 current->comm, current->pid,
1220 rt_sf, regs->nip, regs->link);
1221
1222 force_sig(SIGSEGV, current);
1223 return 0;
1224}
1225
1226#ifdef CONFIG_PPC32
1227int sys_debug_setcontext(struct ucontext __user *ctx,
1228 int ndbg, struct sig_dbg_op __user *dbg,
1229 int r6, int r7, int r8,
1230 struct pt_regs *regs)
1231{
1232 struct sig_dbg_op op;
1233 int i;
1234 unsigned char tmp __maybe_unused;
1235 unsigned long new_msr = regs->msr;
1236#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1237 unsigned long new_dbcr0 = current->thread.debug.dbcr0;
1238#endif
1239
 1240 for (i = 0; i < ndbg; i++) {
1241 if (copy_from_user(&op, dbg + i, sizeof(op)))
1242 return -EFAULT;
1243 switch (op.dbg_type) {
1244 case SIG_DBG_SINGLE_STEPPING:
1245#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1246 if (op.dbg_value) {
1247 new_msr |= MSR_DE;
1248 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1249 } else {
1250 new_dbcr0 &= ~DBCR0_IC;
1251 if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1252 current->thread.debug.dbcr1)) {
1253 new_msr &= ~MSR_DE;
1254 new_dbcr0 &= ~DBCR0_IDM;
1255 }
1256 }
1257#else
1258 if (op.dbg_value)
1259 new_msr |= MSR_SE;
1260 else
1261 new_msr &= ~MSR_SE;
1262#endif
1263 break;
1264 case SIG_DBG_BRANCH_TRACING:
1265#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1266 return -EINVAL;
1267#else
1268 if (op.dbg_value)
1269 new_msr |= MSR_BE;
1270 else
1271 new_msr &= ~MSR_BE;
1272#endif
1273 break;
1274
1275 default:
1276 return -EINVAL;
1277 }
1278 }
1279
1280 /* We wait until here to actually install the values in the
1281 registers so if we fail in the above loop, it will not
1282 affect the contents of these registers. After this point,
1283 failure is a problem, anyway, and it's very unlikely unless
1284 the user is really doing something wrong. */
1285 regs->msr = new_msr;
1286#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1287 current->thread.debug.dbcr0 = new_dbcr0;
1288#endif
1289
1290 if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1291 || __get_user(tmp, (u8 __user *) ctx)
1292 || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
1293 return -EFAULT;
1294
1295 /*
1296 * If we get a fault copying the context into the kernel's
1297 * image of the user's registers, we can't just return -EFAULT
1298 * because the user's registers will be corrupted. For instance
1299 * the NIP value may have been updated but not some of the
1300 * other registers. Given that we have done the access_ok
1301 * and successfully read the first and last bytes of the region
1302 * above, this should only happen in an out-of-memory situation
1303 * or if another thread unmaps the region containing the context.
1304 * We kill the task with a SIGSEGV in this situation.
1305 */
1306 if (do_setcontext(ctx, regs, 1)) {
1307 if (show_unhandled_signals)
1308 printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1309 "sys_debug_setcontext: %p nip %08lx "
1310 "lr %08lx\n",
1311 current->comm, current->pid,
1312 ctx, regs->nip, regs->link);
1313
1314 force_sig(SIGSEGV, current);
1315 goto out;
1316 }
1317
1318 /*
1319 * It's not clear whether or why it is desirable to save the
1320 * sigaltstack setting on signal delivery and restore it on
1321 * signal return. But other architectures do this and we have
1322 * always done it up until now so it is probably better not to
1323 * change it. -- paulus
1324 */
1325 restore_altstack(&ctx->uc_stack);
1326
1327 set_thread_flag(TIF_RESTOREALL);
1328 out:
1329 return 0;
1330}
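/*
 * Illustrative sketch of the debug-op array this syscall consumes
 * (userspace view; any dbg_type other than the two handled above is
 * rejected with -EINVAL):
 *
 *	struct sig_dbg_op op = {
 *		.dbg_type  = SIG_DBG_SINGLE_STEPPING,	// or SIG_DBG_BRANCH_TRACING
 *		.dbg_value = 1,				// non-zero enables, 0 disables
 *	};
 *	// sys_debug_setcontext(ctx, 1, &op, ...) applies the ops and then
 *	// acts like a setcontext on ctx, restoring registers and sigaltstack.
 */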
1331#endif
1332
1333/*
1334 * OK, we're invoking a handler
1335 */
1336int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
1337 struct task_struct *tsk)
1338{
1339 struct sigcontext __user *sc;
1340 struct sigframe __user *frame;
1341 struct mcontext __user *tm_mctx = NULL;
1342 unsigned long newsp = 0;
1343 int sigret;
1344 unsigned long tramp;
1345 struct pt_regs *regs = tsk->thread.regs;
1346
1347 BUG_ON(tsk != current);
1348
1349 /* Set up Signal Frame */
1350 frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
1351 if (unlikely(frame == NULL))
1352 goto badframe;
1353 sc = (struct sigcontext __user *) &frame->sctx;
1354
1355#if _NSIG != 64
1356#error "Please adjust handle_signal()"
1357#endif
1358 if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
1359 || __put_user(oldset->sig[0], &sc->oldmask)
1360#ifdef CONFIG_PPC64
1361 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1362#else
1363 || __put_user(oldset->sig[1], &sc->_unused[3])
1364#endif
1365 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1366 || __put_user(ksig->sig, &sc->signal))
1367 goto badframe;
1368
1369 if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
1370 sigret = 0;
1371 tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
1372 } else {
1373 sigret = __NR_sigreturn;
1374 tramp = (unsigned long) frame->mctx.tramp;
1375 }
1376
1377#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1378 tm_mctx = &frame->mctx_transact;
1379 if (MSR_TM_ACTIVE(regs->msr)) {
1380 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1381 sigret))
1382 goto badframe;
1383 }
1384 else
1385#endif
1386 {
1387 if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
1388 goto badframe;
1389 }
1390
1391 regs->link = tramp;
1392
1393 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
1394
1395 /* create a stack frame for the caller of the handler */
1396 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1397 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1398 goto badframe;
1399
1400 regs->gpr[1] = newsp;
1401 regs->gpr[3] = ksig->sig;
1402 regs->gpr[4] = (unsigned long) sc;
 1403 regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
1404 /* enter the signal handler in big-endian mode */
1405 regs->msr &= ~MSR_LE;
1406 return 0;
1407
1408badframe:
1409 if (show_unhandled_signals)
1410 printk_ratelimited(KERN_INFO
1411 "%s[%d]: bad frame in handle_signal32: "
1412 "%p nip %08lx lr %08lx\n",
1413 tsk->comm, tsk->pid,
1414 frame, regs->nip, regs->link);
1415
1416 return 1;
1417}
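/*
 * Illustrative userspace view of the classic (non-RT) entry set up above
 * (hypothetical handler, not kernel code): only the signal number and a
 * sigcontext pointer are passed, and the handler runs big-endian:
 *
 *	void handler(int sig, struct sigcontext *sc);
 *
 * with r3 = sig, r4 = sc (whose regs field points at the mcontext) and
 * LR = trampoline, so a plain return ends up in sys_sigreturn().
 */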
1418
1419/*
1420 * Do a signal return; undo the signal stack.
1421 */
1422long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1423 struct pt_regs *regs)
1424{
1425 struct sigframe __user *sf;
1426 struct sigcontext __user *sc;
1427 struct sigcontext sigctx;
1428 struct mcontext __user *sr;
1429 void __user *addr;
1430 sigset_t set;
1431#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1432 struct mcontext __user *mcp, *tm_mcp;
1433 unsigned long msr_hi;
1434#endif
1435
1436 /* Always make any pending restarted system calls return -EINTR */
1437 current->restart_block.fn = do_no_restart_syscall;
1438
1439 sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1440 sc = &sf->sctx;
1441 addr = sc;
1442 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1443 goto badframe;
1444
1445#ifdef CONFIG_PPC64
1446 /*
1447 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1448 * unused part of the signal stackframe
1449 */
1450 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1451#else
1452 set.sig[0] = sigctx.oldmask;
1453 set.sig[1] = sigctx._unused[3];
1454#endif
1455 set_current_blocked(&set);
1456
1457#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1458 mcp = (struct mcontext __user *)&sf->mctx;
1459 tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1460 if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1461 goto badframe;
1462 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1463 if (!cpu_has_feature(CPU_FTR_TM))
1464 goto badframe;
1465 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1466 goto badframe;
1467 } else
1468#endif
1469 {
1470 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1471 addr = sr;
1472 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1473 || restore_user_regs(regs, sr, 1))
1474 goto badframe;
1475 }
1476
1477 set_thread_flag(TIF_RESTOREALL);
1478 return 0;
1479
1480badframe:
1481 if (show_unhandled_signals)
1482 printk_ratelimited(KERN_INFO
1483 "%s[%d]: bad frame in sys_sigreturn: "
1484 "%p nip %08lx lr %08lx\n",
1485 current->comm, current->pid,
1486 addr, regs->nip, regs->link);
1487
1488 force_sig(SIGSEGV, current);
1489 return 0;
1490}
1/*
2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3 *
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6 * Copyright (C) 2001 IBM
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 *
10 * Derived from "arch/i386/kernel/signal.c"
11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 */
19
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/kernel.h>
24#include <linux/signal.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
27#include <linux/ptrace.h>
28#include <linux/ratelimit.h>
29#ifdef CONFIG_PPC64
30#include <linux/syscalls.h>
31#include <linux/compat.h>
32#else
33#include <linux/wait.h>
34#include <linux/unistd.h>
35#include <linux/stddef.h>
36#include <linux/tty.h>
37#include <linux/binfmts.h>
38#endif
39
40#include <asm/uaccess.h>
41#include <asm/cacheflush.h>
42#include <asm/syscalls.h>
43#include <asm/sigcontext.h>
44#include <asm/vdso.h>
45#include <asm/switch_to.h>
46#include <asm/tm.h>
47#ifdef CONFIG_PPC64
48#include "ppc32.h"
49#include <asm/unistd.h>
50#else
51#include <asm/ucontext.h>
52#include <asm/pgtable.h>
53#endif
54
55#include "signal.h"
56
57
58#ifdef CONFIG_PPC64
59#define sys_rt_sigreturn compat_sys_rt_sigreturn
60#define sys_swapcontext compat_sys_swapcontext
61#define sys_sigreturn compat_sys_sigreturn
62
63#define old_sigaction old_sigaction32
64#define sigcontext sigcontext32
65#define mcontext mcontext32
66#define ucontext ucontext32
67
68#define __save_altstack __compat_save_altstack
69
70/*
71 * Userspace code may pass a ucontext which doesn't include VSX added
72 * at the end. We need to check for this case.
73 */
74#define UCONTEXTSIZEWITHOUTVSX \
75 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
76
77/*
78 * Returning 0 means we return to userspace via
79 * ret_from_except and thus restore all user
80 * registers from *regs. This is what we need
81 * to do when a signal has been delivered.
82 */
83
84#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
85#undef __SIGNAL_FRAMESIZE
86#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
87#undef ELF_NVRREG
88#define ELF_NVRREG ELF_NVRREG32
89
90/*
91 * Functions for flipping sigsets (thanks to brain dead generic
92 * implementation that makes things simple for little endian only)
93 */
94static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
95{
96 compat_sigset_t cset;
97
98 switch (_NSIG_WORDS) {
99 case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
100 cset.sig[7] = set->sig[3] >> 32;
101 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
102 cset.sig[5] = set->sig[2] >> 32;
103 case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
104 cset.sig[3] = set->sig[1] >> 32;
105 case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
106 cset.sig[1] = set->sig[0] >> 32;
107 }
108 return copy_to_user(uset, &cset, sizeof(*uset));
109}
110
111static inline int get_sigset_t(sigset_t *set,
112 const compat_sigset_t __user *uset)
113{
114 compat_sigset_t s32;
115
116 if (copy_from_user(&s32, uset, sizeof(*uset)))
117 return -EFAULT;
118
119 /*
120 * Swap the 2 words of the 64-bit sigset_t (they are stored
121 * in the "wrong" endian in 32-bit user storage).
122 */
123 switch (_NSIG_WORDS) {
124 case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
125 case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
126 case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
127 case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
128 }
129 return 0;
130}
131
132#define to_user_ptr(p) ptr_to_compat(p)
133#define from_user_ptr(p) compat_ptr(p)
134
135static inline int save_general_regs(struct pt_regs *regs,
136 struct mcontext __user *frame)
137{
138 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
139 int i;
140
141 WARN_ON(!FULL_REGS(regs));
142
143 for (i = 0; i <= PT_RESULT; i ++) {
144 if (i == 14 && !FULL_REGS(regs))
145 i = 32;
146 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
147 return -EFAULT;
148 }
149 return 0;
150}
151
152static inline int restore_general_regs(struct pt_regs *regs,
153 struct mcontext __user *sr)
154{
155 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
156 int i;
157
158 for (i = 0; i <= PT_RESULT; i++) {
159 if ((i == PT_MSR) || (i == PT_SOFTE))
160 continue;
161 if (__get_user(gregs[i], &sr->mc_gregs[i]))
162 return -EFAULT;
163 }
164 return 0;
165}
166
167#else /* CONFIG_PPC64 */
168
169#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
170
171static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
172{
173 return copy_to_user(uset, set, sizeof(*uset));
174}
175
176static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
177{
178 return copy_from_user(set, uset, sizeof(*uset));
179}
180
181#define to_user_ptr(p) ((unsigned long)(p))
182#define from_user_ptr(p) ((void __user *)(p))
183
184static inline int save_general_regs(struct pt_regs *regs,
185 struct mcontext __user *frame)
186{
187 WARN_ON(!FULL_REGS(regs));
188 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
189}
190
191static inline int restore_general_regs(struct pt_regs *regs,
192 struct mcontext __user *sr)
193{
194 /* copy up to but not including MSR */
195 if (__copy_from_user(regs, &sr->mc_gregs,
196 PT_MSR * sizeof(elf_greg_t)))
197 return -EFAULT;
198 /* copy from orig_r3 (the word after the MSR) up to the end */
199 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
200 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
201 return -EFAULT;
202 return 0;
203}
204#endif
205
206/*
207 * When we have signals to deliver, we set up on the
208 * user stack, going down from the original stack pointer:
209 * an ABI gap of 56 words
210 * an mcontext struct
211 * a sigcontext struct
212 * a gap of __SIGNAL_FRAMESIZE bytes
213 *
214 * Each of these things must be a multiple of 16 bytes in size. The following
215 * structure represent all of this except the __SIGNAL_FRAMESIZE gap
216 *
217 */
218struct sigframe {
219 struct sigcontext sctx; /* the sigcontext */
220 struct mcontext mctx; /* all the register values */
221#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
222 struct sigcontext sctx_transact;
223 struct mcontext mctx_transact;
224#endif
225 /*
226 * Programs using the rs6000/xcoff abi can save up to 19 gp
227 * regs and 18 fp regs below sp before decrementing it.
228 */
229 int abigap[56];
230};
231
232/* We use the mc_pad field for the signal return trampoline. */
233#define tramp mc_pad
234
235/*
236 * When we have rt signals to deliver, we set up on the
237 * user stack, going down from the original stack pointer:
238 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
239 * a gap of __SIGNAL_FRAMESIZE+16 bytes
240 * (the +16 is to get the siginfo and ucontext in the same
241 * positions as in older kernels).
242 *
243 * Each of these things must be a multiple of 16 bytes in size.
244 *
245 */
246struct rt_sigframe {
247#ifdef CONFIG_PPC64
248 compat_siginfo_t info;
249#else
250 struct siginfo info;
251#endif
252 struct ucontext uc;
253#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
254 struct ucontext uc_transact;
255#endif
256 /*
257 * Programs using the rs6000/xcoff abi can save up to 19 gp
258 * regs and 18 fp regs below sp before decrementing it.
259 */
260 int abigap[56];
261};
262
263#ifdef CONFIG_VSX
264unsigned long copy_fpr_to_user(void __user *to,
265 struct task_struct *task)
266{
267 u64 buf[ELF_NFPREG];
268 int i;
269
270 /* save FPR copy to local buffer then write to the thread_struct */
271 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
272 buf[i] = task->thread.TS_FPR(i);
273 buf[i] = task->thread.fp_state.fpscr;
274 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
275}
276
277unsigned long copy_fpr_from_user(struct task_struct *task,
278 void __user *from)
279{
280 u64 buf[ELF_NFPREG];
281 int i;
282
283 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
284 return 1;
285 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
286 task->thread.TS_FPR(i) = buf[i];
287 task->thread.fp_state.fpscr = buf[i];
288
289 return 0;
290}
291
292unsigned long copy_vsx_to_user(void __user *to,
293 struct task_struct *task)
294{
295 u64 buf[ELF_NVSRHALFREG];
296 int i;
297
298 /* save FPR copy to local buffer then write to the thread_struct */
299 for (i = 0; i < ELF_NVSRHALFREG; i++)
300 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
301 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
302}
303
304unsigned long copy_vsx_from_user(struct task_struct *task,
305 void __user *from)
306{
307 u64 buf[ELF_NVSRHALFREG];
308 int i;
309
310 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
311 return 1;
312 for (i = 0; i < ELF_NVSRHALFREG ; i++)
313 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
314 return 0;
315}
316
317#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
318unsigned long copy_transact_fpr_to_user(void __user *to,
319 struct task_struct *task)
320{
321 u64 buf[ELF_NFPREG];
322 int i;
323
324 /* save FPR copy to local buffer then write to the thread_struct */
325 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
326 buf[i] = task->thread.TS_TRANS_FPR(i);
327 buf[i] = task->thread.transact_fp.fpscr;
328 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
329}
330
331unsigned long copy_transact_fpr_from_user(struct task_struct *task,
332 void __user *from)
333{
334 u64 buf[ELF_NFPREG];
335 int i;
336
337 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
338 return 1;
339 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
340 task->thread.TS_TRANS_FPR(i) = buf[i];
341 task->thread.transact_fp.fpscr = buf[i];
342
343 return 0;
344}
345
346unsigned long copy_transact_vsx_to_user(void __user *to,
347 struct task_struct *task)
348{
349 u64 buf[ELF_NVSRHALFREG];
350 int i;
351
352 /* save FPR copy to local buffer then write to the thread_struct */
353 for (i = 0; i < ELF_NVSRHALFREG; i++)
354 buf[i] = task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET];
355 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
356}
357
358unsigned long copy_transact_vsx_from_user(struct task_struct *task,
359 void __user *from)
360{
361 u64 buf[ELF_NVSRHALFREG];
362 int i;
363
364 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
365 return 1;
366 for (i = 0; i < ELF_NVSRHALFREG ; i++)
367 task->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = buf[i];
368 return 0;
369}
370#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
371#else
372inline unsigned long copy_fpr_to_user(void __user *to,
373 struct task_struct *task)
374{
375 return __copy_to_user(to, task->thread.fp_state.fpr,
376 ELF_NFPREG * sizeof(double));
377}
378
379inline unsigned long copy_fpr_from_user(struct task_struct *task,
380 void __user *from)
381{
382 return __copy_from_user(task->thread.fp_state.fpr, from,
383 ELF_NFPREG * sizeof(double));
384}
385
386#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
387inline unsigned long copy_transact_fpr_to_user(void __user *to,
388 struct task_struct *task)
389{
390 return __copy_to_user(to, task->thread.transact_fp.fpr,
391 ELF_NFPREG * sizeof(double));
392}
393
394inline unsigned long copy_transact_fpr_from_user(struct task_struct *task,
395 void __user *from)
396{
397 return __copy_from_user(task->thread.transact_fp.fpr, from,
398 ELF_NFPREG * sizeof(double));
399}
400#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
401#endif
402
403/*
404 * Save the current user registers on the user stack.  We only save the
405 * altivec/spe registers if the process has used altivec/spe instructions
406 * at some point, and VSX state only when the caller's context has room for it.
407 */
408static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
409 struct mcontext __user *tm_frame, int sigret,
410 int ctx_has_vsx_region)
411{
412 unsigned long msr = regs->msr;
413
414 /* Make sure floating point registers are stored in regs */
415 flush_fp_to_thread(current);
416
417 /* save general registers */
418 if (save_general_regs(regs, frame))
419 return 1;
420
421#ifdef CONFIG_ALTIVEC
422 /* save altivec registers */
423 if (current->thread.used_vr) {
424 flush_altivec_to_thread(current);
425		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
426 ELF_NVRREG * sizeof(vector128)))
427 return 1;
428 /* set MSR_VEC in the saved MSR value to indicate that
429 frame->mc_vregs contains valid data */
430 msr |= MSR_VEC;
431 }
432 /* else assert((regs->msr & MSR_VEC) == 0) */
433
434	/* We always copy to/from vrsave; it's 0 if we don't have or don't
435 * use altivec. Since VSCR only contains 32 bits saved in the least
436 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
437 * most significant bits of that same vector. --BenH
438 * Note that the current VRSAVE value is in the SPR at this point.
439 */
440 if (cpu_has_feature(CPU_FTR_ALTIVEC))
441 current->thread.vrsave = mfspr(SPRN_VRSAVE);
442 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
443 return 1;
444#endif /* CONFIG_ALTIVEC */
445 if (copy_fpr_to_user(&frame->mc_fregs, current))
446 return 1;
447
448 /*
449 * Clear the MSR VSX bit to indicate there is no valid state attached
450 * to this context, except in the specific case below where we set it.
451 */
452 msr &= ~MSR_VSX;
453#ifdef CONFIG_VSX
454 /*
455 * Copy VSR 0-31 upper half from thread_struct to local
456 * buffer, then write that to userspace. Also set MSR_VSX in
457	 * the saved MSR value to indicate that frame->mc_vsregs
458 * contains valid data
459 */
460 if (current->thread.used_vsr && ctx_has_vsx_region) {
461 flush_vsx_to_thread(current);
462 if (copy_vsx_to_user(&frame->mc_vsregs, current))
463 return 1;
464 msr |= MSR_VSX;
465 }
466#endif /* CONFIG_VSX */
467#ifdef CONFIG_SPE
468 /* save spe registers */
469 if (current->thread.used_spe) {
470 flush_spe_to_thread(current);
471 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
472 ELF_NEVRREG * sizeof(u32)))
473 return 1;
474 /* set MSR_SPE in the saved MSR value to indicate that
475 frame->mc_vregs contains valid data */
476 msr |= MSR_SPE;
477 }
478 /* else assert((regs->msr & MSR_SPE) == 0) */
479
480 /* We always copy to/from spefscr */
481 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
482 return 1;
483#endif /* CONFIG_SPE */
484
485 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
486 return 1;
487	/* We need to write 0 to the top 32 bits of the MSR in the tm frame
488	 * so that we can check it on restore to see if TM is active
489	 */
490 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
491 return 1;
492
493 if (sigret) {
494 /* Set up the sigreturn trampoline: li r0,sigret; sc */
495 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
496 || __put_user(0x44000002UL, &frame->tramp[1]))
497 return 1;
498 flush_icache_range((unsigned long) &frame->tramp[0],
499 (unsigned long) &frame->tramp[2]);
500 }
501
502 return 0;
503}
504
505#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
506/*
507 * Save the current user registers on the user stack.
508 * We only save the altivec/spe registers if the process has used
509 * altivec/spe instructions at some point.
510 * We also save the transactional registers to a second ucontext in the
511 * frame.
512 *
513 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
514 */
515static int save_tm_user_regs(struct pt_regs *regs,
516 struct mcontext __user *frame,
517 struct mcontext __user *tm_frame, int sigret)
518{
519 unsigned long msr = regs->msr;
520
521 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
522 * just indicates to userland that we were doing a transaction, but we
523 * don't want to return in transactional state. This also ensures
524 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
525 */
526 regs->msr &= ~MSR_TS_MASK;
527
528 /* Make sure floating point registers are stored in regs */
529 flush_fp_to_thread(current);
530
531 /* Save both sets of general registers */
532	if (save_general_regs(&current->thread.ckpt_regs, frame)
533 || save_general_regs(regs, tm_frame))
534 return 1;
535
536 /* Stash the top half of the 64bit MSR into the 32bit MSR word
537 * of the transactional mcontext. This way we have a backward-compatible
538 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
539 * also look at what type of transaction (T or S) was active at the
540 * time of the signal.
541 */
542 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
543 return 1;
544
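	/*
	 * For each register class below, the checkpointed values go into
	 * 'frame'.  If the transaction was using that class (the relevant
	 * MSR bit is set), the speculative values go into 'tm_frame';
	 * otherwise the checkpointed values are duplicated there.
	 */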
545#ifdef CONFIG_ALTIVEC
546 /* save altivec registers */
547 if (current->thread.used_vr) {
548 flush_altivec_to_thread(current);
549		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
550 ELF_NVRREG * sizeof(vector128)))
551 return 1;
552 if (msr & MSR_VEC) {
553 if (__copy_to_user(&tm_frame->mc_vregs,
554					   &current->thread.transact_vr,
555 ELF_NVRREG * sizeof(vector128)))
556 return 1;
557 } else {
558 if (__copy_to_user(&tm_frame->mc_vregs,
559					   &current->thread.vr_state,
560 ELF_NVRREG * sizeof(vector128)))
561 return 1;
562 }
563
564 /* set MSR_VEC in the saved MSR value to indicate that
565 * frame->mc_vregs contains valid data
566 */
567 msr |= MSR_VEC;
568 }
569
570	/* We always copy to/from vrsave; it's 0 if we don't have or don't
571 * use altivec. Since VSCR only contains 32 bits saved in the least
572 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
573 * most significant bits of that same vector. --BenH
574 */
575 if (cpu_has_feature(CPU_FTR_ALTIVEC))
576 current->thread.vrsave = mfspr(SPRN_VRSAVE);
577 if (__put_user(current->thread.vrsave,
578 (u32 __user *)&frame->mc_vregs[32]))
579 return 1;
580 if (msr & MSR_VEC) {
581 if (__put_user(current->thread.transact_vrsave,
582 (u32 __user *)&tm_frame->mc_vregs[32]))
583 return 1;
584 } else {
585 if (__put_user(current->thread.vrsave,
586 (u32 __user *)&tm_frame->mc_vregs[32]))
587 return 1;
588 }
589#endif /* CONFIG_ALTIVEC */
590
591 if (copy_fpr_to_user(&frame->mc_fregs, current))
592 return 1;
593 if (msr & MSR_FP) {
594 if (copy_transact_fpr_to_user(&tm_frame->mc_fregs, current))
595 return 1;
596 } else {
597 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
598 return 1;
599 }
600
601#ifdef CONFIG_VSX
602 /*
603 * Copy VSR 0-31 upper half from thread_struct to local
604 * buffer, then write that to userspace. Also set MSR_VSX in
605	 * the saved MSR value to indicate that frame->mc_vsregs
606 * contains valid data
607 */
608 if (current->thread.used_vsr) {
609 flush_vsx_to_thread(current);
610 if (copy_vsx_to_user(&frame->mc_vsregs, current))
611 return 1;
612 if (msr & MSR_VSX) {
613 if (copy_transact_vsx_to_user(&tm_frame->mc_vsregs,
614 current))
615 return 1;
616 } else {
617 if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
618 return 1;
619 }
620
621 msr |= MSR_VSX;
622 }
623#endif /* CONFIG_VSX */
624#ifdef CONFIG_SPE
625 /* SPE regs are not checkpointed with TM, so this section is
626 * simply the same as in save_user_regs().
627 */
628 if (current->thread.used_spe) {
629 flush_spe_to_thread(current);
630 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
631 ELF_NEVRREG * sizeof(u32)))
632 return 1;
633 /* set MSR_SPE in the saved MSR value to indicate that
634 * frame->mc_vregs contains valid data */
635 msr |= MSR_SPE;
636 }
637
638 /* We always copy to/from spefscr */
639 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
640 return 1;
641#endif /* CONFIG_SPE */
642
643 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
644 return 1;
645 if (sigret) {
646 /* Set up the sigreturn trampoline: li r0,sigret; sc */
647 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
648 || __put_user(0x44000002UL, &frame->tramp[1]))
649 return 1;
650 flush_icache_range((unsigned long) &frame->tramp[0],
651 (unsigned long) &frame->tramp[2]);
652 }
653
654 return 0;
655}
656#endif
657
658/*
659 * Restore the current user register values from the user stack,
660 * (except for MSR).
661 */
662static long restore_user_regs(struct pt_regs *regs,
663 struct mcontext __user *sr, int sig)
664{
665 long err;
666 unsigned int save_r2 = 0;
667 unsigned long msr;
668#ifdef CONFIG_VSX
669 int i;
670#endif
671
672 /*
673	 * restore general registers, but not MSR or SOFTE.  Also take
674	 * care to keep r2 (TLS) intact if this is not a signal return
675 */
676 if (!sig)
677 save_r2 = (unsigned int)regs->gpr[2];
678 err = restore_general_regs(regs, sr);
679 regs->trap = 0;
680 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
681 if (!sig)
682 regs->gpr[2] = (unsigned long) save_r2;
683 if (err)
684 return 1;
685
686	/* if doing signal return, restore the previous endian mode (MSR_LE) */
687 if (sig)
688 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
689
690#ifdef CONFIG_ALTIVEC
691 /*
692 * Force the process to reload the altivec registers from
693 * current->thread when it next does altivec instructions
694 */
695 regs->msr &= ~MSR_VEC;
696 if (msr & MSR_VEC) {
697 /* restore altivec registers from the stack */
698		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
699 sizeof(sr->mc_vregs)))
700 return 1;
701 } else if (current->thread.used_vr)
702		memset(&current->thread.vr_state, 0,
703 ELF_NVRREG * sizeof(vector128));
704
705 /* Always get VRSAVE back */
706 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
707 return 1;
708 if (cpu_has_feature(CPU_FTR_ALTIVEC))
709 mtspr(SPRN_VRSAVE, current->thread.vrsave);
710#endif /* CONFIG_ALTIVEC */
711 if (copy_fpr_from_user(current, &sr->mc_fregs))
712 return 1;
713
714#ifdef CONFIG_VSX
715 /*
716 * Force the process to reload the VSX registers from
717 * current->thread when it next does VSX instruction.
718 */
719 regs->msr &= ~MSR_VSX;
720 if (msr & MSR_VSX) {
721 /*
722		 * Restore the VSR upper halves from the stack to a local
723		 * buffer, then write them out to the thread_struct
724 */
725 if (copy_vsx_from_user(current, &sr->mc_vsregs))
726 return 1;
727 } else if (current->thread.used_vsr)
728 for (i = 0; i < 32 ; i++)
729 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
730#endif /* CONFIG_VSX */
731 /*
732 * force the process to reload the FP registers from
733 * current->thread when it next does FP instructions
734 */
735 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
736
737#ifdef CONFIG_SPE
738 /* force the process to reload the spe registers from
739 current->thread when it next does spe instructions */
740 regs->msr &= ~MSR_SPE;
741 if (msr & MSR_SPE) {
742 /* restore spe registers from the stack */
743 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
744 ELF_NEVRREG * sizeof(u32)))
745 return 1;
746 } else if (current->thread.used_spe)
747 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
748
749 /* Always get SPEFSCR back */
750 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
751 return 1;
752#endif /* CONFIG_SPE */
753
754 return 0;
755}
756
757#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
758/*
759 * Restore the current user register values from the user stack, except for
760 * MSR, and recheckpoint the original checkpointed register state for processes
761 * in transactions.
762 */
763static long restore_tm_user_regs(struct pt_regs *regs,
764 struct mcontext __user *sr,
765 struct mcontext __user *tm_sr)
766{
767 long err;
768 unsigned long msr, msr_hi;
769#ifdef CONFIG_VSX
770 int i;
771#endif
772
773 /*
774	 * restore general registers, but not MSR or SOFTE.  Also take
775	 * care to keep r2 (TLS) intact if this is not a signal return.
776 * See comment in signal_64.c:restore_tm_sigcontexts();
777 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
778 * were set by the signal delivery.
779 */
780 err = restore_general_regs(regs, tm_sr);
781	err |= restore_general_regs(&current->thread.ckpt_regs, sr);
782
783 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
784
785 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
786 if (err)
787 return 1;
788
789 /* Restore the previous little-endian mode */
790 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
791
792#ifdef CONFIG_ALTIVEC
793 regs->msr &= ~MSR_VEC;
794 if (msr & MSR_VEC) {
795 /* restore altivec registers from the stack */
796		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
797 sizeof(sr->mc_vregs)) ||
798		    __copy_from_user(&current->thread.transact_vr,
799 &tm_sr->mc_vregs,
800 sizeof(sr->mc_vregs)))
801 return 1;
802 } else if (current->thread.used_vr) {
803		memset(&current->thread.vr_state, 0,
804 ELF_NVRREG * sizeof(vector128));
805		memset(&current->thread.transact_vr, 0,
806 ELF_NVRREG * sizeof(vector128));
807 }
808
809 /* Always get VRSAVE back */
810 if (__get_user(current->thread.vrsave,
811 (u32 __user *)&sr->mc_vregs[32]) ||
812 __get_user(current->thread.transact_vrsave,
813 (u32 __user *)&tm_sr->mc_vregs[32]))
814 return 1;
815 if (cpu_has_feature(CPU_FTR_ALTIVEC))
816 mtspr(SPRN_VRSAVE, current->thread.vrsave);
817#endif /* CONFIG_ALTIVEC */
818
819 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
820
821 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
822 copy_transact_fpr_from_user(current, &tm_sr->mc_fregs))
823 return 1;
824
825#ifdef CONFIG_VSX
826 regs->msr &= ~MSR_VSX;
827 if (msr & MSR_VSX) {
828 /*
829		 * Restore the VSR upper halves from the stack to a local
830		 * buffer, then write them out to the thread_struct
831 */
832 if (copy_vsx_from_user(current, &sr->mc_vsregs) ||
833 copy_transact_vsx_from_user(current, &tm_sr->mc_vsregs))
834 return 1;
835 } else if (current->thread.used_vsr)
836 for (i = 0; i < 32 ; i++) {
837 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
838 current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
839 }
840#endif /* CONFIG_VSX */
841
842#ifdef CONFIG_SPE
843 /* SPE regs are not checkpointed with TM, so this section is
844 * simply the same as in restore_user_regs().
845 */
846 regs->msr &= ~MSR_SPE;
847 if (msr & MSR_SPE) {
848 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
849 ELF_NEVRREG * sizeof(u32)))
850 return 1;
851 } else if (current->thread.used_spe)
852 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
853
854 /* Always get SPEFSCR back */
855 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
856 + ELF_NEVRREG))
857 return 1;
858#endif /* CONFIG_SPE */
859
860 /* Get the top half of the MSR from the user context */
861 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
862 return 1;
863 msr_hi <<= 32;
864 /* If TM bits are set to the reserved value, it's an invalid context */
865 if (MSR_TM_RESV(msr_hi))
866 return 1;
867 /* Pull in the MSR TM bits from the user context */
868 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
869 /* Now, recheckpoint. This loads up all of the checkpointed (older)
870 * registers, including FP and V[S]Rs. After recheckpointing, the
871 * transactional versions should be loaded.
872 */
873 tm_enable();
874 /* Make sure the transaction is marked as failed */
875 current->thread.tm_texasr |= TEXASR_FS;
876 /* This loads the checkpointed FP/VEC state, if used */
877	tm_recheckpoint(&current->thread, msr);
878
879 /* This loads the speculative FP/VEC state, if used */
880 if (msr & MSR_FP) {
881		do_load_up_transact_fpu(&current->thread);
882 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
883 }
884#ifdef CONFIG_ALTIVEC
885 if (msr & MSR_VEC) {
886		do_load_up_transact_altivec(&current->thread);
887 regs->msr |= MSR_VEC;
888 }
889#endif
890
891 return 0;
892}
893#endif
894
895#ifdef CONFIG_PPC64
896int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
897{
898 int err;
899
900 if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
901 return -EFAULT;
902
903 /* If you change siginfo_t structure, please be sure
904 * this code is fixed accordingly.
905 * It should never copy any pad contained in the structure
906 * to avoid security leaks, but must copy the generic
907 * 3 ints plus the relevant union member.
908 * This routine must convert siginfo from 64bit to 32bit as well
909 * at the same time.
910 */
911 err = __put_user(s->si_signo, &d->si_signo);
912 err |= __put_user(s->si_errno, &d->si_errno);
913 err |= __put_user((short)s->si_code, &d->si_code);
914 if (s->si_code < 0)
915 err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
916 SI_PAD_SIZE32);
917 else switch(s->si_code >> 16) {
918 case __SI_CHLD >> 16:
919 err |= __put_user(s->si_pid, &d->si_pid);
920 err |= __put_user(s->si_uid, &d->si_uid);
921 err |= __put_user(s->si_utime, &d->si_utime);
922 err |= __put_user(s->si_stime, &d->si_stime);
923 err |= __put_user(s->si_status, &d->si_status);
924 break;
925 case __SI_FAULT >> 16:
926 err |= __put_user((unsigned int)(unsigned long)s->si_addr,
927 &d->si_addr);
928 break;
929 case __SI_POLL >> 16:
930 err |= __put_user(s->si_band, &d->si_band);
931 err |= __put_user(s->si_fd, &d->si_fd);
932 break;
933 case __SI_TIMER >> 16:
934 err |= __put_user(s->si_tid, &d->si_tid);
935 err |= __put_user(s->si_overrun, &d->si_overrun);
936 err |= __put_user(s->si_int, &d->si_int);
937 break;
938 case __SI_SYS >> 16:
939 err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
940 err |= __put_user(s->si_syscall, &d->si_syscall);
941 err |= __put_user(s->si_arch, &d->si_arch);
942 break;
943 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
944 case __SI_MESGQ >> 16:
945 err |= __put_user(s->si_int, &d->si_int);
946 /* fallthrough */
947 case __SI_KILL >> 16:
948 default:
949 err |= __put_user(s->si_pid, &d->si_pid);
950 err |= __put_user(s->si_uid, &d->si_uid);
951 break;
952 }
953 return err;
954}
955
956#define copy_siginfo_to_user copy_siginfo_to_user32
957
958int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
959{
960 if (copy_from_user(to, from, 3*sizeof(int)) ||
961 copy_from_user(to->_sifields._pad,
962 from->_sifields._pad, SI_PAD_SIZE32))
963 return -EFAULT;
964
965 return 0;
966}
967#endif /* CONFIG_PPC64 */
968
969/*
970 * Set up a signal frame for a "real-time" signal handler
971 * (one which gets siginfo).
972 */
973int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
974 struct pt_regs *regs)
975{
976 struct rt_sigframe __user *rt_sf;
977 struct mcontext __user *frame;
978 struct mcontext __user *tm_frame = NULL;
979 void __user *addr;
980 unsigned long newsp = 0;
981 int sigret;
982 unsigned long tramp;
983
984 /* Set up Signal Frame */
985 /* Put a Real Time Context onto stack */
986 rt_sf = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*rt_sf), 1);
987 addr = rt_sf;
988 if (unlikely(rt_sf == NULL))
989 goto badframe;
990
991 /* Put the siginfo & fill in most of the ucontext */
992 if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
993 || __put_user(0, &rt_sf->uc.uc_flags)
994 || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
995 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
996 &rt_sf->uc.uc_regs)
997 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
998 goto badframe;
999
1000 /* Save user registers on the stack */
1001 frame = &rt_sf->uc.uc_mcontext;
1002 addr = frame;
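	/*
	 * Use the sigreturn trampoline in the vDSO if one is mapped;
	 * otherwise write a two-instruction trampoline into the frame.
	 */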
1003 if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
1004 sigret = 0;
1005 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
1006 } else {
1007 sigret = __NR_rt_sigreturn;
1008 tramp = (unsigned long) frame->tramp;
1009 }
1010
1011#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1012 tm_frame = &rt_sf->uc_transact.uc_mcontext;
1013 if (MSR_TM_ACTIVE(regs->msr)) {
1014 if (__put_user((unsigned long)&rt_sf->uc_transact,
1015 &rt_sf->uc.uc_link) ||
1016 __put_user((unsigned long)tm_frame,
1017 &rt_sf->uc_transact.uc_regs))
1018 goto badframe;
1019 if (save_tm_user_regs(regs, frame, tm_frame, sigret))
1020 goto badframe;
1021 }
1022 else
1023#endif
1024 {
1025 if (__put_user(0, &rt_sf->uc.uc_link))
1026 goto badframe;
1027 if (save_user_regs(regs, frame, tm_frame, sigret, 1))
1028 goto badframe;
1029 }
1030 regs->link = tramp;
1031
1032 current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
1033
1034 /* create a stack frame for the caller of the handler */
1035 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
1036 addr = (void __user *)regs->gpr[1];
1037 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1038 goto badframe;
1039
1040 /* Fill registers for signal handler */
1041 regs->gpr[1] = newsp;
1042 regs->gpr[3] = ksig->sig;
1043 regs->gpr[4] = (unsigned long) &rt_sf->info;
1044 regs->gpr[5] = (unsigned long) &rt_sf->uc;
1045 regs->gpr[6] = (unsigned long) rt_sf;
1046 regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
1047 /* enter the signal handler in native-endian mode */
1048 regs->msr &= ~MSR_LE;
1049 regs->msr |= (MSR_KERNEL & MSR_LE);
1050 return 0;
1051
1052badframe:
1053 if (show_unhandled_signals)
1054 printk_ratelimited(KERN_INFO
1055 "%s[%d]: bad frame in handle_rt_signal32: "
1056 "%p nip %08lx lr %08lx\n",
1057 current->comm, current->pid,
1058 addr, regs->nip, regs->link);
1059
1060 return 1;
1061}
1062
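/*
 * Install the register state described by a ucontext.  'sig' is non-zero
 * for a signal return, in which case r2 and the MSR_LE bit are taken from
 * the context rather than preserved.
 */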
1063static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
1064{
1065 sigset_t set;
1066 struct mcontext __user *mcp;
1067
1068 if (get_sigset_t(&set, &ucp->uc_sigmask))
1069 return -EFAULT;
1070#ifdef CONFIG_PPC64
1071 {
1072 u32 cmcp;
1073
1074 if (__get_user(cmcp, &ucp->uc_regs))
1075 return -EFAULT;
1076 mcp = (struct mcontext __user *)(u64)cmcp;
1077 /* no need to check access_ok(mcp), since mcp < 4GB */
1078 }
1079#else
1080 if (__get_user(mcp, &ucp->uc_regs))
1081 return -EFAULT;
1082 if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
1083 return -EFAULT;
1084#endif
1085 set_current_blocked(&set);
1086 if (restore_user_regs(regs, mcp, sig))
1087 return -EFAULT;
1088
1089 return 0;
1090}
1091
1092#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1093static int do_setcontext_tm(struct ucontext __user *ucp,
1094 struct ucontext __user *tm_ucp,
1095 struct pt_regs *regs)
1096{
1097 sigset_t set;
1098 struct mcontext __user *mcp;
1099 struct mcontext __user *tm_mcp;
1100 u32 cmcp;
1101 u32 tm_cmcp;
1102
1103 if (get_sigset_t(&set, &ucp->uc_sigmask))
1104 return -EFAULT;
1105
1106 if (__get_user(cmcp, &ucp->uc_regs) ||
1107 __get_user(tm_cmcp, &tm_ucp->uc_regs))
1108 return -EFAULT;
1109 mcp = (struct mcontext __user *)(u64)cmcp;
1110 tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
1111 /* no need to check access_ok(mcp), since mcp < 4GB */
1112
1113 set_current_blocked(&set);
1114 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1115 return -EFAULT;
1116
1117 return 0;
1118}
1119#endif
1120
1121long sys_swapcontext(struct ucontext __user *old_ctx,
1122 struct ucontext __user *new_ctx,
1123 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
1124{
1125 unsigned char tmp;
1126 int ctx_has_vsx_region = 0;
1127
1128#ifdef CONFIG_PPC64
1129 unsigned long new_msr = 0;
1130
1131 if (new_ctx) {
1132 struct mcontext __user *mcp;
1133 u32 cmcp;
1134
1135 /*
1136 * Get pointer to the real mcontext. No need for
1137 * access_ok since we are dealing with compat
1138 * pointers.
1139 */
1140 if (__get_user(cmcp, &new_ctx->uc_regs))
1141 return -EFAULT;
1142 mcp = (struct mcontext __user *)(u64)cmcp;
1143 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1144 return -EFAULT;
1145 }
1146 /*
1147 * Check that the context is not smaller than the original
1148 * size (with VMX but without VSX)
1149 */
1150 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1151 return -EINVAL;
1152 /*
1153 * If the new context state sets the MSR VSX bits but
1154 * it doesn't provide VSX state.
1155 */
1156 if ((ctx_size < sizeof(struct ucontext)) &&
1157 (new_msr & MSR_VSX))
1158 return -EINVAL;
1159 /* Does the context have enough room to store VSX data? */
1160 if (ctx_size >= sizeof(struct ucontext))
1161 ctx_has_vsx_region = 1;
1162#else
1163 /* Context size is for future use. Right now, we only make sure
1164 * we are passed something we understand
1165 */
1166 if (ctx_size < sizeof(struct ucontext))
1167 return -EINVAL;
1168#endif
1169 if (old_ctx != NULL) {
1170 struct mcontext __user *mctx;
1171
1172 /*
1173 * old_ctx might not be 16-byte aligned, in which
1174 * case old_ctx->uc_mcontext won't be either.
1175 * Because we have the old_ctx->uc_pad2 field
1176 * before old_ctx->uc_mcontext, we need to round down
1177 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1178 */
1179 mctx = (struct mcontext __user *)
1180 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1181 if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
1182 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
1183	    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
1184 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1185 return -EFAULT;
1186 }
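	/* A NULL new context means the caller only wanted to save the old one. */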
1187 if (new_ctx == NULL)
1188 return 0;
1189 if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
1190 || __get_user(tmp, (u8 __user *) new_ctx)
1191 || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
1192 return -EFAULT;
1193
1194 /*
1195 * If we get a fault copying the context into the kernel's
1196 * image of the user's registers, we can't just return -EFAULT
1197 * because the user's registers will be corrupted. For instance
1198 * the NIP value may have been updated but not some of the
1199 * other registers. Given that we have done the access_ok
1200 * and successfully read the first and last bytes of the region
1201 * above, this should only happen in an out-of-memory situation
1202 * or if another thread unmaps the region containing the context.
1203 * We kill the task with a SIGSEGV in this situation.
1204 */
1205 if (do_setcontext(new_ctx, regs, 0))
1206 do_exit(SIGSEGV);
1207
1208 set_thread_flag(TIF_RESTOREALL);
1209 return 0;
1210}
1211
1212long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1213 struct pt_regs *regs)
1214{
1215 struct rt_sigframe __user *rt_sf;
1216#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1217 struct ucontext __user *uc_transact;
1218 unsigned long msr_hi;
1219 unsigned long tmp;
1220 int tm_restore = 0;
1221#endif
1222 /* Always make any pending restarted system calls return -EINTR */
1223 current->restart_block.fn = do_no_restart_syscall;
1224
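	/*
	 * handle_rt_signal32() placed the rt_sigframe __SIGNAL_FRAMESIZE + 16
	 * bytes above the stack pointer it installed, so recover it from r1.
	 */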
1225 rt_sf = (struct rt_sigframe __user *)
1226 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1227 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1228 goto bad;
1229#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1230 if (__get_user(tmp, &rt_sf->uc.uc_link))
1231 goto bad;
1232 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1233 if (uc_transact) {
1234 u32 cmcp;
1235 struct mcontext __user *mcp;
1236
1237 if (__get_user(cmcp, &uc_transact->uc_regs))
1238 return -EFAULT;
1239 mcp = (struct mcontext __user *)(u64)cmcp;
1240 /* The top 32 bits of the MSR are stashed in the transactional
1241 * ucontext. */
1242 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1243 goto bad;
1244
1245 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1246			/* We only recheckpoint on return if we are in a
1247			 * transaction.
1248			 */
1249 tm_restore = 1;
1250 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1251 goto bad;
1252 }
1253 }
1254 if (!tm_restore)
1255 /* Fall through, for non-TM restore */
1256#endif
1257 if (do_setcontext(&rt_sf->uc, regs, 1))
1258 goto bad;
1259
1260 /*
1261 * It's not clear whether or why it is desirable to save the
1262 * sigaltstack setting on signal delivery and restore it on
1263 * signal return. But other architectures do this and we have
1264 * always done it up until now so it is probably better not to
1265 * change it. -- paulus
1266 */
1267#ifdef CONFIG_PPC64
1268 if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1269 goto bad;
1270#else
1271 if (restore_altstack(&rt_sf->uc.uc_stack))
1272 goto bad;
1273#endif
1274 set_thread_flag(TIF_RESTOREALL);
1275 return 0;
1276
1277 bad:
1278 if (show_unhandled_signals)
1279 printk_ratelimited(KERN_INFO
1280 "%s[%d]: bad frame in sys_rt_sigreturn: "
1281 "%p nip %08lx lr %08lx\n",
1282 current->comm, current->pid,
1283 rt_sf, regs->nip, regs->link);
1284
1285 force_sig(SIGSEGV, current);
1286 return 0;
1287}
1288
1289#ifdef CONFIG_PPC32
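/*
 * Apply the requested debug settings (single-stepping or branch tracing),
 * then install the supplied context much like a signal return would.
 */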
1290int sys_debug_setcontext(struct ucontext __user *ctx,
1291 int ndbg, struct sig_dbg_op __user *dbg,
1292 int r6, int r7, int r8,
1293 struct pt_regs *regs)
1294{
1295 struct sig_dbg_op op;
1296 int i;
1297 unsigned char tmp;
1298 unsigned long new_msr = regs->msr;
1299#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1300 unsigned long new_dbcr0 = current->thread.debug.dbcr0;
1301#endif
1302
1303 for (i=0; i<ndbg; i++) {
1304 if (copy_from_user(&op, dbg + i, sizeof(op)))
1305 return -EFAULT;
1306 switch (op.dbg_type) {
1307 case SIG_DBG_SINGLE_STEPPING:
1308#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1309 if (op.dbg_value) {
1310 new_msr |= MSR_DE;
1311 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1312 } else {
1313 new_dbcr0 &= ~DBCR0_IC;
1314 if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1315 current->thread.debug.dbcr1)) {
1316 new_msr &= ~MSR_DE;
1317 new_dbcr0 &= ~DBCR0_IDM;
1318 }
1319 }
1320#else
1321 if (op.dbg_value)
1322 new_msr |= MSR_SE;
1323 else
1324 new_msr &= ~MSR_SE;
1325#endif
1326 break;
1327 case SIG_DBG_BRANCH_TRACING:
1328#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1329 return -EINVAL;
1330#else
1331 if (op.dbg_value)
1332 new_msr |= MSR_BE;
1333 else
1334 new_msr &= ~MSR_BE;
1335#endif
1336 break;
1337
1338 default:
1339 return -EINVAL;
1340 }
1341 }
1342
1343 /* We wait until here to actually install the values in the
1344 registers so if we fail in the above loop, it will not
1345 affect the contents of these registers. After this point,
1346 failure is a problem, anyway, and it's very unlikely unless
1347 the user is really doing something wrong. */
1348 regs->msr = new_msr;
1349#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1350 current->thread.debug.dbcr0 = new_dbcr0;
1351#endif
1352
1353 if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1354 || __get_user(tmp, (u8 __user *) ctx)
1355 || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
1356 return -EFAULT;
1357
1358 /*
1359 * If we get a fault copying the context into the kernel's
1360 * image of the user's registers, we can't just return -EFAULT
1361 * because the user's registers will be corrupted. For instance
1362 * the NIP value may have been updated but not some of the
1363 * other registers. Given that we have done the access_ok
1364 * and successfully read the first and last bytes of the region
1365 * above, this should only happen in an out-of-memory situation
1366 * or if another thread unmaps the region containing the context.
1367 * We kill the task with a SIGSEGV in this situation.
1368 */
1369 if (do_setcontext(ctx, regs, 1)) {
1370 if (show_unhandled_signals)
1371 printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1372 "sys_debug_setcontext: %p nip %08lx "
1373 "lr %08lx\n",
1374 current->comm, current->pid,
1375 ctx, regs->nip, regs->link);
1376
1377 force_sig(SIGSEGV, current);
1378 goto out;
1379 }
1380
1381 /*
1382 * It's not clear whether or why it is desirable to save the
1383 * sigaltstack setting on signal delivery and restore it on
1384 * signal return. But other architectures do this and we have
1385 * always done it up until now so it is probably better not to
1386 * change it. -- paulus
1387 */
1388 restore_altstack(&ctx->uc_stack);
1389
1390 set_thread_flag(TIF_RESTOREALL);
1391 out:
1392 return 0;
1393}
1394#endif
1395
1396/*
1397 * OK, we're invoking a handler
1398 */
1399int handle_signal32(struct ksignal *ksig, sigset_t *oldset, struct pt_regs *regs)
1400{
1401 struct sigcontext __user *sc;
1402 struct sigframe __user *frame;
1403 struct mcontext __user *tm_mctx = NULL;
1404 unsigned long newsp = 0;
1405 int sigret;
1406 unsigned long tramp;
1407
1408 /* Set up Signal Frame */
1409 frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 1);
1410 if (unlikely(frame == NULL))
1411 goto badframe;
1412 sc = (struct sigcontext __user *) &frame->sctx;
1413
1414#if _NSIG != 64
1415#error "Please adjust handle_signal()"
1416#endif
1417 if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
1418 || __put_user(oldset->sig[0], &sc->oldmask)
1419#ifdef CONFIG_PPC64
1420 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1421#else
1422 || __put_user(oldset->sig[1], &sc->_unused[3])
1423#endif
1424 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1425 || __put_user(ksig->sig, &sc->signal))
1426 goto badframe;
1427
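	/* As in handle_rt_signal32(), prefer the vDSO sigreturn trampoline. */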
1428 if (vdso32_sigtramp && current->mm->context.vdso_base) {
1429 sigret = 0;
1430 tramp = current->mm->context.vdso_base + vdso32_sigtramp;
1431 } else {
1432 sigret = __NR_sigreturn;
1433 tramp = (unsigned long) frame->mctx.tramp;
1434 }
1435
1436#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1437 tm_mctx = &frame->mctx_transact;
1438 if (MSR_TM_ACTIVE(regs->msr)) {
1439 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1440 sigret))
1441 goto badframe;
1442 }
1443 else
1444#endif
1445 {
1446 if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
1447 goto badframe;
1448 }
1449
1450 regs->link = tramp;
1451
1452 current->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
1453
1454 /* create a stack frame for the caller of the handler */
1455 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1456 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1457 goto badframe;
1458
1459 regs->gpr[1] = newsp;
1460 regs->gpr[3] = ksig->sig;
1461 regs->gpr[4] = (unsigned long) sc;
1462	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
1463 /* enter the signal handler in big-endian mode */
1464 regs->msr &= ~MSR_LE;
1465 return 0;
1466
1467badframe:
1468 if (show_unhandled_signals)
1469 printk_ratelimited(KERN_INFO
1470 "%s[%d]: bad frame in handle_signal32: "
1471 "%p nip %08lx lr %08lx\n",
1472 current->comm, current->pid,
1473 frame, regs->nip, regs->link);
1474
1475 return 1;
1476}
1477
1478/*
1479 * Do a signal return; undo the signal stack.
1480 */
1481long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1482 struct pt_regs *regs)
1483{
1484 struct sigframe __user *sf;
1485 struct sigcontext __user *sc;
1486 struct sigcontext sigctx;
1487 struct mcontext __user *sr;
1488 void __user *addr;
1489 sigset_t set;
1490#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1491 struct mcontext __user *mcp, *tm_mcp;
1492 unsigned long msr_hi;
1493#endif
1494
1495 /* Always make any pending restarted system calls return -EINTR */
1496 current->restart_block.fn = do_no_restart_syscall;
1497
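	/* The sigframe sits __SIGNAL_FRAMESIZE bytes above the stack pointer
	 * set up by handle_signal32(). */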
1498 sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1499 sc = &sf->sctx;
1500 addr = sc;
1501 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1502 goto badframe;
1503
1504#ifdef CONFIG_PPC64
1505 /*
1506 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1507 * unused part of the signal stackframe
1508 */
1509 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1510#else
1511 set.sig[0] = sigctx.oldmask;
1512 set.sig[1] = sigctx._unused[3];
1513#endif
1514 set_current_blocked(&set);
1515
1516#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1517 mcp = (struct mcontext __user *)&sf->mctx;
1518 tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1519 if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1520 goto badframe;
1521 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1522 if (!cpu_has_feature(CPU_FTR_TM))
1523 goto badframe;
1524 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1525 goto badframe;
1526 } else
1527#endif
1528 {
1529 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1530 addr = sr;
1531 if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1532 || restore_user_regs(regs, sr, 1))
1533 goto badframe;
1534 }
1535
1536 set_thread_flag(TIF_RESTOREALL);
1537 return 0;
1538
1539badframe:
1540 if (show_unhandled_signals)
1541 printk_ratelimited(KERN_INFO
1542 "%s[%d]: bad frame in sys_sigreturn: "
1543 "%p nip %08lx lr %08lx\n",
1544 current->comm, current->pid,
1545 addr, regs->nip, regs->link);
1546
1547 force_sig(SIGSEGV, current);
1548 return 0;
1549}