1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
4 *
5 * PowerPC version
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 * Copyright (C) 2001 IBM
8 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
10 *
11 * Derived from "arch/i386/kernel/signal.c"
12 * Copyright (C) 1991, 1992 Linus Torvalds
13 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
14 */
15
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/kernel.h>
20#include <linux/signal.h>
21#include <linux/errno.h>
22#include <linux/elf.h>
23#include <linux/ptrace.h>
24#include <linux/pagemap.h>
25#include <linux/ratelimit.h>
26#include <linux/syscalls.h>
27#ifdef CONFIG_PPC64
28#include <linux/compat.h>
29#else
30#include <linux/wait.h>
31#include <linux/unistd.h>
32#include <linux/stddef.h>
33#include <linux/tty.h>
34#include <linux/binfmts.h>
35#endif
36
37#include <linux/uaccess.h>
38#include <asm/cacheflush.h>
39#include <asm/syscalls.h>
40#include <asm/sigcontext.h>
41#include <asm/vdso.h>
42#include <asm/switch_to.h>
43#include <asm/tm.h>
44#include <asm/asm-prototypes.h>
45#ifdef CONFIG_PPC64
46#include <asm/syscalls_32.h>
47#include <asm/unistd.h>
48#else
49#include <asm/ucontext.h>
50#endif
51
52#include "signal.h"
53
54
55#ifdef CONFIG_PPC64
56#define old_sigaction old_sigaction32
57#define sigcontext sigcontext32
58#define mcontext mcontext32
59#define ucontext ucontext32
60
61/*
62 * Userspace code may pass a ucontext which doesn't include VSX added
63 * at the end. We need to check for this case.
64 */
65#define UCONTEXTSIZEWITHOUTVSX \
66 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
67
68/*
69 * Returning 0 means we return to userspace via
70 * ret_from_except and thus restore all user
71 * registers from *regs. This is what we need
72 * to do when a signal has been delivered.
73 */
74
75#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
76#undef __SIGNAL_FRAMESIZE
77#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
78#undef ELF_NVRREG
79#define ELF_NVRREG ELF_NVRREG32
80
81/*
82 * Functions for flipping sigsets (thanks to brain dead generic
83 * implementation that makes things simple for little endian only)
84 */
85#define unsafe_put_sigset_t unsafe_put_compat_sigset
86#define unsafe_get_sigset_t unsafe_get_compat_sigset
87
88#define to_user_ptr(p) ptr_to_compat(p)
89#define from_user_ptr(p) compat_ptr(p)
90
91static __always_inline int
92__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
93{
94 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
95 int val, i;
96
97 for (i = 0; i <= PT_RESULT; i++) {
98 /* Force user to always see softe as 1 (interrupts enabled) */
99 if (i == PT_SOFTE)
100 val = 1;
101 else
102 val = gregs[i];
103
104 unsafe_put_user(val, &frame->mc_gregs[i], failed);
105 }
106 return 0;
107
108failed:
109 return 1;
110}
111
112static __always_inline int
113__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
114{
115 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
116 int i;
117
118 for (i = 0; i <= PT_RESULT; i++) {
119 if ((i == PT_MSR) || (i == PT_SOFTE))
120 continue;
121 unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
122 }
123 return 0;
124
125failed:
126 return 1;
127}
128
129#else /* CONFIG_PPC64 */
130
131#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
132
133#define unsafe_put_sigset_t(uset, set, label) do { \
134 sigset_t __user *__us = uset ; \
135 const sigset_t *__s = set; \
136 \
137 unsafe_copy_to_user(__us, __s, sizeof(*__us), label); \
138} while (0)
139
140#define unsafe_get_sigset_t unsafe_get_user_sigset
141
142#define to_user_ptr(p) ((unsigned long)(p))
143#define from_user_ptr(p) ((void __user *)(p))
144
145static __always_inline int
146__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
147{
148 unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
149 return 0;
150
151failed:
152 return 1;
153}
154
155static __always_inline
156int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
157{
158 /* copy up to but not including MSR */
159 unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);
160
161 /* copy from orig_r3 (the word after the MSR) up to the end */
162 unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
163 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);
164
165 return 0;
166
167failed:
168 return 1;
169}
170#endif
171
172#define unsafe_save_general_regs(regs, frame, label) do { \
173 if (__unsafe_save_general_regs(regs, frame)) \
174 goto label; \
175} while (0)
176
177#define unsafe_restore_general_regs(regs, frame, label) do { \
178 if (__unsafe_restore_general_regs(regs, frame)) \
179 goto label; \
180} while (0)
181
182/*
183 * When we have signals to deliver, we set up on the
184 * user stack, going down from the original stack pointer:
185 * an ABI gap of 56 words
186 * an mcontext struct
187 * a sigcontext struct
188 * a gap of __SIGNAL_FRAMESIZE bytes
189 *
190 * Each of these things must be a multiple of 16 bytes in size. The following
191 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
192 *
193 */
194struct sigframe {
195 struct sigcontext sctx; /* the sigcontext */
196 struct mcontext mctx; /* all the register values */
197#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
198 struct sigcontext sctx_transact;
199 struct mcontext mctx_transact;
200#endif
201 /*
202 * Programs using the rs6000/xcoff abi can save up to 19 gp
203 * regs and 18 fp regs below sp before decrementing it.
204 */
205 int abigap[56];
206};
207
208/*
209 * When we have rt signals to deliver, we set up on the
210 * user stack, going down from the original stack pointer:
211 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
212 * a gap of __SIGNAL_FRAMESIZE+16 bytes
213 * (the +16 is to get the siginfo and ucontext in the same
214 * positions as in older kernels).
215 *
216 * Each of these things must be a multiple of 16 bytes in size.
217 *
218 */
219struct rt_sigframe {
220#ifdef CONFIG_PPC64
221 compat_siginfo_t info;
222#else
223 struct siginfo info;
224#endif
225 struct ucontext uc;
226#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
227 struct ucontext uc_transact;
228#endif
229 /*
230 * Programs using the rs6000/xcoff abi can save up to 19 gp
231 * regs and 18 fp regs below sp before decrementing it.
232 */
233 int abigap[56];
234};
235
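/*
 * Report the worst-case stack space a 32-bit signal frame can consume:
 * the larger of the rt and non-rt layouts, including the ABI gap left
 * below the frame.
 */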
236unsigned long get_min_sigframe_size_32(void)
237{
238 return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
239 sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
240}
241
242/*
243 * Save the current user registers on the user stack.
244 * We only save the altivec/spe registers if the process has used
245 * altivec/spe instructions at some point.
246 */
247static void prepare_save_user_regs(int ctx_has_vsx_region)
248{
249 /* Make sure floating point registers are stored in regs */
250 flush_fp_to_thread(current);
251#ifdef CONFIG_ALTIVEC
252 if (current->thread.used_vr)
253 flush_altivec_to_thread(current);
254 if (cpu_has_feature(CPU_FTR_ALTIVEC))
255 current->thread.vrsave = mfspr(SPRN_VRSAVE);
256#endif
257#ifdef CONFIG_VSX
258 if (current->thread.used_vsr && ctx_has_vsx_region)
259 flush_vsx_to_thread(current);
260#endif
261#ifdef CONFIG_SPE
262 if (current->thread.used_spe)
263 flush_spe_to_thread(current);
264#endif
265}
266
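/*
 * Write the general registers, FP registers and, when the task has used
 * them, the Altivec/VSX/SPE state into @frame, setting the matching MSR
 * bits in the saved MSR word so the restore path knows which blocks hold
 * valid data.  If @tm_frame is non-NULL its MSR word is zeroed so the
 * restore path can tell TM was not active.  The caller must already have
 * opened a user access window.
 */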
267static __always_inline int
268__unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
269 struct mcontext __user *tm_frame, int ctx_has_vsx_region)
270{
271 unsigned long msr = regs->msr;
272
273 /* save general registers */
274 unsafe_save_general_regs(regs, frame, failed);
275
276#ifdef CONFIG_ALTIVEC
277 /* save altivec registers */
278 if (current->thread.used_vr) {
279 unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
280 ELF_NVRREG * sizeof(vector128), failed);
281 /* set MSR_VEC in the saved MSR value to indicate that
282 frame->mc_vregs contains valid data */
283 msr |= MSR_VEC;
284 }
285 /* else assert((regs->msr & MSR_VEC) == 0) */
286
287 /* We always copy to/from vrsave, it's 0 if we don't have or don't
288 * use altivec. Since VSCR only contains 32 bits saved in the least
289 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
290 * most significant bits of that same vector. --BenH
291 * Note that the current VRSAVE value is in the SPR at this point.
292 */
293 unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
294 failed);
295#endif /* CONFIG_ALTIVEC */
296 unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);
297
298 /*
299 * Clear the MSR VSX bit to indicate there is no valid state attached
300 * to this context, except in the specific case below where we set it.
301 */
302 msr &= ~MSR_VSX;
303#ifdef CONFIG_VSX
304 /*
305 * Copy VSR 0-31 upper half from thread_struct to local
306 * buffer, then write that to userspace. Also set MSR_VSX in
307 * the saved MSR value to indicate that frame->mc_vregs
308 * contains valid data
309 */
310 if (current->thread.used_vsr && ctx_has_vsx_region) {
311 unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
312 msr |= MSR_VSX;
313 }
314#endif /* CONFIG_VSX */
315#ifdef CONFIG_SPE
316 /* save spe registers */
317 if (current->thread.used_spe) {
318 unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
319 ELF_NEVRREG * sizeof(u32), failed);
320 /* set MSR_SPE in the saved MSR value to indicate that
321 frame->mc_vregs contains valid data */
322 msr |= MSR_SPE;
323 }
324 /* else assert((regs->msr & MSR_SPE) == 0) */
325
326 /* We always copy to/from spefscr */
327 unsafe_put_user(current->thread.spefscr,
328 (u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
329#endif /* CONFIG_SPE */
330
331 unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
332
333 /* We need to write 0 to the MSR top 32 bits in the tm frame so that we
334 * can check it on the restore to see if TM is active
335 */
336 if (tm_frame)
337 unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);
338
339 return 0;
340
341failed:
342 return 1;
343}
344
345#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
346 if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx)) \
347 goto label; \
348} while (0)
349
350#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
351/*
352 * Save the current user registers on the user stack.
353 * We only save the altivec/spe registers if the process has used
354 * altivec/spe instructions at some point.
355 * We also save the transactional registers to a second ucontext in the
356 * frame.
357 *
358 * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
359 */
360static void prepare_save_tm_user_regs(void)
361{
362 WARN_ON(tm_suspend_disabled);
363
364 if (cpu_has_feature(CPU_FTR_ALTIVEC))
365 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
366}
367
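/*
 * TM variant of __unsafe_save_user_regs(): the checkpointed register
 * state goes into @frame and the live, transactional state into
 * @tm_frame, with the top half of the 64-bit MSR stashed in the
 * transactional frame's MSR slot.
 */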
368static __always_inline int
369save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
370 struct mcontext __user *tm_frame, unsigned long msr)
371{
372 /* Save both sets of general registers */
373 unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
374 unsafe_save_general_regs(regs, tm_frame, failed);
375
376 /* Stash the top half of the 64bit MSR into the 32bit MSR word
377 * of the transactional mcontext. This way we have a backward-compatible
378 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
379 * also look at what type of transaction (T or S) was active at the
380 * time of the signal.
381 */
382 unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
383
384 /* save altivec registers */
385 if (current->thread.used_vr) {
386 unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
387 ELF_NVRREG * sizeof(vector128), failed);
388 if (msr & MSR_VEC)
389 unsafe_copy_to_user(&tm_frame->mc_vregs,
390 &current->thread.vr_state,
391 ELF_NVRREG * sizeof(vector128), failed);
392 else
393 unsafe_copy_to_user(&tm_frame->mc_vregs,
394 &current->thread.ckvr_state,
395 ELF_NVRREG * sizeof(vector128), failed);
396
397 /* set MSR_VEC in the saved MSR value to indicate that
398 * frame->mc_vregs contains valid data
399 */
400 msr |= MSR_VEC;
401 }
402
403 /* We always copy to/from vrsave, it's 0 if we don't have or don't
404 * use altivec. Since VSCR only contains 32 bits saved in the least
405 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
406 * most significant bits of that same vector. --BenH
407 */
408 unsafe_put_user(current->thread.ckvrsave,
409 (u32 __user *)&frame->mc_vregs[32], failed);
410 if (msr & MSR_VEC)
411 unsafe_put_user(current->thread.vrsave,
412 (u32 __user *)&tm_frame->mc_vregs[32], failed);
413 else
414 unsafe_put_user(current->thread.ckvrsave,
415 (u32 __user *)&tm_frame->mc_vregs[32], failed);
416
417 unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
418 if (msr & MSR_FP)
419 unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
420 else
421 unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
422
423 /*
424 * Copy VSR 0-31 upper half from thread_struct to local
425 * buffer, then write that to userspace. Also set MSR_VSX in
426 * the saved MSR value to indicate that frame->mc_vregs
427 * contains valid data
428 */
429 if (current->thread.used_vsr) {
430 unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
431 if (msr & MSR_VSX)
432 unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
433 else
434 unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);
435
436 msr |= MSR_VSX;
437 }
438
439 unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
440
441 return 0;
442
443failed:
444 return 1;
445}
446#else
447static void prepare_save_tm_user_regs(void) { }
448
449static __always_inline int
450save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
451 struct mcontext __user *tm_frame, unsigned long msr)
452{
453 return 0;
454}
455#endif
456
457#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
458 if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr)) \
459 goto label; \
460} while (0)
461
462/*
463 * Restore the current user register values from the user stack,
464 * (except for MSR).
465 */
466static long restore_user_regs(struct pt_regs *regs,
467 struct mcontext __user *sr, int sig)
468{
469 unsigned int save_r2 = 0;
470 unsigned long msr;
471#ifdef CONFIG_VSX
472 int i;
473#endif
474
475 if (!user_read_access_begin(sr, sizeof(*sr)))
476 return 1;
477 /*
478 * restore general registers but not including MSR or SOFTE. Also
479 * take care of keeping r2 (TLS) intact if not a signal
480 */
481 if (!sig)
482 save_r2 = (unsigned int)regs->gpr[2];
483 unsafe_restore_general_regs(regs, sr, failed);
484 set_trap_norestart(regs);
485 unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
486 if (!sig)
487 regs->gpr[2] = (unsigned long) save_r2;
488
489 /* if doing signal return, restore the previous little-endian mode */
490 if (sig)
491 regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
492
493#ifdef CONFIG_ALTIVEC
494 /*
495 * Force the process to reload the altivec registers from
496 * current->thread when it next does altivec instructions
497 */
498 regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
499 if (msr & MSR_VEC) {
500 /* restore altivec registers from the stack */
501 unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
502 sizeof(sr->mc_vregs), failed);
503 current->thread.used_vr = true;
504 } else if (current->thread.used_vr)
505 memset(&current->thread.vr_state, 0,
506 ELF_NVRREG * sizeof(vector128));
507
508 /* Always get VRSAVE back */
509 unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
510 if (cpu_has_feature(CPU_FTR_ALTIVEC))
511 mtspr(SPRN_VRSAVE, current->thread.vrsave);
512#endif /* CONFIG_ALTIVEC */
513 unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
514
515#ifdef CONFIG_VSX
516 /*
517 * Force the process to reload the VSX registers from
518 * current->thread when it next does VSX instruction.
519 */
520 regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
521 if (msr & MSR_VSX) {
522 /*
523 * Restore altivec registers from the stack to a local
524 * buffer, then write this out to the thread_struct
525 */
526 unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
527 current->thread.used_vsr = true;
528 } else if (current->thread.used_vsr)
529 for (i = 0; i < 32 ; i++)
530 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
531#endif /* CONFIG_VSX */
532 /*
533 * force the process to reload the FP registers from
534 * current->thread when it next does FP instructions
535 */
536 regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
537
538#ifdef CONFIG_SPE
539 /*
540 * Force the process to reload the spe registers from
541 * current->thread when it next does spe instructions.
542 * Since this is user ABI, we must enforce the sizing.
543 */
544 BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
545 regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
546 if (msr & MSR_SPE) {
547 /* restore spe registers from the stack */
548 unsafe_copy_from_user(&current->thread.spe, &sr->mc_vregs,
549 sizeof(current->thread.spe), failed);
550 current->thread.used_spe = true;
551 } else if (current->thread.used_spe)
552 memset(&current->thread.spe, 0, sizeof(current->thread.spe));
553
554 /* Always get SPEFSCR back */
555 unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
556#endif /* CONFIG_SPE */
557
558 user_read_access_end();
559 return 0;
560
561failed:
562 user_read_access_end();
563 return 1;
564}
565
566#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
567/*
568 * Restore the current user register values from the user stack, except for
569 * MSR, and recheckpoint the original checkpointed register state for processes
570 * in transactions.
571 */
572static long restore_tm_user_regs(struct pt_regs *regs,
573 struct mcontext __user *sr,
574 struct mcontext __user *tm_sr)
575{
576 unsigned long msr, msr_hi;
577 int i;
578
579 if (tm_suspend_disabled)
580 return 1;
581 /*
582 * restore general registers but not including MSR or SOFTE. Also
583 * take care of keeping r2 (TLS) intact if not a signal.
584 * See comment in signal_64.c:restore_tm_sigcontexts();
585 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
586 * were set by the signal delivery.
587 */
588 if (!user_read_access_begin(sr, sizeof(*sr)))
589 return 1;
590
591 unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
592 unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
593 unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
594
595 /* Restore the previous little-endian mode */
596 regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
597
598 regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
599 if (msr & MSR_VEC) {
600 /* restore altivec registers from the stack */
601 unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
602 sizeof(sr->mc_vregs), failed);
603 current->thread.used_vr = true;
604 } else if (current->thread.used_vr) {
605 memset(&current->thread.vr_state, 0,
606 ELF_NVRREG * sizeof(vector128));
607 memset(&current->thread.ckvr_state, 0,
608 ELF_NVRREG * sizeof(vector128));
609 }
610
611 /* Always get VRSAVE back */
612 unsafe_get_user(current->thread.ckvrsave,
613 (u32 __user *)&sr->mc_vregs[32], failed);
614 if (cpu_has_feature(CPU_FTR_ALTIVEC))
615 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
616
617 regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
618
619 unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
620
621 regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
622 if (msr & MSR_VSX) {
623 /*
624 * Restore altivec registers from the stack to a local
625 * buffer, then write this out to the thread_struct
626 */
627 unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
628 current->thread.used_vsr = true;
629 } else if (current->thread.used_vsr)
630 for (i = 0; i < 32 ; i++) {
631 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
632 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
633 }
634
635 user_read_access_end();
636
637 if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
638 return 1;
639
640 unsafe_restore_general_regs(regs, tm_sr, failed);
641
642 /* restore altivec registers from the stack */
643 if (msr & MSR_VEC)
644 unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
645 sizeof(sr->mc_vregs), failed);
646
647 /* Always get VRSAVE back */
648 unsafe_get_user(current->thread.vrsave,
649 (u32 __user *)&tm_sr->mc_vregs[32], failed);
650
651 unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
652
653 if (msr & MSR_VSX) {
654 /*
655 * Restore altivec registers from the stack to a local
656 * buffer, then write this out to the thread_struct
657 */
658 unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
659 current->thread.used_vsr = true;
660 }
661
662 /* Get the top half of the MSR from the user context */
663 unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
664 msr_hi <<= 32;
665
666 user_read_access_end();
667
668 /* If TM bits are set to the reserved value, it's an invalid context */
669 if (MSR_TM_RESV(msr_hi))
670 return 1;
671
672 /*
673 * Disabling preemption, since it is unsafe to be preempted
674 * with MSR[TS] set without recheckpointing.
675 */
676 preempt_disable();
677
678 /*
679 * CAUTION:
680 * After regs->MSR[TS] has been updated, make sure that get_user(),
681 * put_user() or similar functions are *not* called. These
682 * functions can generate page faults which will cause the process
683 * to be de-scheduled with MSR[TS] set but without calling
684 * tm_recheckpoint(). This can cause a bug.
685 *
686 * Pull in the MSR TM bits from the user context
687 */
688 regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
689 /* Now, recheckpoint. This loads up all of the checkpointed (older)
690 * registers, including FP and V[S]Rs. After recheckpointing, the
691 * transactional versions should be loaded.
692 */
693 tm_enable();
694 /* Make sure the transaction is marked as failed */
695 current->thread.tm_texasr |= TEXASR_FS;
696 /* This loads the checkpointed FP/VEC state, if used */
697 tm_recheckpoint(&current->thread);
698
699 /* This loads the speculative FP/VEC state, if used */
700 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
701 if (msr & MSR_FP) {
702 load_fp_state(&current->thread.fp_state);
703 regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
704 }
705 if (msr & MSR_VEC) {
706 load_vr_state(&current->thread.vr_state);
707 regs_set_return_msr(regs, regs->msr | MSR_VEC);
708 }
709
710 preempt_enable();
711
712 return 0;
713
714failed:
715 user_read_access_end();
716 return 1;
717}
718#else
719static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
720 struct mcontext __user *tm_sr)
721{
722 return 0;
723}
724#endif
725
726#ifdef CONFIG_PPC64
727
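/* The 32-bit rt frame carries a compat_siginfo_t on 64-bit kernels. */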
728#define copy_siginfo_to_user copy_siginfo_to_user32
729
730#endif /* CONFIG_PPC64 */
731
732/*
733 * Set up a signal frame for a "real-time" signal handler
734 * (one which gets siginfo).
735 */
736int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
737 struct task_struct *tsk)
738{
739 struct rt_sigframe __user *frame;
740 struct mcontext __user *mctx;
741 struct mcontext __user *tm_mctx = NULL;
742 unsigned long newsp = 0;
743 unsigned long tramp;
744 struct pt_regs *regs = tsk->thread.regs;
745 /* Save the thread's msr before get_tm_stackpointer() changes it */
746 unsigned long msr = regs->msr;
747
748 /* Set up Signal Frame */
749 frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
750 mctx = &frame->uc.uc_mcontext;
751#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
752 tm_mctx = &frame->uc_transact.uc_mcontext;
753#endif
754 if (MSR_TM_ACTIVE(msr))
755 prepare_save_tm_user_regs();
756 else
757 prepare_save_user_regs(1);
758
759 if (!user_access_begin(frame, sizeof(*frame)))
760 goto badframe;
761
762 /* Put the siginfo & fill in most of the ucontext */
763 unsafe_put_user(0, &frame->uc.uc_flags, failed);
764#ifdef CONFIG_PPC64
765 unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
766#else
767 unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
768#endif
769 unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);
770
771 if (MSR_TM_ACTIVE(msr)) {
772#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
773 unsafe_put_user((unsigned long)&frame->uc_transact,
774 &frame->uc.uc_link, failed);
775 unsafe_put_user((unsigned long)tm_mctx,
776 &frame->uc_transact.uc_regs, failed);
777#endif
778 unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
779 } else {
780 unsafe_put_user(0, &frame->uc.uc_link, failed);
781 unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
782 }
783
784 /* Set up the sigreturn trampoline, in the vDSO if it is mapped */
785 if (tsk->mm->context.vdso) {
786 tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
787 } else {
788 tramp = (unsigned long)mctx->mc_pad;
789 unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
790 unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
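/*
 * Flush the trampoline instructions to memory and invalidate the
 * icache line so the CPU fetches the freshly written code.
 */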
791 asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
792 }
793 unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
794
795 user_access_end();
796
797 if (copy_siginfo_to_user(&frame->info, &ksig->info))
798 goto badframe;
799
800 regs->link = tramp;
801
802#ifdef CONFIG_PPC_FPU_REGS
803 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
804#endif
805
806 /* create a stack frame for the caller of the handler */
807 newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
808 if (put_user(regs->gpr[1], (u32 __user *)newsp))
809 goto badframe;
810
811 /* Fill registers for signal handler */
812 regs->gpr[1] = newsp;
813 regs->gpr[3] = ksig->sig;
814 regs->gpr[4] = (unsigned long)&frame->info;
815 regs->gpr[5] = (unsigned long)&frame->uc;
816 regs->gpr[6] = (unsigned long)frame;
817 regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
818 /* enter the signal handler in native-endian mode */
819 regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
820
821 return 0;
822
823failed:
824 user_access_end();
825
826badframe:
827 signal_fault(tsk, regs, "handle_rt_signal32", frame);
828
829 return 1;
830}
831
832/*
833 * OK, we're invoking a handler
834 */
835int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
836 struct task_struct *tsk)
837{
838 struct sigcontext __user *sc;
839 struct sigframe __user *frame;
840 struct mcontext __user *mctx;
841 struct mcontext __user *tm_mctx = NULL;
842 unsigned long newsp = 0;
843 unsigned long tramp;
844 struct pt_regs *regs = tsk->thread.regs;
845 /* Save the thread's msr before get_tm_stackpointer() changes it */
846 unsigned long msr = regs->msr;
847
848 /* Set up Signal Frame */
849 frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
850 mctx = &frame->mctx;
851#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
852 tm_mctx = &frame->mctx_transact;
853#endif
854 if (MSR_TM_ACTIVE(msr))
855 prepare_save_tm_user_regs();
856 else
857 prepare_save_user_regs(1);
858
859 if (!user_access_begin(frame, sizeof(*frame)))
860 goto badframe;
861 sc = (struct sigcontext __user *) &frame->sctx;
862
863#if _NSIG != 64
864#error "Please adjust handle_signal()"
865#endif
866 unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
867 unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
868#ifdef CONFIG_PPC64
869 unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
870#else
871 unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
872#endif
873 unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
874 unsafe_put_user(ksig->sig, &sc->signal, failed);
875
876 if (MSR_TM_ACTIVE(msr))
877 unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
878 else
879 unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
880
881 if (tsk->mm->context.vdso) {
882 tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
883 } else {
884 tramp = (unsigned long)mctx->mc_pad;
885 unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
886 unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
887 asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
888 }
889 user_access_end();
890
891 regs->link = tramp;
892
893#ifdef CONFIG_PPC_FPU_REGS
894 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
895#endif
896
897 /* create a stack frame for the caller of the handler */
898 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
899 if (put_user(regs->gpr[1], (u32 __user *)newsp))
900 goto badframe;
901
902 regs->gpr[1] = newsp;
903 regs->gpr[3] = ksig->sig;
904 regs->gpr[4] = (unsigned long) sc;
905 regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
906 /* enter the signal handler in native-endian mode */
907 regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
908
909 return 0;
910
911failed:
912 user_access_end();
913
914badframe:
915 signal_fault(tsk, regs, "handle_signal32", frame);
916
917 return 1;
918}
919
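/*
 * Install the signal mask and register state described by @ucp.
 * Returns -EFAULT if the ucontext or its register area cannot be read.
 */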
920static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
921{
922 sigset_t set;
923 struct mcontext __user *mcp;
924
925 if (!user_read_access_begin(ucp, sizeof(*ucp)))
926 return -EFAULT;
927
928 unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
929#ifdef CONFIG_PPC64
930 {
931 u32 cmcp;
932
933 unsafe_get_user(cmcp, &ucp->uc_regs, failed);
934 mcp = (struct mcontext __user *)(u64)cmcp;
935 }
936#else
937 unsafe_get_user(mcp, &ucp->uc_regs, failed);
938#endif
939 user_read_access_end();
940
941 set_current_blocked(&set);
942 if (restore_user_regs(regs, mcp, sig))
943 return -EFAULT;
944
945 return 0;
946
947failed:
948 user_read_access_end();
949 return -EFAULT;
950}
951
952#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
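/*
 * As do_setcontext(), but also pulls in the transactional register state
 * from @tm_ucp and recheckpoints it via restore_tm_user_regs().
 */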
953static int do_setcontext_tm(struct ucontext __user *ucp,
954 struct ucontext __user *tm_ucp,
955 struct pt_regs *regs)
956{
957 sigset_t set;
958 struct mcontext __user *mcp;
959 struct mcontext __user *tm_mcp;
960 u32 cmcp;
961 u32 tm_cmcp;
962
963 if (!user_read_access_begin(ucp, sizeof(*ucp)))
964 return -EFAULT;
965
966 unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
967 unsafe_get_user(cmcp, &ucp->uc_regs, failed);
968
969 user_read_access_end();
970
971 if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
972 return -EFAULT;
973 mcp = (struct mcontext __user *)(u64)cmcp;
974 tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
975 /* no need to check access_ok(mcp), since mcp < 4GB */
976
977 set_current_blocked(&set);
978 if (restore_tm_user_regs(regs, mcp, tm_mcp))
979 return -EFAULT;
980
981 return 0;
982
983failed:
984 user_read_access_end();
985 return -EFAULT;
986}
987#endif
988
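/*
 * swapcontext: optionally save the current register state and signal mask
 * into @old_ctx, then, if @new_ctx is given, switch to the state it
 * describes.  On 64-bit kernels @ctx_size tells us whether the context
 * includes the VSX region.
 */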
989#ifdef CONFIG_PPC64
990COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
991 struct ucontext __user *, new_ctx, int, ctx_size)
992#else
993SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
994 struct ucontext __user *, new_ctx, long, ctx_size)
995#endif
996{
997 struct pt_regs *regs = current_pt_regs();
998 int ctx_has_vsx_region = 0;
999
1000#ifdef CONFIG_PPC64
1001 unsigned long new_msr = 0;
1002
1003 if (new_ctx) {
1004 struct mcontext __user *mcp;
1005 u32 cmcp;
1006
1007 /*
1008 * Get pointer to the real mcontext. No need for
1009 * access_ok since we are dealing with compat
1010 * pointers.
1011 */
1012 if (__get_user(cmcp, &new_ctx->uc_regs))
1013 return -EFAULT;
1014 mcp = (struct mcontext __user *)(u64)cmcp;
1015 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1016 return -EFAULT;
1017 }
1018 /*
1019 * Check that the context is not smaller than the original
1020 * size (with VMX but without VSX)
1021 */
1022 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1023 return -EINVAL;
1024 /*
1025 * If the new context state sets the MSR VSX bits but
1026 * it doesn't provide VSX state.
1027 */
1028 if ((ctx_size < sizeof(struct ucontext)) &&
1029 (new_msr & MSR_VSX))
1030 return -EINVAL;
1031 /* Does the context have enough room to store VSX data? */
1032 if (ctx_size >= sizeof(struct ucontext))
1033 ctx_has_vsx_region = 1;
1034#else
1035 /* Context size is for future use. Right now, we only make sure
1036 * we are passed something we understand
1037 */
1038 if (ctx_size < sizeof(struct ucontext))
1039 return -EINVAL;
1040#endif
1041 if (old_ctx != NULL) {
1042 struct mcontext __user *mctx;
1043
1044 /*
1045 * old_ctx might not be 16-byte aligned, in which
1046 * case old_ctx->uc_mcontext won't be either.
1047 * Because we have the old_ctx->uc_pad2 field
1048 * before old_ctx->uc_mcontext, we need to round down
1049 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1050 */
1051 mctx = (struct mcontext __user *)
1052 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1053 prepare_save_user_regs(ctx_has_vsx_region);
1054 if (!user_write_access_begin(old_ctx, ctx_size))
1055 return -EFAULT;
1056 unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
1057 unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
1058 unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
1059 user_write_access_end();
1060 }
1061 if (new_ctx == NULL)
1062 return 0;
1063 if (!access_ok(new_ctx, ctx_size) ||
1064 fault_in_readable((char __user *)new_ctx, ctx_size))
1065 return -EFAULT;
1066
1067 /*
1068 * If we get a fault copying the context into the kernel's
1069 * image of the user's registers, we can't just return -EFAULT
1070 * because the user's registers will be corrupted. For instance
1071 * the NIP value may have been updated but not some of the
1072 * other registers. Given that we have done the access_ok
1073 * and successfully read the first and last bytes of the region
1074 * above, this should only happen in an out-of-memory situation
1075 * or if another thread unmaps the region containing the context.
1076 * We kill the task with a SIGSEGV in this situation.
1077 */
1078 if (do_setcontext(new_ctx, regs, 0)) {
1079 force_exit_sig(SIGSEGV);
1080 return -EFAULT;
1081 }
1082
1083 set_thread_flag(TIF_RESTOREALL);
1084 return 0;
1085
1086failed:
1087 user_write_access_end();
1088 return -EFAULT;
1089}
1090
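/*
 * Do an rt signal return; restore the register state and signal mask from
 * the rt_sigframe that handle_rt_signal32() built on the user stack,
 * recheckpointing any transactional state recorded there.
 */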
1091#ifdef CONFIG_PPC64
1092COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
1093#else
1094SYSCALL_DEFINE0(rt_sigreturn)
1095#endif
1096{
1097 struct rt_sigframe __user *rt_sf;
1098 struct pt_regs *regs = current_pt_regs();
1099 int tm_restore = 0;
1100#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1101 struct ucontext __user *uc_transact;
1102 unsigned long msr_hi;
1103 unsigned long tmp;
1104#endif
1105 /* Always make any pending restarted system calls return -EINTR */
1106 current->restart_block.fn = do_no_restart_syscall;
1107
1108 rt_sf = (struct rt_sigframe __user *)
1109 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1110 if (!access_ok(rt_sf, sizeof(*rt_sf)))
1111 goto bad;
1112
1113#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1114 /*
1115 * If there is a transactional state then throw it away.
1116 * The purpose of a sigreturn is to destroy all traces of the
1117 * signal frame, this includes any transactional state created
1118 * within it. We only check for suspended as we can never be
1119 * active in the kernel; if we are active, there is nothing better to
1120 * do than go ahead and Bad Thing later.
1121 * The cause is not important as there will never be a
1122 * recheckpoint so it's not user visible.
1123 */
1124 if (MSR_TM_SUSPENDED(mfmsr()))
1125 tm_reclaim_current(0);
1126
1127 if (__get_user(tmp, &rt_sf->uc.uc_link))
1128 goto bad;
1129 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1130 if (uc_transact) {
1131 u32 cmcp;
1132 struct mcontext __user *mcp;
1133
1134 if (__get_user(cmcp, &uc_transact->uc_regs))
1135 return -EFAULT;
1136 mcp = (struct mcontext __user *)(u64)cmcp;
1137 /* The top 32 bits of the MSR are stashed in the transactional
1138 * ucontext. */
1139 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1140 goto bad;
1141
1142 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1143 /* Trying to start TM on non TM system */
1144 if (!cpu_has_feature(CPU_FTR_TM))
1145 goto bad;
1146 /* We only recheckpoint on return if we're
1147 * in a transaction.
1148 */
1149 tm_restore = 1;
1150 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1151 goto bad;
1152 }
1153 }
1154 if (!tm_restore) {
1155 /*
1156 * Unset regs->msr because ucontext MSR TS is not
1157 * set, and recheckpoint was not called. This avoids
1158 * hitting a TM Bad thing at RFID
1159 */
1160 regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
1161 }
1162 /* Fall through, for non-TM restore */
1163#endif
1164 if (!tm_restore)
1165 if (do_setcontext(&rt_sf->uc, regs, 1))
1166 goto bad;
1167
1168 /*
1169 * It's not clear whether or why it is desirable to save the
1170 * sigaltstack setting on signal delivery and restore it on
1171 * signal return. But other architectures do this and we have
1172 * always done it up until now so it is probably better not to
1173 * change it. -- paulus
1174 */
1175#ifdef CONFIG_PPC64
1176 if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1177 goto bad;
1178#else
1179 if (restore_altstack(&rt_sf->uc.uc_stack))
1180 goto bad;
1181#endif
1182 set_thread_flag(TIF_RESTOREALL);
1183 return 0;
1184
1185 bad:
1186 signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);
1187
1188 force_sig(SIGSEGV);
1189 return 0;
1190}
1191
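/*
 * debug_setcontext: apply the requested single-step / branch-trace debug
 * settings, then switch to the register state and signal mask in @ctx.
 */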
1192#ifdef CONFIG_PPC32
1193SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
1194 int, ndbg, struct sig_dbg_op __user *, dbg)
1195{
1196 struct pt_regs *regs = current_pt_regs();
1197 struct sig_dbg_op op;
1198 int i;
1199 unsigned long new_msr = regs->msr;
1200#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1201 unsigned long new_dbcr0 = current->thread.debug.dbcr0;
1202#endif
1203
1204 for (i = 0; i < ndbg; i++) {
1205 if (copy_from_user(&op, dbg + i, sizeof(op)))
1206 return -EFAULT;
1207 switch (op.dbg_type) {
1208 case SIG_DBG_SINGLE_STEPPING:
1209#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1210 if (op.dbg_value) {
1211 new_msr |= MSR_DE;
1212 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1213 } else {
1214 new_dbcr0 &= ~DBCR0_IC;
1215 if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1216 current->thread.debug.dbcr1)) {
1217 new_msr &= ~MSR_DE;
1218 new_dbcr0 &= ~DBCR0_IDM;
1219 }
1220 }
1221#else
1222 if (op.dbg_value)
1223 new_msr |= MSR_SE;
1224 else
1225 new_msr &= ~MSR_SE;
1226#endif
1227 break;
1228 case SIG_DBG_BRANCH_TRACING:
1229#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1230 return -EINVAL;
1231#else
1232 if (op.dbg_value)
1233 new_msr |= MSR_BE;
1234 else
1235 new_msr &= ~MSR_BE;
1236#endif
1237 break;
1238
1239 default:
1240 return -EINVAL;
1241 }
1242 }
1243
1244 /* We wait until here to actually install the values in the
1245 registers so if we fail in the above loop, it will not
1246 affect the contents of these registers. After this point,
1247 failure is a problem, anyway, and it's very unlikely unless
1248 the user is really doing something wrong. */
1249 regs_set_return_msr(regs, new_msr);
1250#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1251 current->thread.debug.dbcr0 = new_dbcr0;
1252#endif
1253
1254 if (!access_ok(ctx, sizeof(*ctx)) ||
1255 fault_in_readable((char __user *)ctx, sizeof(*ctx)))
1256 return -EFAULT;
1257
1258 /*
1259 * If we get a fault copying the context into the kernel's
1260 * image of the user's registers, we can't just return -EFAULT
1261 * because the user's registers will be corrupted. For instance
1262 * the NIP value may have been updated but not some of the
1263 * other registers. Given that we have done the access_ok
1264 * and successfully read the first and last bytes of the region
1265 * above, this should only happen in an out-of-memory situation
1266 * or if another thread unmaps the region containing the context.
1267 * We kill the task with a SIGSEGV in this situation.
1268 */
1269 if (do_setcontext(ctx, regs, 1)) {
1270 signal_fault(current, regs, "sys_debug_setcontext", ctx);
1271
1272 force_sig(SIGSEGV);
1273 goto out;
1274 }
1275
1276 /*
1277 * It's not clear whether or why it is desirable to save the
1278 * sigaltstack setting on signal delivery and restore it on
1279 * signal return. But other architectures do this and we have
1280 * always done it up until now so it is probably better not to
1281 * change it. -- paulus
1282 */
1283 restore_altstack(&ctx->uc_stack);
1284
1285 set_thread_flag(TIF_RESTOREALL);
1286 out:
1287 return 0;
1288}
1289#endif
1290
1291/*
1292 * Do a signal return; undo the signal stack.
1293 */
1294#ifdef CONFIG_PPC64
1295COMPAT_SYSCALL_DEFINE0(sigreturn)
1296#else
1297SYSCALL_DEFINE0(sigreturn)
1298#endif
1299{
1300 struct pt_regs *regs = current_pt_regs();
1301 struct sigframe __user *sf;
1302 struct sigcontext __user *sc;
1303 struct sigcontext sigctx;
1304 struct mcontext __user *sr;
1305 sigset_t set;
1306 struct mcontext __user *mcp;
1307 struct mcontext __user *tm_mcp = NULL;
1308 unsigned long long msr_hi = 0;
1309
1310 /* Always make any pending restarted system calls return -EINTR */
1311 current->restart_block.fn = do_no_restart_syscall;
1312
1313 sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1314 sc = &sf->sctx;
1315 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1316 goto badframe;
1317
1318#ifdef CONFIG_PPC64
1319 /*
1320 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1321 * unused part of the signal stackframe
1322 */
1323 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1324#else
1325 set.sig[0] = sigctx.oldmask;
1326 set.sig[1] = sigctx._unused[3];
1327#endif
1328 set_current_blocked(&set);
1329
1330 mcp = (struct mcontext __user *)&sf->mctx;
1331#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1332 tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1333 if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1334 goto badframe;
1335#endif
1336 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1337 if (!cpu_has_feature(CPU_FTR_TM))
1338 goto badframe;
1339 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1340 goto badframe;
1341 } else {
1342 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1343 if (restore_user_regs(regs, sr, 1)) {
1344 signal_fault(current, regs, "sys_sigreturn", sr);
1345
1346 force_sig(SIGSEGV);
1347 return 0;
1348 }
1349 }
1350
1351 set_thread_flag(TIF_RESTOREALL);
1352 return 0;
1353
1354badframe:
1355 signal_fault(current, regs, "sys_sigreturn", sc);
1356
1357 force_sig(SIGSEGV);
1358 return 0;
1359}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
4 *
5 * PowerPC version
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 * Copyright (C) 2001 IBM
8 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
10 *
11 * Derived from "arch/i386/kernel/signal.c"
12 * Copyright (C) 1991, 1992 Linus Torvalds
13 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
14 */
15
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/kernel.h>
20#include <linux/signal.h>
21#include <linux/errno.h>
22#include <linux/elf.h>
23#include <linux/ptrace.h>
24#include <linux/pagemap.h>
25#include <linux/ratelimit.h>
26#include <linux/syscalls.h>
27#ifdef CONFIG_PPC64
28#include <linux/compat.h>
29#else
30#include <linux/wait.h>
31#include <linux/unistd.h>
32#include <linux/stddef.h>
33#include <linux/tty.h>
34#include <linux/binfmts.h>
35#endif
36
37#include <linux/uaccess.h>
38#include <asm/cacheflush.h>
39#include <asm/syscalls.h>
40#include <asm/sigcontext.h>
41#include <asm/vdso.h>
42#include <asm/switch_to.h>
43#include <asm/tm.h>
44#include <asm/asm-prototypes.h>
45#ifdef CONFIG_PPC64
46#include "ppc32.h"
47#include <asm/unistd.h>
48#else
49#include <asm/ucontext.h>
50#include <asm/pgtable.h>
51#endif
52
53#include "signal.h"
54
55
56#ifdef CONFIG_PPC64
57#define old_sigaction old_sigaction32
58#define sigcontext sigcontext32
59#define mcontext mcontext32
60#define ucontext ucontext32
61
62#define __save_altstack __compat_save_altstack
63
64/*
65 * Userspace code may pass a ucontext which doesn't include VSX added
66 * at the end. We need to check for this case.
67 */
68#define UCONTEXTSIZEWITHOUTVSX \
69 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
70
71/*
72 * Returning 0 means we return to userspace via
73 * ret_from_except and thus restore all user
74 * registers from *regs. This is what we need
75 * to do when a signal has been delivered.
76 */
77
78#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
79#undef __SIGNAL_FRAMESIZE
80#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
81#undef ELF_NVRREG
82#define ELF_NVRREG ELF_NVRREG32
83
84/*
85 * Functions for flipping sigsets (thanks to brain dead generic
86 * implementation that makes things simple for little endian only)
87 */
88static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
89{
90 return put_compat_sigset(uset, set, sizeof(*uset));
91}
92
93static inline int get_sigset_t(sigset_t *set,
94 const compat_sigset_t __user *uset)
95{
96 return get_compat_sigset(set, uset);
97}
98
99#define to_user_ptr(p) ptr_to_compat(p)
100#define from_user_ptr(p) compat_ptr(p)
101
102static inline int save_general_regs(struct pt_regs *regs,
103 struct mcontext __user *frame)
104{
105 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
106 int i;
107 /* Force usr to alway see softe as 1 (interrupts enabled) */
108 elf_greg_t64 softe = 0x1;
109
110 WARN_ON(!FULL_REGS(regs));
111
112 for (i = 0; i <= PT_RESULT; i ++) {
113 if (i == 14 && !FULL_REGS(regs))
114 i = 32;
115 if ( i == PT_SOFTE) {
116 if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
117 return -EFAULT;
118 else
119 continue;
120 }
121 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
122 return -EFAULT;
123 }
124 return 0;
125}
126
127static inline int restore_general_regs(struct pt_regs *regs,
128 struct mcontext __user *sr)
129{
130 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
131 int i;
132
133 for (i = 0; i <= PT_RESULT; i++) {
134 if ((i == PT_MSR) || (i == PT_SOFTE))
135 continue;
136 if (__get_user(gregs[i], &sr->mc_gregs[i]))
137 return -EFAULT;
138 }
139 return 0;
140}
141
142#else /* CONFIG_PPC64 */
143
144#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
145
146static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
147{
148 return copy_to_user(uset, set, sizeof(*uset));
149}
150
151static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
152{
153 return copy_from_user(set, uset, sizeof(*uset));
154}
155
156#define to_user_ptr(p) ((unsigned long)(p))
157#define from_user_ptr(p) ((void __user *)(p))
158
159static inline int save_general_regs(struct pt_regs *regs,
160 struct mcontext __user *frame)
161{
162 WARN_ON(!FULL_REGS(regs));
163 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
164}
165
166static inline int restore_general_regs(struct pt_regs *regs,
167 struct mcontext __user *sr)
168{
169 /* copy up to but not including MSR */
170 if (__copy_from_user(regs, &sr->mc_gregs,
171 PT_MSR * sizeof(elf_greg_t)))
172 return -EFAULT;
173 /* copy from orig_r3 (the word after the MSR) up to the end */
174 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
175 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
176 return -EFAULT;
177 return 0;
178}
179#endif
180
181/*
182 * When we have signals to deliver, we set up on the
183 * user stack, going down from the original stack pointer:
184 * an ABI gap of 56 words
185 * an mcontext struct
186 * a sigcontext struct
187 * a gap of __SIGNAL_FRAMESIZE bytes
188 *
189 * Each of these things must be a multiple of 16 bytes in size. The following
190 * structure represent all of this except the __SIGNAL_FRAMESIZE gap
191 *
192 */
193struct sigframe {
194 struct sigcontext sctx; /* the sigcontext */
195 struct mcontext mctx; /* all the register values */
196#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
197 struct sigcontext sctx_transact;
198 struct mcontext mctx_transact;
199#endif
200 /*
201 * Programs using the rs6000/xcoff abi can save up to 19 gp
202 * regs and 18 fp regs below sp before decrementing it.
203 */
204 int abigap[56];
205};
206
207/* We use the mc_pad field for the signal return trampoline. */
208#define tramp mc_pad
209
210/*
211 * When we have rt signals to deliver, we set up on the
212 * user stack, going down from the original stack pointer:
213 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
214 * a gap of __SIGNAL_FRAMESIZE+16 bytes
215 * (the +16 is to get the siginfo and ucontext in the same
216 * positions as in older kernels).
217 *
218 * Each of these things must be a multiple of 16 bytes in size.
219 *
220 */
221struct rt_sigframe {
222#ifdef CONFIG_PPC64
223 compat_siginfo_t info;
224#else
225 struct siginfo info;
226#endif
227 struct ucontext uc;
228#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
229 struct ucontext uc_transact;
230#endif
231 /*
232 * Programs using the rs6000/xcoff abi can save up to 19 gp
233 * regs and 18 fp regs below sp before decrementing it.
234 */
235 int abigap[56];
236};
237
238#ifdef CONFIG_VSX
239unsigned long copy_fpr_to_user(void __user *to,
240 struct task_struct *task)
241{
242 u64 buf[ELF_NFPREG];
243 int i;
244
245 /* save FPR copy to local buffer then write to the thread_struct */
246 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
247 buf[i] = task->thread.TS_FPR(i);
248 buf[i] = task->thread.fp_state.fpscr;
249 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
250}
251
252unsigned long copy_fpr_from_user(struct task_struct *task,
253 void __user *from)
254{
255 u64 buf[ELF_NFPREG];
256 int i;
257
258 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
259 return 1;
260 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
261 task->thread.TS_FPR(i) = buf[i];
262 task->thread.fp_state.fpscr = buf[i];
263
264 return 0;
265}
266
267unsigned long copy_vsx_to_user(void __user *to,
268 struct task_struct *task)
269{
270 u64 buf[ELF_NVSRHALFREG];
271 int i;
272
273 /* save FPR copy to local buffer then write to the thread_struct */
274 for (i = 0; i < ELF_NVSRHALFREG; i++)
275 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
276 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
277}
278
279unsigned long copy_vsx_from_user(struct task_struct *task,
280 void __user *from)
281{
282 u64 buf[ELF_NVSRHALFREG];
283 int i;
284
285 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
286 return 1;
287 for (i = 0; i < ELF_NVSRHALFREG ; i++)
288 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
289 return 0;
290}
291
292#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
293unsigned long copy_ckfpr_to_user(void __user *to,
294 struct task_struct *task)
295{
296 u64 buf[ELF_NFPREG];
297 int i;
298
299 /* save FPR copy to local buffer then write to the thread_struct */
300 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
301 buf[i] = task->thread.TS_CKFPR(i);
302 buf[i] = task->thread.ckfp_state.fpscr;
303 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
304}
305
306unsigned long copy_ckfpr_from_user(struct task_struct *task,
307 void __user *from)
308{
309 u64 buf[ELF_NFPREG];
310 int i;
311
312 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
313 return 1;
314 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
315 task->thread.TS_CKFPR(i) = buf[i];
316 task->thread.ckfp_state.fpscr = buf[i];
317
318 return 0;
319}
320
321unsigned long copy_ckvsx_to_user(void __user *to,
322 struct task_struct *task)
323{
324 u64 buf[ELF_NVSRHALFREG];
325 int i;
326
327 /* save FPR copy to local buffer then write to the thread_struct */
328 for (i = 0; i < ELF_NVSRHALFREG; i++)
329 buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
330 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
331}
332
333unsigned long copy_ckvsx_from_user(struct task_struct *task,
334 void __user *from)
335{
336 u64 buf[ELF_NVSRHALFREG];
337 int i;
338
339 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
340 return 1;
341 for (i = 0; i < ELF_NVSRHALFREG ; i++)
342 task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
343 return 0;
344}
345#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
346#else
347inline unsigned long copy_fpr_to_user(void __user *to,
348 struct task_struct *task)
349{
350 return __copy_to_user(to, task->thread.fp_state.fpr,
351 ELF_NFPREG * sizeof(double));
352}
353
354inline unsigned long copy_fpr_from_user(struct task_struct *task,
355 void __user *from)
356{
357 return __copy_from_user(task->thread.fp_state.fpr, from,
358 ELF_NFPREG * sizeof(double));
359}
360
361#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
362inline unsigned long copy_ckfpr_to_user(void __user *to,
363 struct task_struct *task)
364{
365 return __copy_to_user(to, task->thread.ckfp_state.fpr,
366 ELF_NFPREG * sizeof(double));
367}
368
369inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
370 void __user *from)
371{
372 return __copy_from_user(task->thread.ckfp_state.fpr, from,
373 ELF_NFPREG * sizeof(double));
374}
375#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
376#endif
377
378/*
379 * Save the current user registers on the user stack.
380 * We only save the altivec/spe registers if the process has used
381 * altivec/spe instructions at some point.
382 */
383static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
384 struct mcontext __user *tm_frame, int sigret,
385 int ctx_has_vsx_region)
386{
387 unsigned long msr = regs->msr;
388
389 /* Make sure floating point registers are stored in regs */
390 flush_fp_to_thread(current);
391
392 /* save general registers */
393 if (save_general_regs(regs, frame))
394 return 1;
395
396#ifdef CONFIG_ALTIVEC
397 /* save altivec registers */
398 if (current->thread.used_vr) {
399 flush_altivec_to_thread(current);
400 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
401 ELF_NVRREG * sizeof(vector128)))
402 return 1;
403 /* set MSR_VEC in the saved MSR value to indicate that
404 frame->mc_vregs contains valid data */
405 msr |= MSR_VEC;
406 }
407 /* else assert((regs->msr & MSR_VEC) == 0) */
408
409 /* We always copy to/from vrsave, it's 0 if we don't have or don't
410 * use altivec. Since VSCR only contains 32 bits saved in the least
411 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
412 * most significant bits of that same vector. --BenH
413 * Note that the current VRSAVE value is in the SPR at this point.
414 */
415 if (cpu_has_feature(CPU_FTR_ALTIVEC))
416 current->thread.vrsave = mfspr(SPRN_VRSAVE);
417 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
418 return 1;
419#endif /* CONFIG_ALTIVEC */
420 if (copy_fpr_to_user(&frame->mc_fregs, current))
421 return 1;
422
423 /*
424 * Clear the MSR VSX bit to indicate there is no valid state attached
425 * to this context, except in the specific case below where we set it.
426 */
427 msr &= ~MSR_VSX;
428#ifdef CONFIG_VSX
429 /*
430 * Copy VSR 0-31 upper half from thread_struct to local
431 * buffer, then write that to userspace. Also set MSR_VSX in
432 * the saved MSR value to indicate that frame->mc_vregs
433 * contains valid data
434 */
435 if (current->thread.used_vsr && ctx_has_vsx_region) {
436 flush_vsx_to_thread(current);
437 if (copy_vsx_to_user(&frame->mc_vsregs, current))
438 return 1;
439 msr |= MSR_VSX;
440 }
441#endif /* CONFIG_VSX */
442#ifdef CONFIG_SPE
443 /* save spe registers */
444 if (current->thread.used_spe) {
445 flush_spe_to_thread(current);
446 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
447 ELF_NEVRREG * sizeof(u32)))
448 return 1;
449 /* set MSR_SPE in the saved MSR value to indicate that
450 frame->mc_vregs contains valid data */
451 msr |= MSR_SPE;
452 }
453 /* else assert((regs->msr & MSR_SPE) == 0) */
454
455 /* We always copy to/from spefscr */
456 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
457 return 1;
458#endif /* CONFIG_SPE */
459
460 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
461 return 1;
462 /* We need to write 0 the MSR top 32 bits in the tm frame so that we
463 * can check it on the restore to see if TM is active
464 */
465 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
466 return 1;
467
468 if (sigret) {
469 /* Set up the sigreturn trampoline: li 0,sigret; sc */
470 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
471 || __put_user(PPC_INST_SC, &frame->tramp[1]))
472 return 1;
473 flush_icache_range((unsigned long) &frame->tramp[0],
474 (unsigned long) &frame->tramp[2]);
475 }
476
477 return 0;
478}
479
480#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
481/*
482 * Save the current user registers on the user stack.
483 * We only save the altivec/spe registers if the process has used
484 * altivec/spe instructions at some point.
485 * We also save the transactional registers to a second ucontext in the
486 * frame.
487 *
488 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
489 */
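/*
 * Here 'frame' receives the checkpointed (pre-transaction) register state
 * and 'tm_frame' the transactional state live at the time of the signal;
 * restore_tm_user_regs() reads them back in the same arrangement.
 */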
490static int save_tm_user_regs(struct pt_regs *regs,
491 struct mcontext __user *frame,
492 struct mcontext __user *tm_frame, int sigret)
493{
494 unsigned long msr = regs->msr;
495
496 WARN_ON(tm_suspend_disabled);
497
498 /* Remove TM bits from thread's MSR. The MSR in the sigcontext
499 * just indicates to userland that we were doing a transaction, but we
500 * don't want to return in transactional state. This also ensures
501 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
502 */
503 regs->msr &= ~MSR_TS_MASK;
504
505 /* Save both sets of general registers */
506 if (save_general_regs(&current->thread.ckpt_regs, frame)
507 || save_general_regs(regs, tm_frame))
508 return 1;
509
510 /* Stash the top half of the 64bit MSR into the 32bit MSR word
511 * of the transactional mcontext. This way we have a backward-compatible
512 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
513 * also look at what type of transaction (T or S) was active at the
514 * time of the signal.
515 */
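	/*
	 * restore_tm_user_regs() reads this word back on sigreturn, shifts it
	 * left by 32 and uses the TS bits to decide whether to recheckpoint.
	 */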
516 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
517 return 1;
518
519#ifdef CONFIG_ALTIVEC
520 /* save altivec registers */
521 if (current->thread.used_vr) {
522 if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
523 ELF_NVRREG * sizeof(vector128)))
524 return 1;
525 if (msr & MSR_VEC) {
526 if (__copy_to_user(&tm_frame->mc_vregs,
527 &current->thread.vr_state,
528 ELF_NVRREG * sizeof(vector128)))
529 return 1;
530 } else {
531 if (__copy_to_user(&tm_frame->mc_vregs,
532 &current->thread.ckvr_state,
533 ELF_NVRREG * sizeof(vector128)))
534 return 1;
535 }
536
537 /* set MSR_VEC in the saved MSR value to indicate that
538 * frame->mc_vregs contains valid data
539 */
540 msr |= MSR_VEC;
541 }
542
543 /* We always copy to/from vrsave, it's 0 if we don't have or don't
544 * use altivec. Since VSCR only contains 32 bits saved in the least
545 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
546 * most significant bits of that same vector. --BenH
547 */
548 if (cpu_has_feature(CPU_FTR_ALTIVEC))
549 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
550 if (__put_user(current->thread.ckvrsave,
551 (u32 __user *)&frame->mc_vregs[32]))
552 return 1;
553 if (msr & MSR_VEC) {
554 if (__put_user(current->thread.vrsave,
555 (u32 __user *)&tm_frame->mc_vregs[32]))
556 return 1;
557 } else {
558 if (__put_user(current->thread.ckvrsave,
559 (u32 __user *)&tm_frame->mc_vregs[32]))
560 return 1;
561 }
562#endif /* CONFIG_ALTIVEC */
563
564 if (copy_ckfpr_to_user(&frame->mc_fregs, current))
565 return 1;
566 if (msr & MSR_FP) {
567 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
568 return 1;
569 } else {
570 if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
571 return 1;
572 }
573
574#ifdef CONFIG_VSX
575 /*
576 * Copy VSR 0-31 upper half from thread_struct to local
577 * buffer, then write that to userspace. Also set MSR_VSX in
578 * the saved MSR value to indicate that frame->mc_vregs
579 * contains valid data
580 */
581 if (current->thread.used_vsr) {
582 if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
583 return 1;
584 if (msr & MSR_VSX) {
585 if (copy_vsx_to_user(&tm_frame->mc_vsregs,
586 current))
587 return 1;
588 } else {
589 if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
590 return 1;
591 }
592
593 msr |= MSR_VSX;
594 }
595#endif /* CONFIG_VSX */
596#ifdef CONFIG_SPE
597 /* SPE regs are not checkpointed with TM, so this section is
598 * simply the same as in save_user_regs().
599 */
600 if (current->thread.used_spe) {
601 flush_spe_to_thread(current);
602 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
603 ELF_NEVRREG * sizeof(u32)))
604 return 1;
605 /* set MSR_SPE in the saved MSR value to indicate that
606 * frame->mc_vregs contains valid data */
607 msr |= MSR_SPE;
608 }
609
610 /* We always copy to/from spefscr */
611 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
612 return 1;
613#endif /* CONFIG_SPE */
614
615 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
616 return 1;
617 if (sigret) {
618 /* Set up the sigreturn trampoline: li 0,sigret; sc */
619 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
620 || __put_user(PPC_INST_SC, &frame->tramp[1]))
621 return 1;
622 flush_icache_range((unsigned long) &frame->tramp[0],
623 (unsigned long) &frame->tramp[2]);
624 }
625
626 return 0;
627}
628#endif
629
630/*
631 * Restore the current user register values from the user stack,
632 * (except for MSR).
633 */
634static long restore_user_regs(struct pt_regs *regs,
635 struct mcontext __user *sr, int sig)
636{
637 long err;
638 unsigned int save_r2 = 0;
639 unsigned long msr;
640#ifdef CONFIG_VSX
641 int i;
642#endif
643
644 /*
645 * restore general registers but not including MSR or SOFTE. Also
646 * take care of keeping r2 (TLS) intact if not a signal
647 */
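	/*
	 * Note: sig == 0 means we were called for swapcontext() rather than
	 * a signal return, so r2 (the TLS pointer) and the little-endian
	 * mode bit are left untouched below.
	 */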
648 if (!sig)
649 save_r2 = (unsigned int)regs->gpr[2];
650 err = restore_general_regs(regs, sr);
651 regs->trap = 0;
652 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
653 if (!sig)
654 regs->gpr[2] = (unsigned long) save_r2;
655 if (err)
656 return 1;
657
658 /* if doing signal return, restore the previous little-endian mode */
659 if (sig)
660 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
661
662#ifdef CONFIG_ALTIVEC
663 /*
664 * Force the process to reload the altivec registers from
665 * current->thread when it next does altivec instructions
666 */
667 regs->msr &= ~MSR_VEC;
668 if (msr & MSR_VEC) {
669 /* restore altivec registers from the stack */
670 if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
671 sizeof(sr->mc_vregs)))
672 return 1;
673 current->thread.used_vr = true;
674 } else if (current->thread.used_vr)
675 memset(&current->thread.vr_state, 0,
676 ELF_NVRREG * sizeof(vector128));
677
678 /* Always get VRSAVE back */
679 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
680 return 1;
681 if (cpu_has_feature(CPU_FTR_ALTIVEC))
682 mtspr(SPRN_VRSAVE, current->thread.vrsave);
683#endif /* CONFIG_ALTIVEC */
684 if (copy_fpr_from_user(current, &sr->mc_fregs))
685 return 1;
686
687#ifdef CONFIG_VSX
688 /*
689 * Force the process to reload the VSX registers from
690 * current->thread when it next executes a VSX instruction.
691 */
692 regs->msr &= ~MSR_VSX;
693 if (msr & MSR_VSX) {
694 /*
695 * Restore the VSX registers from the stack to a local
696 * buffer, then write this out to the thread_struct
697 */
698 if (copy_vsx_from_user(current, &sr->mc_vsregs))
699 return 1;
700 current->thread.used_vsr = true;
701 } else if (current->thread.used_vsr)
702 for (i = 0; i < 32 ; i++)
703 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
704#endif /* CONFIG_VSX */
705 /*
706 * force the process to reload the FP registers from
707 * current->thread when it next does FP instructions
708 */
709 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
710
711#ifdef CONFIG_SPE
712 /* force the process to reload the spe registers from
713 current->thread when it next does spe instructions */
714 regs->msr &= ~MSR_SPE;
715 if (msr & MSR_SPE) {
716 /* restore spe registers from the stack */
717 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
718 ELF_NEVRREG * sizeof(u32)))
719 return 1;
720 current->thread.used_spe = true;
721 } else if (current->thread.used_spe)
722 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
723
724 /* Always get SPEFSCR back */
725 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
726 return 1;
727#endif /* CONFIG_SPE */
728
729 return 0;
730}
731
732#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
733/*
734 * Restore the current user register values from the user stack, except for
735 * MSR, and recheckpoint the original checkpointed register state for processes
736 * in transactions.
737 */
738static long restore_tm_user_regs(struct pt_regs *regs,
739 struct mcontext __user *sr,
740 struct mcontext __user *tm_sr)
741{
742 long err;
743 unsigned long msr, msr_hi;
744#ifdef CONFIG_VSX
745 int i;
746#endif
747
748 if (tm_suspend_disabled)
749 return 1;
750 /*
751 * restore general registers but not including MSR or SOFTE. Also
752 * take care of keeping r2 (TLS) intact if not a signal.
753 * See comment in signal_64.c:restore_tm_sigcontexts();
754 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
755 * were set by the signal delivery.
756 */
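	/*
	 * Pairing with save_tm_user_regs(): 'sr' holds the checkpointed
	 * (pre-transaction) state and 'tm_sr' the transactional state at
	 * the time of the signal.
	 */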
757 err = restore_general_regs(regs, tm_sr);
758 err |= restore_general_regs(&current->thread.ckpt_regs, sr);
759
760 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
761
762 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
763 if (err)
764 return 1;
765
766 /* Restore the previous little-endian mode */
767 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
768
769#ifdef CONFIG_ALTIVEC
770 regs->msr &= ~MSR_VEC;
771 if (msr & MSR_VEC) {
772 /* restore altivec registers from the stack */
773 if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
774 sizeof(sr->mc_vregs)) ||
775 __copy_from_user(&current->thread.vr_state,
776 &tm_sr->mc_vregs,
777 sizeof(sr->mc_vregs)))
778 return 1;
779 current->thread.used_vr = true;
780 } else if (current->thread.used_vr) {
781 memset(&current->thread.vr_state, 0,
782 ELF_NVRREG * sizeof(vector128));
783 memset(&current->thread.ckvr_state, 0,
784 ELF_NVRREG * sizeof(vector128));
785 }
786
787 /* Always get VRSAVE back */
788 if (__get_user(current->thread.ckvrsave,
789 (u32 __user *)&sr->mc_vregs[32]) ||
790 __get_user(current->thread.vrsave,
791 (u32 __user *)&tm_sr->mc_vregs[32]))
792 return 1;
793 if (cpu_has_feature(CPU_FTR_ALTIVEC))
794 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
795#endif /* CONFIG_ALTIVEC */
796
797 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
798
799 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
800 copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
801 return 1;
802
803#ifdef CONFIG_VSX
804 regs->msr &= ~MSR_VSX;
805 if (msr & MSR_VSX) {
806 /*
807 * Restore the VSX registers from the stack to a local
808 * buffer, then write this out to the thread_struct
809 */
810 if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
811 copy_ckvsx_from_user(current, &sr->mc_vsregs))
812 return 1;
813 current->thread.used_vsr = true;
814 } else if (current->thread.used_vsr)
815 for (i = 0; i < 32 ; i++) {
816 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
817 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
818 }
819#endif /* CONFIG_VSX */
820
821#ifdef CONFIG_SPE
822 /* SPE regs are not checkpointed with TM, so this section is
823 * simply the same as in restore_user_regs().
824 */
825 regs->msr &= ~MSR_SPE;
826 if (msr & MSR_SPE) {
827 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
828 ELF_NEVRREG * sizeof(u32)))
829 return 1;
830 current->thread.used_spe = true;
831 } else if (current->thread.used_spe)
832 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
833
834 /* Always get SPEFSCR back */
835 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
836 + ELF_NEVRREG))
837 return 1;
838#endif /* CONFIG_SPE */
839
840 /* Get the top half of the MSR from the user context */
841 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
842 return 1;
843 msr_hi <<= 32;
844 /* If TM bits are set to the reserved value, it's an invalid context */
845 if (MSR_TM_RESV(msr_hi))
846 return 1;
847
848 /*
849 * Disabling preemption, since it is unsafe to be preempted
850 * with MSR[TS] set without recheckpointing.
851 */
852 preempt_disable();
853
854 /*
855 * CAUTION:
856 * After regs->MSR[TS] being updated, make sure that get_user(),
857 * put_user() or similar functions are *not* called. These
858 * functions can generate page faults which will cause the process
859 * to be de-scheduled with MSR[TS] set but without calling
860 * tm_recheckpoint(). This can cause a bug.
861 *
862 * Pull in the MSR TM bits from the user context
863 */
864 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
865 /* Now, recheckpoint. This loads up all of the checkpointed (older)
866 * registers, including FP and V[S]Rs. After recheckpointing, the
867 * transactional versions should be loaded.
868 */
869 tm_enable();
870 /* Make sure the transaction is marked as failed */
871 current->thread.tm_texasr |= TEXASR_FS;
872 /* This loads the checkpointed FP/VEC state, if used */
873 tm_recheckpoint(&current->thread);
874
875 /* This loads the speculative FP/VEC state, if used */
876 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
877 if (msr & MSR_FP) {
878 load_fp_state(&current->thread.fp_state);
879 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
880 }
881#ifdef CONFIG_ALTIVEC
882 if (msr & MSR_VEC) {
883 load_vr_state(&current->thread.vr_state);
884 regs->msr |= MSR_VEC;
885 }
886#endif
887
888 preempt_enable();
889
890 return 0;
891}
892#endif
893
894#ifdef CONFIG_PPC64
895
896#define copy_siginfo_to_user copy_siginfo_to_user32
897
898#endif /* CONFIG_PPC64 */
899
900/*
901 * Set up a signal frame for a "real-time" signal handler
902 * (one which gets siginfo).
903 */
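/*
 * Rough picture of the user stack set up below (higher addresses first):
 *
 *	[ caller's stack ]                 <- old regs->gpr[1]
 *	[ struct rt_sigframe ]             <- rt_sf (siginfo + ucontexts)
 *	[ __SIGNAL_FRAMESIZE + 16 bytes ]     frame for the handler's caller
 *	[ back chain to old gpr[1] ]       <- new regs->gpr[1] (newsp)
 */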
904int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
905 struct task_struct *tsk)
906{
907 struct rt_sigframe __user *rt_sf;
908 struct mcontext __user *frame;
909 struct mcontext __user *tm_frame = NULL;
910 void __user *addr;
911 unsigned long newsp = 0;
912 int sigret;
913 unsigned long tramp;
914 struct pt_regs *regs = tsk->thread.regs;
915
916 BUG_ON(tsk != current);
917
918 /* Set up Signal Frame */
919 /* Put a Real Time Context onto stack */
920 rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
921 addr = rt_sf;
922 if (unlikely(rt_sf == NULL))
923 goto badframe;
924
925 /* Put the siginfo & fill in most of the ucontext */
926 if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
927 || __put_user(0, &rt_sf->uc.uc_flags)
928 || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
929 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
930 &rt_sf->uc.uc_regs)
931 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
932 goto badframe;
933
934 /* Save user registers on the stack */
935 frame = &rt_sf->uc.uc_mcontext;
936 addr = frame;
937 if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
938 sigret = 0;
939 tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
940 } else {
941 sigret = __NR_rt_sigreturn;
942 tramp = (unsigned long) frame->tramp;
943 }
944
945#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
946 tm_frame = &rt_sf->uc_transact.uc_mcontext;
947 if (MSR_TM_ACTIVE(regs->msr)) {
948 if (__put_user((unsigned long)&rt_sf->uc_transact,
949 &rt_sf->uc.uc_link) ||
950 __put_user((unsigned long)tm_frame,
951 &rt_sf->uc_transact.uc_regs))
952 goto badframe;
953 if (save_tm_user_regs(regs, frame, tm_frame, sigret))
954 goto badframe;
955 }
956 else
957#endif
958 {
959 if (__put_user(0, &rt_sf->uc.uc_link))
960 goto badframe;
961 if (save_user_regs(regs, frame, tm_frame, sigret, 1))
962 goto badframe;
963 }
964 regs->link = tramp;
965
966 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
967
968 /* create a stack frame for the caller of the handler */
969 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
970 addr = (void __user *)regs->gpr[1];
971 if (put_user(regs->gpr[1], (u32 __user *)newsp))
972 goto badframe;
973
974 /* Fill registers for signal handler */
975 regs->gpr[1] = newsp;
976 regs->gpr[3] = ksig->sig;
977 regs->gpr[4] = (unsigned long) &rt_sf->info;
978 regs->gpr[5] = (unsigned long) &rt_sf->uc;
979 regs->gpr[6] = (unsigned long) rt_sf;
980 regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
981 /* enter the signal handler in native-endian mode */
982 regs->msr &= ~MSR_LE;
983 regs->msr |= (MSR_KERNEL & MSR_LE);
984 return 0;
985
986badframe:
987 if (show_unhandled_signals)
988 printk_ratelimited(KERN_INFO
989 "%s[%d]: bad frame in handle_rt_signal32: "
990 "%p nip %08lx lr %08lx\n",
991 tsk->comm, tsk->pid,
992 addr, regs->nip, regs->link);
993
994 return 1;
995}
996
997static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
998{
999 sigset_t set;
1000 struct mcontext __user *mcp;
1001
1002 if (get_sigset_t(&set, &ucp->uc_sigmask))
1003 return -EFAULT;
1004#ifdef CONFIG_PPC64
1005 {
1006 u32 cmcp;
1007
1008 if (__get_user(cmcp, &ucp->uc_regs))
1009 return -EFAULT;
1010 mcp = (struct mcontext __user *)(u64)cmcp;
1011 /* no need to check access_ok(mcp), since mcp < 4GB */
1012 }
1013#else
1014 if (__get_user(mcp, &ucp->uc_regs))
1015 return -EFAULT;
1016 if (!access_ok(mcp, sizeof(*mcp)))
1017 return -EFAULT;
1018#endif
1019 set_current_blocked(&set);
1020 if (restore_user_regs(regs, mcp, sig))
1021 return -EFAULT;
1022
1023 return 0;
1024}
1025
1026#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1027static int do_setcontext_tm(struct ucontext __user *ucp,
1028 struct ucontext __user *tm_ucp,
1029 struct pt_regs *regs)
1030{
1031 sigset_t set;
1032 struct mcontext __user *mcp;
1033 struct mcontext __user *tm_mcp;
1034 u32 cmcp;
1035 u32 tm_cmcp;
1036
1037 if (get_sigset_t(&set, &ucp->uc_sigmask))
1038 return -EFAULT;
1039
1040 if (__get_user(cmcp, &ucp->uc_regs) ||
1041 __get_user(tm_cmcp, &tm_ucp->uc_regs))
1042 return -EFAULT;
1043 mcp = (struct mcontext __user *)(u64)cmcp;
1044 tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
1045 /* no need to check access_ok(mcp), since mcp < 4GB */
1046
1047 set_current_blocked(&set);
1048 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1049 return -EFAULT;
1050
1051 return 0;
1052}
1053#endif
1054
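/*
 * A minimal userspace sketch of how this syscall can be invoked (a
 * hypothetical illustration, not kernel code):
 *
 *	struct ucontext old, new;
 *	// ... fill in new.uc_regs and the mcontext it points to ...
 *	syscall(__NR_swapcontext, &old, &new, sizeof(struct ucontext));
 *
 * The ctx_size checks below reject contexts that are too small for the
 * state they claim to carry.
 */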
1055#ifdef CONFIG_PPC64
1056COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1057 struct ucontext __user *, new_ctx, int, ctx_size)
1058#else
1059SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1060 struct ucontext __user *, new_ctx, long, ctx_size)
1061#endif
1062{
1063 struct pt_regs *regs = current_pt_regs();
1064 int ctx_has_vsx_region = 0;
1065
1066#ifdef CONFIG_PPC64
1067 unsigned long new_msr = 0;
1068
1069 if (new_ctx) {
1070 struct mcontext __user *mcp;
1071 u32 cmcp;
1072
1073 /*
1074 * Get pointer to the real mcontext. No need for
1075 * access_ok since we are dealing with compat
1076 * pointers.
1077 */
1078 if (__get_user(cmcp, &new_ctx->uc_regs))
1079 return -EFAULT;
1080 mcp = (struct mcontext __user *)(u64)cmcp;
1081 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1082 return -EFAULT;
1083 }
1084 /*
1085 * Check that the context is not smaller than the original
1086 * size (with VMX but without VSX)
1087 */
1088 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1089 return -EINVAL;
1090 /*
1091 * Reject the new context if it sets the MSR VSX bits but
1092 * doesn't provide the VSX state.
1093 */
1094 if ((ctx_size < sizeof(struct ucontext)) &&
1095 (new_msr & MSR_VSX))
1096 return -EINVAL;
1097 /* Does the context have enough room to store VSX data? */
1098 if (ctx_size >= sizeof(struct ucontext))
1099 ctx_has_vsx_region = 1;
1100#else
1101 /* Context size is for future use. Right now, we only make sure
1102 * we are passed something we understand
1103 */
1104 if (ctx_size < sizeof(struct ucontext))
1105 return -EINVAL;
1106#endif
1107 if (old_ctx != NULL) {
1108 struct mcontext __user *mctx;
1109
1110 /*
1111 * old_ctx might not be 16-byte aligned, in which
1112 * case old_ctx->uc_mcontext won't be either.
1113 * Because we have the old_ctx->uc_pad2 field
1114 * before old_ctx->uc_mcontext, we need to round down
1115 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1116 */
1117 mctx = (struct mcontext __user *)
1118 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1119 if (!access_ok(old_ctx, ctx_size)
1120 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
1121 || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
1122 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1123 return -EFAULT;
1124 }
1125 if (new_ctx == NULL)
1126 return 0;
1127 if (!access_ok(new_ctx, ctx_size) ||
1128 fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
1129 return -EFAULT;
1130
1131 /*
1132 * If we get a fault copying the context into the kernel's
1133 * image of the user's registers, we can't just return -EFAULT
1134 * because the user's registers will be corrupted. For instance
1135 * the NIP value may have been updated but not some of the
1136 * other registers. Given that we have done the access_ok
1137 * and successfully read the first and last bytes of the region
1138 * above, this should only happen in an out-of-memory situation
1139 * or if another thread unmaps the region containing the context.
1140 * We kill the task with a SIGSEGV in this situation.
1141 */
1142 if (do_setcontext(new_ctx, regs, 0))
1143 do_exit(SIGSEGV);
1144
1145 set_thread_flag(TIF_RESTOREALL);
1146 return 0;
1147}
1148
1149#ifdef CONFIG_PPC64
1150COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
1151#else
1152SYSCALL_DEFINE0(rt_sigreturn)
1153#endif
1154{
1155 struct rt_sigframe __user *rt_sf;
1156 struct pt_regs *regs = current_pt_regs();
1157 int tm_restore = 0;
1158#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1159 struct ucontext __user *uc_transact;
1160 unsigned long msr_hi;
1161 unsigned long tmp;
1162#endif
1163 /* Always make any pending restarted system calls return -EINTR */
1164 current->restart_block.fn = do_no_restart_syscall;
1165
1166 rt_sf = (struct rt_sigframe __user *)
1167 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1168 if (!access_ok(rt_sf, sizeof(*rt_sf)))
1169 goto bad;
1170
1171#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1172 /*
1173 * If there is a transactional state then throw it away.
1174 * The purpose of a sigreturn is to destroy all traces of the
1175 * signal frame; this includes any transactional state created
1176 * within it. We only check for suspended, as we can never be
1177 * active in the kernel; if we somehow are, there is nothing better
1178 * to do than go ahead and hit a Bad Thing later.
1179 * The cause is not important as there will never be a
1180 * recheckpoint so it's not user visible.
1181 */
1182 if (MSR_TM_SUSPENDED(mfmsr()))
1183 tm_reclaim_current(0);
1184
1185 if (__get_user(tmp, &rt_sf->uc.uc_link))
1186 goto bad;
1187 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1188 if (uc_transact) {
1189 u32 cmcp;
1190 struct mcontext __user *mcp;
1191
1192 if (__get_user(cmcp, &uc_transact->uc_regs))
1193 return -EFAULT;
1194 mcp = (struct mcontext __user *)(u64)cmcp;
1195 /* The top 32 bits of the MSR are stashed in the transactional
1196 * ucontext. */
1197 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1198 goto bad;
1199
1200 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1201 /* Trying to start TM on a non-TM system */
1202 if (!cpu_has_feature(CPU_FTR_TM))
1203 goto bad;
1204 /* We only recheckpoint on return if we're in a
1205 * transaction.
1206 */
1207 tm_restore = 1;
1208 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1209 goto bad;
1210 }
1211 }
1212 if (!tm_restore) {
1213 /*
1214 * Clear the MSR TS bits in regs->msr because the ucontext MSR TS is
1215 * not set and recheckpoint was not called. This avoids
1216 * hitting a TM Bad Thing at RFID.
1217 */
1218 regs->msr &= ~MSR_TS_MASK;
1219 }
1220 /* Fall through, for non-TM restore */
1221#endif
1222 if (!tm_restore)
1223 if (do_setcontext(&rt_sf->uc, regs, 1))
1224 goto bad;
1225
1226 /*
1227 * It's not clear whether or why it is desirable to save the
1228 * sigaltstack setting on signal delivery and restore it on
1229 * signal return. But other architectures do this and we have
1230 * always done it up until now so it is probably better not to
1231 * change it. -- paulus
1232 */
1233#ifdef CONFIG_PPC64
1234 if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1235 goto bad;
1236#else
1237 if (restore_altstack(&rt_sf->uc.uc_stack))
1238 goto bad;
1239#endif
1240 set_thread_flag(TIF_RESTOREALL);
1241 return 0;
1242
1243 bad:
1244 if (show_unhandled_signals)
1245 printk_ratelimited(KERN_INFO
1246 "%s[%d]: bad frame in sys_rt_sigreturn: "
1247 "%p nip %08lx lr %08lx\n",
1248 current->comm, current->pid,
1249 rt_sf, regs->nip, regs->link);
1250
1251 force_sig(SIGSEGV);
1252 return 0;
1253}
1254
1255#ifdef CONFIG_PPC32
1256SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
1257 int, ndbg, struct sig_dbg_op __user *, dbg)
1258{
1259 struct pt_regs *regs = current_pt_regs();
1260 struct sig_dbg_op op;
1261 int i;
1262 unsigned long new_msr = regs->msr;
1263#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1264 unsigned long new_dbcr0 = current->thread.debug.dbcr0;
1265#endif
1266
1267 for (i=0; i<ndbg; i++) {
1268 if (copy_from_user(&op, dbg + i, sizeof(op)))
1269 return -EFAULT;
1270 switch (op.dbg_type) {
1271 case SIG_DBG_SINGLE_STEPPING:
1272#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1273 if (op.dbg_value) {
1274 new_msr |= MSR_DE;
1275 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1276 } else {
1277 new_dbcr0 &= ~DBCR0_IC;
1278 if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1279 current->thread.debug.dbcr1)) {
1280 new_msr &= ~MSR_DE;
1281 new_dbcr0 &= ~DBCR0_IDM;
1282 }
1283 }
1284#else
1285 if (op.dbg_value)
1286 new_msr |= MSR_SE;
1287 else
1288 new_msr &= ~MSR_SE;
1289#endif
1290 break;
1291 case SIG_DBG_BRANCH_TRACING:
1292#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1293 return -EINVAL;
1294#else
1295 if (op.dbg_value)
1296 new_msr |= MSR_BE;
1297 else
1298 new_msr &= ~MSR_BE;
1299#endif
1300 break;
1301
1302 default:
1303 return -EINVAL;
1304 }
1305 }
1306
1307 /* We wait until here to actually install the values in the
1308 registers so if we fail in the above loop, it will not
1309 affect the contents of these registers. After this point,
1310 failure is a problem, anyway, and it's very unlikely unless
1311 the user is really doing something wrong. */
1312 regs->msr = new_msr;
1313#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1314 current->thread.debug.dbcr0 = new_dbcr0;
1315#endif
1316
1317 if (!access_ok(ctx, sizeof(*ctx)) ||
1318 fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
1319 return -EFAULT;
1320
1321 /*
1322 * If we get a fault copying the context into the kernel's
1323 * image of the user's registers, we can't just return -EFAULT
1324 * because the user's registers will be corrupted. For instance
1325 * the NIP value may have been updated but not some of the
1326 * other registers. Given that we have done the access_ok
1327 * and successfully read the first and last bytes of the region
1328 * above, this should only happen in an out-of-memory situation
1329 * or if another thread unmaps the region containing the context.
1330 * We kill the task with a SIGSEGV in this situation.
1331 */
1332 if (do_setcontext(ctx, regs, 1)) {
1333 if (show_unhandled_signals)
1334 printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1335 "sys_debug_setcontext: %p nip %08lx "
1336 "lr %08lx\n",
1337 current->comm, current->pid,
1338 ctx, regs->nip, regs->link);
1339
1340 force_sig(SIGSEGV);
1341 goto out;
1342 }
1343
1344 /*
1345 * It's not clear whether or why it is desirable to save the
1346 * sigaltstack setting on signal delivery and restore it on
1347 * signal return. But other architectures do this and we have
1348 * always done it up until now so it is probably better not to
1349 * change it. -- paulus
1350 */
1351 restore_altstack(&ctx->uc_stack);
1352
1353 set_thread_flag(TIF_RESTOREALL);
1354 out:
1355 return 0;
1356}
1357#endif
1358
1359/*
1360 * OK, we're invoking a handler
1361 */
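/*
 * This is the legacy, non-RT path: the handler gets a bare sigcontext
 * rather than the siginfo/ucontext pair set up by handle_rt_signal32().
 */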
1362int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
1363 struct task_struct *tsk)
1364{
1365 struct sigcontext __user *sc;
1366 struct sigframe __user *frame;
1367 struct mcontext __user *tm_mctx = NULL;
1368 unsigned long newsp = 0;
1369 int sigret;
1370 unsigned long tramp;
1371 struct pt_regs *regs = tsk->thread.regs;
1372
1373 BUG_ON(tsk != current);
1374
1375 /* Set up Signal Frame */
1376 frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
1377 if (unlikely(frame == NULL))
1378 goto badframe;
1379 sc = (struct sigcontext __user *) &frame->sctx;
1380
1381#if _NSIG != 64
1382#error "Please adjust handle_signal()"
1383#endif
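	/*
	 * The 64-bit signal mask does not fit the old sigcontext's 32-bit
	 * oldmask word, so the remaining half is stashed in sc->_unused[3];
	 * sys_sigreturn reassembles the full mask from both fields.
	 */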
1384 if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
1385 || __put_user(oldset->sig[0], &sc->oldmask)
1386#ifdef CONFIG_PPC64
1387 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1388#else
1389 || __put_user(oldset->sig[1], &sc->_unused[3])
1390#endif
1391 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1392 || __put_user(ksig->sig, &sc->signal))
1393 goto badframe;
1394
1395 if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
1396 sigret = 0;
1397 tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
1398 } else {
1399 sigret = __NR_sigreturn;
1400 tramp = (unsigned long) frame->mctx.tramp;
1401 }
1402
1403#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1404 tm_mctx = &frame->mctx_transact;
1405 if (MSR_TM_ACTIVE(regs->msr)) {
1406 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1407 sigret))
1408 goto badframe;
1409 }
1410 else
1411#endif
1412 {
1413 if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
1414 goto badframe;
1415 }
1416
1417 regs->link = tramp;
1418
1419 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
1420
1421 /* create a stack frame for the caller of the handler */
1422 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1423 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1424 goto badframe;
1425
1426 regs->gpr[1] = newsp;
1427 regs->gpr[3] = ksig->sig;
1428 regs->gpr[4] = (unsigned long) sc;
1429 regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
1430 /* enter the signal handler in big-endian mode */
1431 regs->msr &= ~MSR_LE;
1432 return 0;
1433
1434badframe:
1435 if (show_unhandled_signals)
1436 printk_ratelimited(KERN_INFO
1437 "%s[%d]: bad frame in handle_signal32: "
1438 "%p nip %08lx lr %08lx\n",
1439 tsk->comm, tsk->pid,
1440 frame, regs->nip, regs->link);
1441
1442 return 1;
1443}
1444
1445/*
1446 * Do a signal return; undo the signal stack.
1447 */
1448#ifdef CONFIG_PPC64
1449COMPAT_SYSCALL_DEFINE0(sigreturn)
1450#else
1451SYSCALL_DEFINE0(sigreturn)
1452#endif
1453{
1454 struct pt_regs *regs = current_pt_regs();
1455 struct sigframe __user *sf;
1456 struct sigcontext __user *sc;
1457 struct sigcontext sigctx;
1458 struct mcontext __user *sr;
1459 void __user *addr;
1460 sigset_t set;
1461#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1462 struct mcontext __user *mcp, *tm_mcp;
1463 unsigned long msr_hi;
1464#endif
1465
1466 /* Always make any pending restarted system calls return -EINTR */
1467 current->restart_block.fn = do_no_restart_syscall;
1468
1469 sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1470 sc = &sf->sctx;
1471 addr = sc;
1472 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1473 goto badframe;
1474
1475#ifdef CONFIG_PPC64
1476 /*
1477 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1478 * unused part of the signal stackframe
1479 */
1480 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1481#else
1482 set.sig[0] = sigctx.oldmask;
1483 set.sig[1] = sigctx._unused[3];
1484#endif
1485 set_current_blocked(&set);
1486
1487#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1488 mcp = (struct mcontext __user *)&sf->mctx;
1489 tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1490 if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1491 goto badframe;
1492 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1493 if (!cpu_has_feature(CPU_FTR_TM))
1494 goto badframe;
1495 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1496 goto badframe;
1497 } else
1498#endif
1499 {
1500 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1501 addr = sr;
1502 if (!access_ok(sr, sizeof(*sr))
1503 || restore_user_regs(regs, sr, 1))
1504 goto badframe;
1505 }
1506
1507 set_thread_flag(TIF_RESTOREALL);
1508 return 0;
1509
1510badframe:
1511 if (show_unhandled_signals)
1512 printk_ratelimited(KERN_INFO
1513 "%s[%d]: bad frame in sys_sigreturn: "
1514 "%p nip %08lx lr %08lx\n",
1515 current->comm, current->pid,
1516 addr, regs->nip, regs->link);
1517
1518 force_sig(SIGSEGV);
1519 return 0;
1520}