v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Copyright (C) 1991, 1992  Linus Torvalds
  4 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
  5 *
  6 *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
  7 *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
  8 *  2000-2002   x86-64 support by Andi Kleen
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12
 13#include <linux/sched.h>
 14#include <linux/sched/task_stack.h>
 15#include <linux/mm.h>
 16#include <linux/smp.h>
 17#include <linux/kernel.h>
 18#include <linux/errno.h>
 19#include <linux/wait.h>
 20#include <linux/tracehook.h>
 21#include <linux/unistd.h>
 22#include <linux/stddef.h>
 23#include <linux/personality.h>
 24#include <linux/uaccess.h>
 25#include <linux/user-return-notifier.h>
 26#include <linux/uprobes.h>
 27#include <linux/context_tracking.h>
 28#include <linux/syscalls.h>
 29
 30#include <asm/processor.h>
 31#include <asm/ucontext.h>
 32#include <asm/fpu/internal.h>
 33#include <asm/fpu/signal.h>
 34#include <asm/vdso.h>
 35#include <asm/mce.h>
 36#include <asm/sighandling.h>
 37#include <asm/vm86.h>
 38
 39#ifdef CONFIG_X86_64
 40#include <asm/proto.h>
 41#include <asm/ia32_unistd.h>
 42#endif /* CONFIG_X86_64 */
 43
 44#include <asm/syscall.h>
 45#include <asm/syscalls.h>
 46
 47#include <asm/sigframe.h>
 48#include <asm/signal.h>
 49
 50#define COPY(x)			do {			\
 51	get_user_ex(regs->x, &sc->x);			\
 52} while (0)
 53
 54#define GET_SEG(seg)		({			\
 55	unsigned short tmp;				\
 56	get_user_ex(tmp, &sc->seg);			\
 57	tmp;						\
 58})
 59
 60#define COPY_SEG(seg)		do {			\
 61	regs->seg = GET_SEG(seg);			\
 62} while (0)
 63
 64#define COPY_SEG_CPL3(seg)	do {			\
 65	regs->seg = GET_SEG(seg) | 3;			\
 66} while (0)
 67
 68#ifdef CONFIG_X86_64
 69/*
 70 * If regs->ss will cause an IRET fault, change it.  Otherwise leave it
 71 * alone.  Using this generally makes no sense unless
 72 * user_64bit_mode(regs) would return true.
 73 */
 74static void force_valid_ss(struct pt_regs *regs)
 75{
 76	u32 ar;
 77	asm volatile ("lar %[old_ss], %[ar]\n\t"
 78		      "jz 1f\n\t"		/* If invalid: */
 79		      "xorl %[ar], %[ar]\n\t"	/* set ar = 0 */
 80		      "1:"
 81		      : [ar] "=r" (ar)
 82		      : [old_ss] "rm" ((u16)regs->ss));
 83
 84	/*
 85	 * For a valid 64-bit user context, we need DPL 3, type
 86	 * read-write data or read-write exp-down data, and S and P
 87	 * set.  We can't use VERW because VERW doesn't check the
 88	 * P bit.
 89	 */
 90	ar &= AR_DPL_MASK | AR_S | AR_P | AR_TYPE_MASK;
 91	if (ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA) &&
 92	    ar != (AR_DPL3 | AR_S | AR_P | AR_TYPE_RWDATA_EXPDOWN))
 93		regs->ss = __USER_DS;
 94}
 95#endif
 96
 97static int restore_sigcontext(struct pt_regs *regs,
 98			      struct sigcontext __user *sc,
 99			      unsigned long uc_flags)
100{
101	unsigned long buf_val;
102	void __user *buf;
103	unsigned int tmpflags;
104	unsigned int err = 0;
105
106	/* Always make any pending restarted system calls return -EINTR */
107	current->restart_block.fn = do_no_restart_syscall;
108
109	get_user_try {
110
111#ifdef CONFIG_X86_32
112		set_user_gs(regs, GET_SEG(gs));
113		COPY_SEG(fs);
114		COPY_SEG(es);
115		COPY_SEG(ds);
116#endif /* CONFIG_X86_32 */
117
118		COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
119		COPY(dx); COPY(cx); COPY(ip); COPY(ax);
120
121#ifdef CONFIG_X86_64
122		COPY(r8);
123		COPY(r9);
124		COPY(r10);
125		COPY(r11);
126		COPY(r12);
127		COPY(r13);
128		COPY(r14);
129		COPY(r15);
130#endif /* CONFIG_X86_64 */
131
132		COPY_SEG_CPL3(cs);
133		COPY_SEG_CPL3(ss);
134
135		get_user_ex(tmpflags, &sc->flags);
136		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
137		regs->orig_ax = -1;		/* disable syscall checks */
138
139		get_user_ex(buf_val, &sc->fpstate);
140		buf = (void __user *)buf_val;
141	} get_user_catch(err);
142
143#ifdef CONFIG_X86_64
144	/*
145	 * Fix up SS if needed for the benefit of old DOSEMU and
146	 * CRIU.
147	 */
148	if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
149		force_valid_ss(regs);
150#endif
151
152	err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
153
154	force_iret();
155
156	return err;
157}
158
159int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
160		     struct pt_regs *regs, unsigned long mask)
161{
162	int err = 0;
163
164	put_user_try {
165
166#ifdef CONFIG_X86_32
167		put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs);
168		put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
169		put_user_ex(regs->es, (unsigned int __user *)&sc->es);
170		put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
171#endif /* CONFIG_X86_32 */
172
173		put_user_ex(regs->di, &sc->di);
174		put_user_ex(regs->si, &sc->si);
175		put_user_ex(regs->bp, &sc->bp);
176		put_user_ex(regs->sp, &sc->sp);
177		put_user_ex(regs->bx, &sc->bx);
178		put_user_ex(regs->dx, &sc->dx);
179		put_user_ex(regs->cx, &sc->cx);
180		put_user_ex(regs->ax, &sc->ax);
181#ifdef CONFIG_X86_64
182		put_user_ex(regs->r8, &sc->r8);
183		put_user_ex(regs->r9, &sc->r9);
184		put_user_ex(regs->r10, &sc->r10);
185		put_user_ex(regs->r11, &sc->r11);
186		put_user_ex(regs->r12, &sc->r12);
187		put_user_ex(regs->r13, &sc->r13);
188		put_user_ex(regs->r14, &sc->r14);
189		put_user_ex(regs->r15, &sc->r15);
190#endif /* CONFIG_X86_64 */
191
192		put_user_ex(current->thread.trap_nr, &sc->trapno);
193		put_user_ex(current->thread.error_code, &sc->err);
194		put_user_ex(regs->ip, &sc->ip);
195#ifdef CONFIG_X86_32
196		put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
197		put_user_ex(regs->flags, &sc->flags);
198		put_user_ex(regs->sp, &sc->sp_at_signal);
199		put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
200#else /* !CONFIG_X86_32 */
201		put_user_ex(regs->flags, &sc->flags);
202		put_user_ex(regs->cs, &sc->cs);
203		put_user_ex(0, &sc->gs);
204		put_user_ex(0, &sc->fs);
205		put_user_ex(regs->ss, &sc->ss);
206#endif /* CONFIG_X86_32 */
207
208		put_user_ex(fpstate, (unsigned long __user *)&sc->fpstate);
209
210		/* non-iBCS2 extensions.. */
211		put_user_ex(mask, &sc->oldmask);
212		put_user_ex(current->thread.cr2, &sc->cr2);
213	} put_user_catch(err);
214
215	return err;
216}
217
218/*
219 * Set up a signal frame.
220 */
221
222/*
223 * Determine which stack to use..
224 */
225static unsigned long align_sigframe(unsigned long sp)
226{
227#ifdef CONFIG_X86_32
228	/*
229	 * Align the stack pointer according to the i386 ABI,
230	 * i.e. so that on function entry ((sp + 4) & 15) == 0.
231	 */
232	sp = ((sp + 4) & -16ul) - 4;
233#else /* !CONFIG_X86_32 */
234	sp = round_down(sp, 16) - 8;
235#endif
236	return sp;
237}
238
239static void __user *
240get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
241	     void __user **fpstate)
242{
243	/* Default to using normal stack */
244	unsigned long math_size = 0;
245	unsigned long sp = regs->sp;
246	unsigned long buf_fx = 0;
247	int onsigstack = on_sig_stack(sp);
248	int ret;
249
250	/* redzone */
251	if (IS_ENABLED(CONFIG_X86_64))
252		sp -= 128;
253
254	/* This is the X/Open sanctioned signal stack switching.  */
255	if (ka->sa.sa_flags & SA_ONSTACK) {
256		if (sas_ss_flags(sp) == 0)
257			sp = current->sas_ss_sp + current->sas_ss_size;
258	} else if (IS_ENABLED(CONFIG_X86_32) &&
259		   !onsigstack &&
260		   regs->ss != __USER_DS &&
261		   !(ka->sa.sa_flags & SA_RESTORER) &&
262		   ka->sa.sa_restorer) {
263		/* This is the legacy signal stack switching. */
264		sp = (unsigned long) ka->sa.sa_restorer;
265	}
266
267	sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
268				  &buf_fx, &math_size);
269	*fpstate = (void __user *)sp;
270
271	sp = align_sigframe(sp - frame_size);
272
273	/*
274	 * If we are on the alternate signal stack and would overflow it, don't.
275	 * Return an always-bogus address instead so we will die with SIGSEGV.
276	 */
277	if (onsigstack && !likely(on_sig_stack(sp)))
278		return (void __user *)-1L;
279
280	/* save i387 and extended state */
281	ret = copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size);
282	if (ret < 0)
283		return (void __user *)-1L;
284
285	return (void __user *)sp;
286}
287
288#ifdef CONFIG_X86_32
289static const struct {
290	u16 poplmovl;
291	u32 val;
292	u16 int80;
293} __attribute__((packed)) retcode = {
294	0xb858,		/* popl %eax; movl $..., %eax */
295	__NR_sigreturn,
296	0x80cd,		/* int $0x80 */
297};
298
299static const struct {
300	u8  movl;
301	u32 val;
302	u16 int80;
303	u8  pad;
304} __attribute__((packed)) rt_retcode = {
305	0xb8,		/* movl $..., %eax */
306	__NR_rt_sigreturn,
307	0x80cd,		/* int $0x80 */
308	0
309};
310
311static int
312__setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
313	      struct pt_regs *regs)
314{
315	struct sigframe __user *frame;
316	void __user *restorer;
317	int err = 0;
318	void __user *fpstate = NULL;
319
320	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate);
321
322	if (!access_ok(frame, sizeof(*frame)))
323		return -EFAULT;
324
325	if (__put_user(sig, &frame->sig))
326		return -EFAULT;
327
328	if (setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
329		return -EFAULT;
330
331	if (_NSIG_WORDS > 1) {
332		if (__copy_to_user(&frame->extramask, &set->sig[1],
333				   sizeof(frame->extramask)))
334			return -EFAULT;
335	}
336
337	if (current->mm->context.vdso)
338		restorer = current->mm->context.vdso +
339			vdso_image_32.sym___kernel_sigreturn;
340	else
341		restorer = &frame->retcode;
342	if (ksig->ka.sa.sa_flags & SA_RESTORER)
343		restorer = ksig->ka.sa.sa_restorer;
344
345	/* Set up to return from userspace.  */
346	err |= __put_user(restorer, &frame->pretcode);
347
348	/*
349	 * This is popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
350	 *
351	 * WE DO NOT USE IT ANY MORE! It's only left here for historical
352	 * reasons and because gdb uses it as a signature to notice
353	 * signal handler stack frames.
354	 */
355	err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
356
357	if (err)
358		return -EFAULT;
359
360	/* Set up registers for signal handler */
361	regs->sp = (unsigned long)frame;
362	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
363	regs->ax = (unsigned long)sig;
364	regs->dx = 0;
365	regs->cx = 0;
366
367	regs->ds = __USER_DS;
368	regs->es = __USER_DS;
369	regs->ss = __USER_DS;
370	regs->cs = __USER_CS;
371
372	return 0;
373}
374
375static int __setup_rt_frame(int sig, struct ksignal *ksig,
376			    sigset_t *set, struct pt_regs *regs)
377{
378	struct rt_sigframe __user *frame;
379	void __user *restorer;
380	int err = 0;
381	void __user *fpstate = NULL;
382
383	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate);
384
385	if (!access_ok(frame, sizeof(*frame)))
386		return -EFAULT;
387
388	put_user_try {
389		put_user_ex(sig, &frame->sig);
390		put_user_ex(&frame->info, &frame->pinfo);
391		put_user_ex(&frame->uc, &frame->puc);
392
393		/* Create the ucontext.  */
394		if (static_cpu_has(X86_FEATURE_XSAVE))
395			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
396		else
397			put_user_ex(0, &frame->uc.uc_flags);
398		put_user_ex(0, &frame->uc.uc_link);
399		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
400
401		/* Set up to return from userspace.  */
402		restorer = current->mm->context.vdso +
403			vdso_image_32.sym___kernel_rt_sigreturn;
404		if (ksig->ka.sa.sa_flags & SA_RESTORER)
405			restorer = ksig->ka.sa.sa_restorer;
406		put_user_ex(restorer, &frame->pretcode);
407
408		/*
409		 * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
410		 *
411		 * WE DO NOT USE IT ANY MORE! It's only left here for historical
412		 * reasons and because gdb uses it as a signature to notice
413		 * signal handler stack frames.
414		 */
415		put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
416	} put_user_catch(err);
 417
418	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
419	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
420				regs, set->sig[0]);
421	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
422
423	if (err)
424		return -EFAULT;
425
426	/* Set up registers for signal handler */
427	regs->sp = (unsigned long)frame;
428	regs->ip = (unsigned long)ksig->ka.sa.sa_handler;
429	regs->ax = (unsigned long)sig;
430	regs->dx = (unsigned long)&frame->info;
431	regs->cx = (unsigned long)&frame->uc;
432
433	regs->ds = __USER_DS;
434	regs->es = __USER_DS;
435	regs->ss = __USER_DS;
436	regs->cs = __USER_CS;
437
438	return 0;
439}
440#else /* !CONFIG_X86_32 */
441static unsigned long frame_uc_flags(struct pt_regs *regs)
442{
443	unsigned long flags;
444
445	if (boot_cpu_has(X86_FEATURE_XSAVE))
446		flags = UC_FP_XSTATE | UC_SIGCONTEXT_SS;
447	else
448		flags = UC_SIGCONTEXT_SS;
449
450	if (likely(user_64bit_mode(regs)))
451		flags |= UC_STRICT_RESTORE_SS;
452
453	return flags;
454}
455
456static int __setup_rt_frame(int sig, struct ksignal *ksig,
457			    sigset_t *set, struct pt_regs *regs)
458{
459	struct rt_sigframe __user *frame;
460	void __user *fp = NULL;
461	unsigned long uc_flags;
462	int err = 0;
463
464	frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
465
466	if (!access_ok(frame, sizeof(*frame)))
467		return -EFAULT;
468
469	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
470		if (copy_siginfo_to_user(&frame->info, &ksig->info))
471			return -EFAULT;
472	}
473
474	uc_flags = frame_uc_flags(regs);
475
476	put_user_try {
477		/* Create the ucontext.  */
478		put_user_ex(uc_flags, &frame->uc.uc_flags);
479		put_user_ex(0, &frame->uc.uc_link);
480		save_altstack_ex(&frame->uc.uc_stack, regs->sp);
481
482		/* Set up to return from userspace.  If provided, use a stub
483		   already in userspace.  */
484		/* x86-64 should always use SA_RESTORER. */
485		if (ksig->ka.sa.sa_flags & SA_RESTORER) {
486			put_user_ex(ksig->ka.sa.sa_restorer, &frame->pretcode);
487		} else {
488			/* could use a vstub here */
489			err |= -EFAULT;
490		}
491	} put_user_catch(err);
492
493	err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
494	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
495
496	if (err)
497		return -EFAULT;
498
499	/* Set up registers for signal handler */
500	regs->di = sig;
501	/* In case the signal handler was declared without prototypes */
502	regs->ax = 0;
503
504	/* This also works for non SA_SIGINFO handlers because they expect the
505	   next argument after the signal number on the stack. */
506	regs->si = (unsigned long)&frame->info;
507	regs->dx = (unsigned long)&frame->uc;
508	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;
509
510	regs->sp = (unsigned long)frame;
511
512	/*
513	 * Set up the CS and SS registers to run signal handlers in
514	 * 64-bit mode, even if the handler happens to be interrupting
515	 * 32-bit or 16-bit code.
516	 *
517	 * SS is subtle.  In 64-bit mode, we don't need any particular
518	 * SS descriptor, but we do need SS to be valid.  It's possible
519	 * that the old SS is entirely bogus -- this can happen if the
520	 * signal we're trying to deliver is #GP or #SS caused by a bad
 521	 * SS value.  We also have a compatibility issue here: DOSEMU
522	 * relies on the contents of the SS register indicating the
523	 * SS value at the time of the signal, even though that code in
524	 * DOSEMU predates sigreturn's ability to restore SS.  (DOSEMU
525	 * avoids relying on sigreturn to restore SS; instead it uses
526	 * a trampoline.)  So we do our best: if the old SS was valid,
527	 * we keep it.  Otherwise we replace it.
528	 */
529	regs->cs = __USER_CS;
530
531	if (unlikely(regs->ss != __USER_DS))
532		force_valid_ss(regs);
533
534	return 0;
535}
536#endif /* CONFIG_X86_32 */
537
538static int x32_setup_rt_frame(struct ksignal *ksig,
539			      compat_sigset_t *set,
540			      struct pt_regs *regs)
541{
542#ifdef CONFIG_X86_X32_ABI
543	struct rt_sigframe_x32 __user *frame;
544	unsigned long uc_flags;
545	void __user *restorer;
546	int err = 0;
547	void __user *fpstate = NULL;
548
549	frame = get_sigframe(&ksig->ka, regs, sizeof(*frame), &fpstate);
550
551	if (!access_ok(frame, sizeof(*frame)))
552		return -EFAULT;
553
554	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
555		if (__copy_siginfo_to_user32(&frame->info, &ksig->info, true))
556			return -EFAULT;
557	}
558
559	uc_flags = frame_uc_flags(regs);
560
561	put_user_try {
562		/* Create the ucontext.  */
563		put_user_ex(uc_flags, &frame->uc.uc_flags);
564		put_user_ex(0, &frame->uc.uc_link);
565		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
566		put_user_ex(0, &frame->uc.uc__pad0);
567
568		if (ksig->ka.sa.sa_flags & SA_RESTORER) {
569			restorer = ksig->ka.sa.sa_restorer;
570		} else {
571			/* could use a vstub here */
572			restorer = NULL;
573			err |= -EFAULT;
574		}
575		put_user_ex(restorer, (unsigned long __user *)&frame->pretcode);
576	} put_user_catch(err);
577
578	err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
579				regs, set->sig[0]);
580	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
581
582	if (err)
583		return -EFAULT;
584
585	/* Set up registers for signal handler */
586	regs->sp = (unsigned long) frame;
587	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;
588
589	/* We use the x32 calling convention here... */
590	regs->di = ksig->sig;
591	regs->si = (unsigned long) &frame->info;
592	regs->dx = (unsigned long) &frame->uc;
593
594	loadsegment(ds, __USER_DS);
595	loadsegment(es, __USER_DS);
596
597	regs->cs = __USER_CS;
598	regs->ss = __USER_DS;
599#endif	/* CONFIG_X86_X32_ABI */
600
601	return 0;
602}
603
604/*
605 * Do a signal return; undo the signal stack.
606 */
607#ifdef CONFIG_X86_32
608SYSCALL_DEFINE0(sigreturn)
609{
610	struct pt_regs *regs = current_pt_regs();
611	struct sigframe __user *frame;
612	sigset_t set;
613
614	frame = (struct sigframe __user *)(regs->sp - 8);
615
616	if (!access_ok(frame, sizeof(*frame)))
617		goto badframe;
618	if (__get_user(set.sig[0], &frame->sc.oldmask) || (_NSIG_WORDS > 1
619		&& __copy_from_user(&set.sig[1], &frame->extramask,
620				    sizeof(frame->extramask))))
621		goto badframe;
622
623	set_current_blocked(&set);
624
625	/*
626	 * x86_32 has no uc_flags bits relevant to restore_sigcontext.
627	 * Save a few cycles by skipping the __get_user.
628	 */
629	if (restore_sigcontext(regs, &frame->sc, 0))
630		goto badframe;
631	return regs->ax;
632
633badframe:
634	signal_fault(regs, frame, "sigreturn");
635
636	return 0;
637}
638#endif /* CONFIG_X86_32 */
639
640SYSCALL_DEFINE0(rt_sigreturn)
641{
642	struct pt_regs *regs = current_pt_regs();
643	struct rt_sigframe __user *frame;
644	sigset_t set;
645	unsigned long uc_flags;
646
647	frame = (struct rt_sigframe __user *)(regs->sp - sizeof(long));
648	if (!access_ok(frame, sizeof(*frame)))
649		goto badframe;
650	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
651		goto badframe;
652	if (__get_user(uc_flags, &frame->uc.uc_flags))
653		goto badframe;
654
655	set_current_blocked(&set);
656
657	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
658		goto badframe;
659
660	if (restore_altstack(&frame->uc.uc_stack))
661		goto badframe;
662
663	return regs->ax;
664
665badframe:
666	signal_fault(regs, frame, "rt_sigreturn");
667	return 0;
668}
669
670static inline int is_ia32_compat_frame(struct ksignal *ksig)
671{
672	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
673		ksig->ka.sa.sa_flags & SA_IA32_ABI;
674}
675
676static inline int is_ia32_frame(struct ksignal *ksig)
677{
678	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
679}
680
681static inline int is_x32_frame(struct ksignal *ksig)
682{
683	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
684		ksig->ka.sa.sa_flags & SA_X32_ABI;
685}
686
687static int
688setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
689{
690	int usig = ksig->sig;
691	sigset_t *set = sigmask_to_save();
692	compat_sigset_t *cset = (compat_sigset_t *) set;
693
694	/* Perform fixup for the pre-signal frame. */
695	rseq_signal_deliver(ksig, regs);
696
697	/* Set up the stack frame */
698	if (is_ia32_frame(ksig)) {
699		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
700			return ia32_setup_rt_frame(usig, ksig, cset, regs);
701		else
702			return ia32_setup_frame(usig, ksig, cset, regs);
703	} else if (is_x32_frame(ksig)) {
704		return x32_setup_rt_frame(ksig, cset, regs);
705	} else {
706		return __setup_rt_frame(ksig->sig, ksig, set, regs);
707	}
708}
709
710static void
711handle_signal(struct ksignal *ksig, struct pt_regs *regs)
712{
713	bool stepping, failed;
714	struct fpu *fpu = &current->thread.fpu;
715
716	if (v8086_mode(regs))
717		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
718
719	/* Are we from a system call? */
720	if (syscall_get_nr(current, regs) >= 0) {
721		/* If so, check system call restarting.. */
722		switch (syscall_get_error(current, regs)) {
723		case -ERESTART_RESTARTBLOCK:
724		case -ERESTARTNOHAND:
725			regs->ax = -EINTR;
726			break;
727
728		case -ERESTARTSYS:
729			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
730				regs->ax = -EINTR;
731				break;
732			}
733		/* fallthrough */
734		case -ERESTARTNOINTR:
735			regs->ax = regs->orig_ax;
736			regs->ip -= 2;
737			break;
738		}
739	}
740
741	/*
742	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
743	 * so that register information in the sigcontext is correct and
744	 * then notify the tracer before entering the signal handler.
745	 */
746	stepping = test_thread_flag(TIF_SINGLESTEP);
747	if (stepping)
748		user_disable_single_step(current);
749
750	failed = (setup_rt_frame(ksig, regs) < 0);
751	if (!failed) {
752		/*
753		 * Clear the direction flag as per the ABI for function entry.
754		 *
755		 * Clear RF when entering the signal handler, because
756		 * it might disable possible debug exception from the
757		 * signal handler.
758		 *
759		 * Clear TF for the case when it wasn't set by debugger to
760		 * avoid the recursive send_sigtrap() in SIGTRAP handler.
761		 */
762		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
763		/*
764		 * Ensure the signal handler starts with the new fpu state.
765		 */
766		fpu__clear(fpu);
767	}
768	signal_setup_done(failed, ksig, stepping);
769}
770
771static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
772{
773	/*
774	 * This function is fundamentally broken as currently
775	 * implemented.
776	 *
777	 * The idea is that we want to trigger a call to the
778	 * restart_block() syscall and that we want in_ia32_syscall(),
779	 * in_x32_syscall(), etc. to match whatever they were in the
780	 * syscall being restarted.  We assume that the syscall
781	 * instruction at (regs->ip - 2) matches whatever syscall
782	 * instruction we used to enter in the first place.
783	 *
784	 * The problem is that we can get here when ptrace pokes
785	 * syscall-like values into regs even if we're not in a syscall
786	 * at all.
787	 *
788	 * For now, we maintain historical behavior and guess based on
789	 * stored state.  We could do better by saving the actual
790	 * syscall arch in restart_block or (with caveats on x32) by
791	 * checking if regs->ip points to 'int $0x80'.  The current
792	 * behavior is incorrect if a tracer has a different bitness
793	 * than the tracee.
794	 */
795#ifdef CONFIG_IA32_EMULATION
796	if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
797		return __NR_ia32_restart_syscall;
798#endif
799#ifdef CONFIG_X86_X32_ABI
800	return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
801#else
802	return __NR_restart_syscall;
803#endif
804}
805
806/*
807 * Note that 'init' is a special process: it doesn't get signals it doesn't
808 * want to handle. Thus you cannot kill init even with a SIGKILL even by
809 * mistake.
810 */
811void do_signal(struct pt_regs *regs)
812{
813	struct ksignal ksig;
814
815	if (get_signal(&ksig)) {
816		/* Whee! Actually deliver the signal.  */
817		handle_signal(&ksig, regs);
818		return;
819	}
820
821	/* Did we come from a system call? */
822	if (syscall_get_nr(current, regs) >= 0) {
823		/* Restart the system call - no handlers present */
824		switch (syscall_get_error(current, regs)) {
825		case -ERESTARTNOHAND:
826		case -ERESTARTSYS:
827		case -ERESTARTNOINTR:
828			regs->ax = regs->orig_ax;
829			regs->ip -= 2;
830			break;
831
832		case -ERESTART_RESTARTBLOCK:
833			regs->ax = get_nr_restart_syscall(regs);
834			regs->ip -= 2;
835			break;
836		}
837	}
838
839	/*
840	 * If there's no signal to deliver, we just put the saved sigmask
841	 * back.
842	 */
843	restore_saved_sigmask();
844}
845
846void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
847{
848	struct task_struct *me = current;
849
850	if (show_unhandled_signals && printk_ratelimit()) {
851		printk("%s"
852		       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
853		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
854		       me->comm, me->pid, where, frame,
855		       regs->ip, regs->sp, regs->orig_ax);
856		print_vma_addr(KERN_CONT " in ", regs->ip);
857		pr_cont("\n");
858	}
859
860	force_sig(SIGSEGV);
861}
862
863#ifdef CONFIG_X86_X32_ABI
864asmlinkage long sys32_x32_rt_sigreturn(void)
865{
866	struct pt_regs *regs = current_pt_regs();
867	struct rt_sigframe_x32 __user *frame;
868	sigset_t set;
869	unsigned long uc_flags;
870
871	frame = (struct rt_sigframe_x32 __user *)(regs->sp - 8);
872
873	if (!access_ok(frame, sizeof(*frame)))
874		goto badframe;
875	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
876		goto badframe;
877	if (__get_user(uc_flags, &frame->uc.uc_flags))
878		goto badframe;
879
880	set_current_blocked(&set);
881
882	if (restore_sigcontext(regs, &frame->uc.uc_mcontext, uc_flags))
883		goto badframe;
884
885	if (compat_restore_altstack(&frame->uc.uc_stack))
886		goto badframe;
887
888	return regs->ax;
889
890badframe:
891	signal_fault(regs, frame, "x32 rt_sigreturn");
892	return 0;
893}
894#endif
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  Copyright (C) 1991, 1992  Linus Torvalds
  4 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
  5 *
  6 *  1997-11-28  Modified for POSIX.1b signals by Richard Henderson
  7 *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
  8 *  2000-2002   x86-64 support by Andi Kleen
  9 */
 10
 11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 12
 13#include <linux/sched.h>
 14#include <linux/sched/task_stack.h>
 15#include <linux/mm.h>
 16#include <linux/smp.h>
 17#include <linux/kernel.h>
 18#include <linux/kstrtox.h>
 19#include <linux/errno.h>
 20#include <linux/wait.h>
 21#include <linux/unistd.h>
 22#include <linux/stddef.h>
 23#include <linux/personality.h>
 24#include <linux/uaccess.h>
 25#include <linux/user-return-notifier.h>
 26#include <linux/uprobes.h>
 27#include <linux/context_tracking.h>
 28#include <linux/entry-common.h>
 29#include <linux/syscalls.h>
 30#include <linux/rseq.h>
 31
 32#include <asm/processor.h>
 33#include <asm/ucontext.h>
 34#include <asm/fpu/signal.h>
 35#include <asm/fpu/xstate.h>
 36#include <asm/vdso.h>
 37#include <asm/mce.h>
 38#include <asm/sighandling.h>
 39#include <asm/vm86.h>
 40
 41#include <asm/syscall.h>
 42#include <asm/sigframe.h>
 43#include <asm/signal.h>
 44#include <asm/shstk.h>
 45
 46static inline int is_ia32_compat_frame(struct ksignal *ksig)
 47{
 48	return IS_ENABLED(CONFIG_IA32_EMULATION) &&
 49		ksig->ka.sa.sa_flags & SA_IA32_ABI;
 50}
 51
 52static inline int is_ia32_frame(struct ksignal *ksig)
 53{
 54	return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame(ksig);
 55}
 56
 57static inline int is_x32_frame(struct ksignal *ksig)
 58{
 59	return IS_ENABLED(CONFIG_X86_X32_ABI) &&
 60		ksig->ka.sa.sa_flags & SA_X32_ABI;
 61}
 62
 63/*
 64 * Enable all pkeys temporarily, so as to ensure that both the current
 65 * execution stack as well as the alternate signal stack are writeable.
 66 * The application can use any of the available pkeys to protect the
 67 * alternate signal stack, and we don't know which one it is, so enable
 68 * all. The PKRU register will be reset to init_pkru later in the flow,
 69 * in fpu__clear_user_states(), and it is the application's responsibility
 70 * to enable the appropriate pkey as the first step in the signal handler
 71 * so that the handler does not segfault.
 72 */
 73static inline u32 sig_prepare_pkru(void)
 74{
 75	u32 orig_pkru = read_pkru();
 76
 77	write_pkru(0);
 78	return orig_pkru;
 79}
 80
 81/*
 82 * Set up a signal frame.
 83 */
 84
 85/* x86 ABI requires 16-byte alignment */
 86#define FRAME_ALIGNMENT	16UL
 87
 88#define MAX_FRAME_PADDING	(FRAME_ALIGNMENT - 1)
 89
 90/*
 91 * Determine which stack to use..
 92 */
 93void __user *
 94get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size,
 95	     void __user **fpstate)
 96{
 97	struct k_sigaction *ka = &ksig->ka;
 98	int ia32_frame = is_ia32_frame(ksig);
 99	/* Default to using normal stack */
100	bool nested_altstack = on_sig_stack(regs->sp);
101	bool entering_altstack = false;
102	unsigned long math_size = 0;
103	unsigned long sp = regs->sp;
104	unsigned long buf_fx = 0;
105	u32 pkru;
106
107	/* redzone */
108	if (!ia32_frame)
109		sp -= 128;
110
111	/* This is the X/Open sanctioned signal stack switching.  */
112	if (ka->sa.sa_flags & SA_ONSTACK) {
113		/*
114		 * This checks nested_altstack via sas_ss_flags(). Sensible
115		 * programs use SS_AUTODISARM, which disables that check, and
116		 * programs that don't use SS_AUTODISARM get compatible behavior.
117		 */
118		if (sas_ss_flags(sp) == 0) {
119			sp = current->sas_ss_sp + current->sas_ss_size;
120			entering_altstack = true;
121		}
122	} else if (ia32_frame &&
123		   !nested_altstack &&
124		   regs->ss != __USER_DS &&
125		   !(ka->sa.sa_flags & SA_RESTORER) &&
126		   ka->sa.sa_restorer) {
127		/* This is the legacy signal stack switching. */
128		sp = (unsigned long) ka->sa.sa_restorer;
129		entering_altstack = true;
130	}
131
132	sp = fpu__alloc_mathframe(sp, ia32_frame, &buf_fx, &math_size);
133	*fpstate = (void __user *)sp;
134
135	sp -= frame_size;
136
137	if (ia32_frame)
138		/*
139		 * Align the stack pointer according to the i386 ABI,
140		 * i.e. so that on function entry ((sp + 4) & 15) == 0.
141		 */
142		sp = ((sp + 4) & -FRAME_ALIGNMENT) - 4;
143	else
144		sp = round_down(sp, FRAME_ALIGNMENT) - 8;
145
146	/*
147	 * If we are on the alternate signal stack and would overflow it, don't.
148	 * Return an always-bogus address instead so we will die with SIGSEGV.
149	 */
150	if (unlikely((nested_altstack || entering_altstack) &&
151		     !__on_sig_stack(sp))) {
152
153		if (show_unhandled_signals && printk_ratelimit())
154			pr_info("%s[%d] overflowed sigaltstack\n",
155				current->comm, task_pid_nr(current));
156
157		return (void __user *)-1L;
158	}
159
160	/* Update PKRU to enable access to the alternate signal stack. */
161	pkru = sig_prepare_pkru();
162	/* save i387 and extended state */
163	if (!copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size, pkru)) {
164		/*
165		 * Restore PKRU to the original, user-defined value; disable
166		 * extra pkeys enabled for the alternate signal stack, if any.
167		 */
168		write_pkru(pkru);
169		return (void __user *)-1L;
170	}
171
172	return (void __user *)sp;
173}
174
175/*
176 * There are four different struct types for signal frame: sigframe_ia32,
177 * rt_sigframe_ia32, rt_sigframe_x32, and rt_sigframe. Use the worst case
178 * -- the largest size. It means the size for 64-bit apps is a bit more
179 * than needed, but this keeps the code simple.
180 */
181#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
182# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct sigframe_ia32)
183#else
184# define MAX_FRAME_SIGINFO_UCTXT_SIZE	sizeof(struct rt_sigframe)
185#endif
186
187/*
188 * The FP state frame contains an XSAVE buffer which must be 64-byte aligned.
189 * If a signal frame starts at an unaligned address, extra space is required.
190 * This is the max alignment padding, conservatively.
191 */
192#define MAX_XSAVE_PADDING	63UL
193
194/*
195 * The frame data is composed of the following areas and laid out as:
196 *
197 * -------------------------
198 * | alignment padding     |
199 * -------------------------
200 * | (f)xsave frame        |
201 * -------------------------
202 * | fsave header          |
203 * -------------------------
204 * | alignment padding     |
205 * -------------------------
206 * | siginfo + ucontext    |
207 * -------------------------
208 */
209
210/* max_frame_size tells userspace the worst case signal stack size. */
211static unsigned long __ro_after_init max_frame_size;
212static unsigned int __ro_after_init fpu_default_state_size;
213
214static int __init init_sigframe_size(void)
215{
216	fpu_default_state_size = fpu__get_fpstate_size();
217
218	max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING;
219
220	max_frame_size += fpu_default_state_size + MAX_XSAVE_PADDING;
221
222	/* Userspace expects an aligned size. */
223	max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT);
224
225	pr_info("max sigframe size: %lu\n", max_frame_size);
226	return 0;
227}
228early_initcall(init_sigframe_size);
229
230unsigned long get_sigframe_size(void)
231{
232	return max_frame_size;
233}
234
235static int
236setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
237{
238	/* Perform fixup for the pre-signal frame. */
239	rseq_signal_deliver(ksig, regs);
240
241	/* Set up the stack frame */
242	if (is_ia32_frame(ksig)) {
243		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
244			return ia32_setup_rt_frame(ksig, regs);
245		else
246			return ia32_setup_frame(ksig, regs);
247	} else if (is_x32_frame(ksig)) {
248		return x32_setup_rt_frame(ksig, regs);
249	} else {
250		return x64_setup_rt_frame(ksig, regs);
251	}
252}
253
254static void
255handle_signal(struct ksignal *ksig, struct pt_regs *regs)
256{
257	bool stepping, failed;
258	struct fpu *fpu = &current->thread.fpu;
259
260	if (v8086_mode(regs))
261		save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
262
263	/* Are we from a system call? */
264	if (syscall_get_nr(current, regs) != -1) {
265		/* If so, check system call restarting.. */
266		switch (syscall_get_error(current, regs)) {
267		case -ERESTART_RESTARTBLOCK:
268		case -ERESTARTNOHAND:
269			regs->ax = -EINTR;
270			break;
271
272		case -ERESTARTSYS:
273			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
274				regs->ax = -EINTR;
275				break;
276			}
277			fallthrough;
278		case -ERESTARTNOINTR:
279			regs->ax = regs->orig_ax;
280			regs->ip -= 2;
281			break;
282		}
283	}
284
285	/*
286	 * If TF is set due to a debugger (TIF_FORCED_TF), clear TF now
287	 * so that register information in the sigcontext is correct and
288	 * then notify the tracer before entering the signal handler.
289	 */
290	stepping = test_thread_flag(TIF_SINGLESTEP);
291	if (stepping)
292		user_disable_single_step(current);
293
294	failed = (setup_rt_frame(ksig, regs) < 0);
295	if (!failed) {
296		/*
297		 * Clear the direction flag as per the ABI for function entry.
298		 *
299		 * Clear RF when entering the signal handler, because
300		 * it might disable possible debug exception from the
301		 * signal handler.
302		 *
303		 * Clear TF for the case when it wasn't set by debugger to
304		 * avoid the recursive send_sigtrap() in SIGTRAP handler.
305		 */
306		regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
307		/*
308		 * Ensure the signal handler starts with the new fpu state.
309		 */
310		fpu__clear_user_states(fpu);
311	}
312	signal_setup_done(failed, ksig, stepping);
313}
314
315static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
316{
317#ifdef CONFIG_IA32_EMULATION
318	if (current->restart_block.arch_data & TS_COMPAT)
319		return __NR_ia32_restart_syscall;
320#endif
321#ifdef CONFIG_X86_X32_ABI
322	return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
323#else
324	return __NR_restart_syscall;
325#endif
326}
327
328/*
329 * Note that 'init' is a special process: it doesn't get signals it doesn't
330 * want to handle. Thus you cannot kill init even with a SIGKILL even by
331 * mistake.
332 */
333void arch_do_signal_or_restart(struct pt_regs *regs)
334{
335	struct ksignal ksig;
336
337	if (get_signal(&ksig)) {
338		/* Whee! Actually deliver the signal.  */
339		handle_signal(&ksig, regs);
340		return;
341	}
342
343	/* Did we come from a system call? */
344	if (syscall_get_nr(current, regs) != -1) {
345		/* Restart the system call - no handlers present */
346		switch (syscall_get_error(current, regs)) {
347		case -ERESTARTNOHAND:
348		case -ERESTARTSYS:
349		case -ERESTARTNOINTR:
350			regs->ax = regs->orig_ax;
351			regs->ip -= 2;
352			break;
353
354		case -ERESTART_RESTARTBLOCK:
355			regs->ax = get_nr_restart_syscall(regs);
356			regs->ip -= 2;
357			break;
358		}
359	}
360
361	/*
362	 * If there's no signal to deliver, we just put the saved sigmask
363	 * back.
364	 */
365	restore_saved_sigmask();
366}
367
368void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
369{
370	struct task_struct *me = current;
371
372	if (show_unhandled_signals && printk_ratelimit()) {
373		printk("%s"
374		       "%s[%d] bad frame in %s frame:%p ip:%lx sp:%lx orax:%lx",
375		       task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
376		       me->comm, me->pid, where, frame,
377		       regs->ip, regs->sp, regs->orig_ax);
378		print_vma_addr(KERN_CONT " in ", regs->ip);
379		pr_cont("\n");
380	}
381
382	force_sig(SIGSEGV);
383}
384
385#ifdef CONFIG_DYNAMIC_SIGFRAME
386#ifdef CONFIG_STRICT_SIGALTSTACK_SIZE
387static bool strict_sigaltstack_size __ro_after_init = true;
388#else
389static bool strict_sigaltstack_size __ro_after_init = false;
390#endif
391
392static int __init strict_sas_size(char *arg)
393{
394	return kstrtobool(arg, &strict_sigaltstack_size) == 0;
395}
396__setup("strict_sas_size", strict_sas_size);
397
398/*
399 * MINSIGSTKSZ is 2048 and can't be changed despite the fact that AVX512
400 * exceeds that size already. As such programs might never use the
401 * sigaltstack, they just continued to work. While always checking against
402 * the real size would be correct, this might be considered a regression.
403 *
404 * Therefore avoid the sanity check, unless enforced by kernel
405 * configuration or command line option.
406 *
407 * When dynamic FPU features are supported, the check is also enforced when
408 * the task has permissions to use dynamic features. Tasks which have no
409 * permission are checked against the size of the non-dynamic feature set
410 * if strict checking is enabled. This avoids forcing all tasks on the
411 * system to allocate large sigaltstacks even if they are never going
412 * to use a dynamic feature. As this is serialized via sighand::siglock
413 * any permission request for a dynamic feature either happened already
414 * or will see the newly installed sigaltstack size in the permission checks.
415 */
416bool sigaltstack_size_valid(size_t ss_size)
417{
418	unsigned long fsize = max_frame_size - fpu_default_state_size;
419	u64 mask;
420
421	lockdep_assert_held(&current->sighand->siglock);
422
423	if (!fpu_state_size_dynamic() && !strict_sigaltstack_size)
424		return true;
425
426	fsize += current->group_leader->thread.fpu.perm.__user_state_size;
427	if (likely(ss_size > fsize))
428		return true;
429
430	if (strict_sigaltstack_size)
431		return ss_size > fsize;
432
433	mask = current->group_leader->thread.fpu.perm.__state_perm;
434	if (mask & XFEATURE_MASK_USER_DYNAMIC)
435		return ss_size > fsize;
436
437	return true;
438}
439#endif /* CONFIG_DYNAMIC_SIGFRAME */