arch/arm64/kernel/signal.c
v4.17
 
  1/*
  2 * Based on arch/arm/kernel/signal.c
  3 *
  4 * Copyright (C) 1995-2009 Russell King
  5 * Copyright (C) 2012 ARM Ltd.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 18 */
 19
 20#include <linux/compat.h>
 21#include <linux/errno.h>
 22#include <linux/kernel.h>
 23#include <linux/signal.h>
 24#include <linux/personality.h>
 25#include <linux/freezer.h>
 26#include <linux/stddef.h>
 27#include <linux/uaccess.h>
 28#include <linux/sizes.h>
 29#include <linux/string.h>
 30#include <linux/tracehook.h>
 31#include <linux/ratelimit.h>
 32#include <linux/syscalls.h>
 33
 34#include <asm/daifflags.h>
 35#include <asm/debug-monitors.h>
 36#include <asm/elf.h>
 37#include <asm/cacheflush.h>
 38#include <asm/ucontext.h>
 39#include <asm/unistd.h>
 40#include <asm/fpsimd.h>
 41#include <asm/ptrace.h>
 42#include <asm/signal32.h>
 43#include <asm/traps.h>
 44#include <asm/vdso.h>
 45
 46/*
 47 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 48 */
 49struct rt_sigframe {
 50	struct siginfo info;
 51	struct ucontext uc;
 52};
 53
 54struct frame_record {
 55	u64 fp;
 56	u64 lr;
 57};
 58
 59struct rt_sigframe_user_layout {
 60	struct rt_sigframe __user *sigframe;
 61	struct frame_record __user *next_frame;
 62
 63	unsigned long size;	/* size of allocated sigframe data */
 64	unsigned long limit;	/* largest allowed size */
 65
 66	unsigned long fpsimd_offset;
 67	unsigned long esr_offset;
 68	unsigned long sve_offset;
 69	unsigned long extra_offset;
 70	unsigned long end_offset;
 71};
 72
 73#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
 74#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
 75#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
 76
 77static void init_user_layout(struct rt_sigframe_user_layout *user)
 78{
 79	const size_t reserved_size =
 80		sizeof(user->sigframe->uc.uc_mcontext.__reserved);
 81
 82	memset(user, 0, sizeof(*user));
 83	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
 84
 85	user->limit = user->size + reserved_size;
 86
 87	user->limit -= TERMINATOR_SIZE;
 88	user->limit -= EXTRA_CONTEXT_SIZE;
 89	/* Reserve space for extension and terminator ^ */
 90}
 91
 92static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
 93{
 94	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
 95}
 96
 97/*
 98 * Sanity limit on the approximate maximum size of signal frame we'll
 99 * try to generate.  Stack alignment padding and the frame record are
100 * not taken into account.  This limit is not a guarantee and is
101 * NOT ABI.
102 */
103#define SIGFRAME_MAXSZ SZ_64K
104
105static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
106			    unsigned long *offset, size_t size, bool extend)
107{
108	size_t padded_size = round_up(size, 16);
109
110	if (padded_size > user->limit - user->size &&
111	    !user->extra_offset &&
112	    extend) {
113		int ret;
114
115		user->limit += EXTRA_CONTEXT_SIZE;
116		ret = __sigframe_alloc(user, &user->extra_offset,
117				       sizeof(struct extra_context), false);
118		if (ret) {
119			user->limit -= EXTRA_CONTEXT_SIZE;
120			return ret;
121		}
122
123		/* Reserve space for the __reserved[] terminator */
124		user->size += TERMINATOR_SIZE;
125
126		/*
127		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
128		 * the terminator:
129		 */
130		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
131	}
132
133	/* Still not enough space?  Bad luck! */
134	if (padded_size > user->limit - user->size)
135		return -ENOMEM;
136
137	*offset = user->size;
138	user->size += padded_size;
139
140	return 0;
141}
142
143/*
144 * Allocate space for an optional record of <size> bytes in the user
145 * signal frame.  The offset from the signal frame base address to the
146 * allocated block is assigned to *offset.
147 */
148static int sigframe_alloc(struct rt_sigframe_user_layout *user,
149			  unsigned long *offset, size_t size)
150{
151	return __sigframe_alloc(user, offset, size, true);
152}
153
154/* Allocate the null terminator record and prevent further allocations */
155static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
156{
157	int ret;
158
159	/* Un-reserve the space reserved for the terminator: */
160	user->limit += TERMINATOR_SIZE;
161
162	ret = sigframe_alloc(user, &user->end_offset,
163			     sizeof(struct _aarch64_ctx));
164	if (ret)
165		return ret;
166
167	/* Prevent further allocation: */
168	user->limit = user->size;
169	return 0;
170}
171
172static void __user *apply_user_offset(
173	struct rt_sigframe_user_layout const *user, unsigned long offset)
174{
175	char __user *base = (char __user *)user->sigframe;
176
177	return base + offset;
178}
179
180static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
181{
182	struct user_fpsimd_state const *fpsimd =
183		&current->thread.uw.fpsimd_state;
184	int err;
185
186	/* copy the FP and status/control registers */
187	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
188	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
189	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);
190
191	/* copy the magic/size information */
192	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
193	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);
194
195	return err ? -EFAULT : 0;
196}
197
198static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
199{
200	struct user_fpsimd_state fpsimd;
201	__u32 magic, size;
202	int err = 0;
203
204	/* check the magic/size information */
205	__get_user_error(magic, &ctx->head.magic, err);
206	__get_user_error(size, &ctx->head.size, err);
207	if (err)
208		return -EFAULT;
209	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
210		return -EINVAL;
211
212	/* copy the FP and status/control registers */
213	err = __copy_from_user(fpsimd.vregs, ctx->vregs,
214			       sizeof(fpsimd.vregs));
215	__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
216	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
217
218	clear_thread_flag(TIF_SVE);
219
220	/* load the hardware registers from the fpsimd_state structure */
221	if (!err)
222		fpsimd_update_current_state(&fpsimd);
223
224	return err ? -EFAULT : 0;
225}
226
227
228struct user_ctxs {
229	struct fpsimd_context __user *fpsimd;
230	struct sve_context __user *sve;
231};
232
233#ifdef CONFIG_ARM64_SVE
234
235static int preserve_sve_context(struct sve_context __user *ctx)
236{
237	int err = 0;
238	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
239	unsigned int vl = current->thread.sve_vl;
240	unsigned int vq = 0;
241
242	if (test_thread_flag(TIF_SVE))
243		vq = sve_vq_from_vl(vl);
244
245	memset(reserved, 0, sizeof(reserved));
246
247	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
248	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
249			 &ctx->head.size, err);
250	__put_user_error(vl, &ctx->vl, err);
251	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
252	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
253
254	if (vq) {
255		/*
256		 * This assumes that the SVE state has already been saved to
257		 * the task struct by calling preserve_fpsimd_context().
258		 */
259		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
260				      current->thread.sve_state,
261				      SVE_SIG_REGS_SIZE(vq));
262	}
263
264	return err ? -EFAULT : 0;
265}
266
267static int restore_sve_fpsimd_context(struct user_ctxs *user)
268{
269	int err;
270	unsigned int vq;
271	struct user_fpsimd_state fpsimd;
272	struct sve_context sve;
273
274	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
275		return -EFAULT;
276
277	if (sve.vl != current->thread.sve_vl)
278		return -EINVAL;
279
280	if (sve.head.size <= sizeof(*user->sve)) {
281		clear_thread_flag(TIF_SVE);
282		goto fpsimd_only;
283	}
284
285	vq = sve_vq_from_vl(sve.vl);
286
287	if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
288		return -EINVAL;
289
290	/*
291	 * Careful: we are about to __copy_from_user() directly into
292	 * thread.sve_state with preemption enabled, so protection is
293	 * needed to prevent a racing context switch from writing stale
294	 * registers back over the new data.
295	 */
296
297	fpsimd_flush_task_state(current);
298	barrier();
299	/* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */
300
301	set_thread_flag(TIF_FOREIGN_FPSTATE);
302	barrier();
303	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
304
305	sve_alloc(current);
306	err = __copy_from_user(current->thread.sve_state,
307			       (char __user const *)user->sve +
308					SVE_SIG_REGS_OFFSET,
309			       SVE_SIG_REGS_SIZE(vq));
310	if (err)
311		return -EFAULT;
312
313	set_thread_flag(TIF_SVE);
314
315fpsimd_only:
316	/* copy the FP and status/control registers */
317	/* restore_sigframe() already checked that user->fpsimd != NULL. */
318	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
319			       sizeof(fpsimd.vregs));
320	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
321	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
322
323	/* load the hardware registers from the fpsimd_state structure */
324	if (!err)
325		fpsimd_update_current_state(&fpsimd);
326
327	return err ? -EFAULT : 0;
328}
329
330#else /* ! CONFIG_ARM64_SVE */
331
332/* Turn any non-optimised out attempts to use these into a link error: */
333extern int preserve_sve_context(void __user *ctx);
334extern int restore_sve_fpsimd_context(struct user_ctxs *user);
335
336#endif /* ! CONFIG_ARM64_SVE */
337
338
339static int parse_user_sigframe(struct user_ctxs *user,
340			       struct rt_sigframe __user *sf)
341{
342	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
343	struct _aarch64_ctx __user *head;
344	char __user *base = (char __user *)&sc->__reserved;
345	size_t offset = 0;
346	size_t limit = sizeof(sc->__reserved);
347	bool have_extra_context = false;
348	char const __user *const sfp = (char const __user *)sf;
349
350	user->fpsimd = NULL;
351	user->sve = NULL;
352
353	if (!IS_ALIGNED((unsigned long)base, 16))
354		goto invalid;
355
356	while (1) {
357		int err = 0;
358		u32 magic, size;
359		char const __user *userp;
360		struct extra_context const __user *extra;
361		u64 extra_datap;
362		u32 extra_size;
363		struct _aarch64_ctx const __user *end;
364		u32 end_magic, end_size;
365
366		if (limit - offset < sizeof(*head))
367			goto invalid;
368
369		if (!IS_ALIGNED(offset, 16))
370			goto invalid;
371
372		head = (struct _aarch64_ctx __user *)(base + offset);
373		__get_user_error(magic, &head->magic, err);
374		__get_user_error(size, &head->size, err);
375		if (err)
376			return err;
377
378		if (limit - offset < size)
379			goto invalid;
380
381		switch (magic) {
382		case 0:
383			if (size)
384				goto invalid;
385
386			goto done;
387
388		case FPSIMD_MAGIC:
389			if (user->fpsimd)
390				goto invalid;
391
392			if (size < sizeof(*user->fpsimd))
393				goto invalid;
394
395			user->fpsimd = (struct fpsimd_context __user *)head;
396			break;
397
398		case ESR_MAGIC:
399			/* ignore */
400			break;
401
402		case SVE_MAGIC:
403			if (!system_supports_sve())
404				goto invalid;
405
406			if (user->sve)
407				goto invalid;
408
409			if (size < sizeof(*user->sve))
410				goto invalid;
411
412			user->sve = (struct sve_context __user *)head;
413			break;
414
415		case EXTRA_MAGIC:
416			if (have_extra_context)
417				goto invalid;
418
419			if (size < sizeof(*extra))
420				goto invalid;
421
422			userp = (char const __user *)head;
423
424			extra = (struct extra_context const __user *)userp;
425			userp += size;
426
427			__get_user_error(extra_datap, &extra->datap, err);
428			__get_user_error(extra_size, &extra->size, err);
429			if (err)
430				return err;
431
432			/* Check for the dummy terminator in __reserved[]: */
433
434			if (limit - offset - size < TERMINATOR_SIZE)
435				goto invalid;
436
437			end = (struct _aarch64_ctx const __user *)userp;
438			userp += TERMINATOR_SIZE;
439
440			__get_user_error(end_magic, &end->magic, err);
441			__get_user_error(end_size, &end->size, err);
442			if (err)
443				return err;
444
445			if (end_magic || end_size)
446				goto invalid;
447
448			/* Prevent looping/repeated parsing of extra_context */
449			have_extra_context = true;
450
451			base = (__force void __user *)extra_datap;
452			if (!IS_ALIGNED((unsigned long)base, 16))
453				goto invalid;
454
455			if (!IS_ALIGNED(extra_size, 16))
456				goto invalid;
457
458			if (base != userp)
459				goto invalid;
460
461			/* Reject "unreasonably large" frames: */
462			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
463				goto invalid;
464
465			/*
466			 * Ignore trailing terminator in __reserved[]
467			 * and start parsing extra data:
468			 */
469			offset = 0;
470			limit = extra_size;
471
472			if (!access_ok(VERIFY_READ, base, limit))
473				goto invalid;
474
475			continue;
476
477		default:
478			goto invalid;
479		}
480
481		if (size < sizeof(*head))
482			goto invalid;
483
484		if (limit - offset < size)
485			goto invalid;
486
487		offset += size;
488	}
489
490done:
491	return 0;
492
493invalid:
494	return -EINVAL;
495}
496
497static int restore_sigframe(struct pt_regs *regs,
498			    struct rt_sigframe __user *sf)
499{
500	sigset_t set;
501	int i, err;
502	struct user_ctxs user;
503
504	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
505	if (err == 0)
506		set_current_blocked(&set);
507
508	for (i = 0; i < 31; i++)
509		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
510				 err);
511	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
512	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
513	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
514
515	/*
516	 * Avoid sys_rt_sigreturn() restarting.
517	 */
518	forget_syscall(regs);
519
520	err |= !valid_user_regs(&regs->user_regs, current);
521	if (err == 0)
522		err = parse_user_sigframe(&user, sf);
523
524	if (err == 0) {
525		if (!user.fpsimd)
526			return -EINVAL;
527
528		if (user.sve) {
529			if (!system_supports_sve())
530				return -EINVAL;
531
532			err = restore_sve_fpsimd_context(&user);
533		} else {
534			err = restore_fpsimd_context(user.fpsimd);
535		}
536	}
537
538	return err;
539}
540
541asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
542{
543	struct rt_sigframe __user *frame;
544
545	/* Always make any pending restarted system calls return -EINTR */
546	current->restart_block.fn = do_no_restart_syscall;
547
548	/*
549	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
550	 * be word aligned here.
551	 */
552	if (regs->sp & 15)
553		goto badframe;
554
555	frame = (struct rt_sigframe __user *)regs->sp;
556
557	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
558		goto badframe;
559
560	if (restore_sigframe(regs, frame))
561		goto badframe;
562
563	if (restore_altstack(&frame->uc.uc_stack))
564		goto badframe;
565
566	return regs->regs[0];
567
568badframe:
569	arm64_notify_segfault(regs->sp);
570	return 0;
571}
572
573/* Determine the layout of optional records in the signal frame */
574static int setup_sigframe_layout(struct rt_sigframe_user_layout *user)
575{
576	int err;
577
578	err = sigframe_alloc(user, &user->fpsimd_offset,
579			     sizeof(struct fpsimd_context));
580	if (err)
581		return err;
582
583	/* fault information, if valid */
584	if (current->thread.fault_code) {
585		err = sigframe_alloc(user, &user->esr_offset,
586				     sizeof(struct esr_context));
587		if (err)
588			return err;
589	}
590
591	if (system_supports_sve()) {
592		unsigned int vq = 0;
593
594		if (test_thread_flag(TIF_SVE))
595			vq = sve_vq_from_vl(current->thread.sve_vl);
596
597		err = sigframe_alloc(user, &user->sve_offset,
598				     SVE_SIG_CONTEXT_SIZE(vq));
599		if (err)
600			return err;
601	}
602
603	return sigframe_alloc_end(user);
604}
605
606
607static int setup_sigframe(struct rt_sigframe_user_layout *user,
608			  struct pt_regs *regs, sigset_t *set)
609{
610	int i, err = 0;
611	struct rt_sigframe __user *sf = user->sigframe;
612
613	/* set up the stack frame for unwinding */
614	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
615	__put_user_error(regs->regs[30], &user->next_frame->lr, err);
616
617	for (i = 0; i < 31; i++)
618		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
619				 err);
620	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
621	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
622	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
623
624	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
625
626	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
627
628	if (err == 0) {
629		struct fpsimd_context __user *fpsimd_ctx =
630			apply_user_offset(user, user->fpsimd_offset);
631		err |= preserve_fpsimd_context(fpsimd_ctx);
632	}
633
634	/* fault information, if valid */
635	if (err == 0 && user->esr_offset) {
636		struct esr_context __user *esr_ctx =
637			apply_user_offset(user, user->esr_offset);
638
639		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
640		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
641		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
642	}
643
644	/* Scalable Vector Extension state, if present */
645	if (system_supports_sve() && err == 0 && user->sve_offset) {
646		struct sve_context __user *sve_ctx =
647			apply_user_offset(user, user->sve_offset);
648		err |= preserve_sve_context(sve_ctx);
649	}
650
651	if (err == 0 && user->extra_offset) {
652		char __user *sfp = (char __user *)user->sigframe;
653		char __user *userp =
654			apply_user_offset(user, user->extra_offset);
655
656		struct extra_context __user *extra;
657		struct _aarch64_ctx __user *end;
658		u64 extra_datap;
659		u32 extra_size;
660
661		extra = (struct extra_context __user *)userp;
662		userp += EXTRA_CONTEXT_SIZE;
663
664		end = (struct _aarch64_ctx __user *)userp;
665		userp += TERMINATOR_SIZE;
666
667		/*
668		 * extra_datap is just written to the signal frame.
669		 * The value gets cast back to a void __user *
670		 * during sigreturn.
671		 */
672		extra_datap = (__force u64)userp;
673		extra_size = sfp + round_up(user->size, 16) - userp;
674
675		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
676		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
677		__put_user_error(extra_datap, &extra->datap, err);
678		__put_user_error(extra_size, &extra->size, err);
679
680		/* Add the terminator */
681		__put_user_error(0, &end->magic, err);
682		__put_user_error(0, &end->size, err);
683	}
684
685	/* set the "end" magic */
686	if (err == 0) {
687		struct _aarch64_ctx __user *end =
688			apply_user_offset(user, user->end_offset);
689
690		__put_user_error(0, &end->magic, err);
691		__put_user_error(0, &end->size, err);
692	}
693
694	return err;
695}
696
697static int get_sigframe(struct rt_sigframe_user_layout *user,
698			 struct ksignal *ksig, struct pt_regs *regs)
699{
700	unsigned long sp, sp_top;
701	int err;
702
703	init_user_layout(user);
704	err = setup_sigframe_layout(user);
705	if (err)
706		return err;
707
708	sp = sp_top = sigsp(regs->sp, ksig);
709
710	sp = round_down(sp - sizeof(struct frame_record), 16);
711	user->next_frame = (struct frame_record __user *)sp;
712
713	sp = round_down(sp, 16) - sigframe_size(user);
714	user->sigframe = (struct rt_sigframe __user *)sp;
715
716	/*
717	 * Check that we can actually write to the signal frame.
718	 */
719	if (!access_ok(VERIFY_WRITE, user->sigframe, sp_top - sp))
720		return -EFAULT;
721
722	return 0;
723}
724
725static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
726			 struct rt_sigframe_user_layout *user, int usig)
727{
728	__sigrestore_t sigtramp;
729
730	regs->regs[0] = usig;
731	regs->sp = (unsigned long)user->sigframe;
732	regs->regs[29] = (unsigned long)&user->next_frame->fp;
733	regs->pc = (unsigned long)ka->sa.sa_handler;
734
735	if (ka->sa.sa_flags & SA_RESTORER)
736		sigtramp = ka->sa.sa_restorer;
737	else
738		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
739
740	regs->regs[30] = (unsigned long)sigtramp;
741}
742
743static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
744			  struct pt_regs *regs)
745{
746	struct rt_sigframe_user_layout user;
747	struct rt_sigframe __user *frame;
748	int err = 0;
749
750	fpsimd_signal_preserve_current_state();
751
752	if (get_sigframe(&user, ksig, regs))
753		return 1;
754
755	frame = user.sigframe;
756
757	__put_user_error(0, &frame->uc.uc_flags, err);
758	__put_user_error(NULL, &frame->uc.uc_link, err);
759
760	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
761	err |= setup_sigframe(&user, regs, set);
762	if (err == 0) {
763		setup_return(regs, &ksig->ka, &user, usig);
764		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
765			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
766			regs->regs[1] = (unsigned long)&frame->info;
767			regs->regs[2] = (unsigned long)&frame->uc;
768		}
769	}
770
771	return err;
772}
773
774static void setup_restart_syscall(struct pt_regs *regs)
775{
776	if (is_compat_task())
777		compat_setup_restart_syscall(regs);
778	else
779		regs->regs[8] = __NR_restart_syscall;
780}
781
782/*
783 * OK, we're invoking a handler
784 */
785static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
786{
787	struct task_struct *tsk = current;
788	sigset_t *oldset = sigmask_to_save();
789	int usig = ksig->sig;
790	int ret;
791
792	/*
793	 * Set up the stack frame
794	 */
795	if (is_compat_task()) {
796		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
797			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
798		else
799			ret = compat_setup_frame(usig, ksig, oldset, regs);
800	} else {
801		ret = setup_rt_frame(usig, ksig, oldset, regs);
802	}
803
804	/*
805	 * Check that the resulting registers are actually sane.
806	 */
807	ret |= !valid_user_regs(&regs->user_regs, current);
808
809	/*
810	 * Fast forward the stepping logic so we step into the signal
811	 * handler.
812	 */
813	if (!ret)
814		user_fastforward_single_step(tsk);
815
816	signal_setup_done(ret, ksig, 0);
817}
818
819/*
820 * Note that 'init' is a special process: it doesn't get signals it doesn't
821 * want to handle. Thus you cannot kill init even with a SIGKILL even by
822 * mistake.
823 *
824 * Note that we go through the signals twice: once to check the signals that
825 * the kernel can handle, and then we build all the user-level signal handling
826 * stack-frames in one go after that.
827 */
828static void do_signal(struct pt_regs *regs)
829{
830	unsigned long continue_addr = 0, restart_addr = 0;
831	int retval = 0;
832	struct ksignal ksig;
833
834	/*
835	 * If we were from a system call, check for system call restarting...
836	 */
837	if (in_syscall(regs)) {
838		continue_addr = regs->pc;
839		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
840		retval = regs->regs[0];
841
842		/*
843		 * Avoid additional syscall restarting via ret_to_user.
844		 */
845		forget_syscall(regs);
846
847		/*
848		 * Prepare for system call restart. We do this here so that a
849		 * debugger will see the already changed PC.
850		 */
851		switch (retval) {
852		case -ERESTARTNOHAND:
853		case -ERESTARTSYS:
854		case -ERESTARTNOINTR:
855		case -ERESTART_RESTARTBLOCK:
856			regs->regs[0] = regs->orig_x0;
857			regs->pc = restart_addr;
858			break;
859		}
860	}
861
862	/*
863	 * Get the signal to deliver. When running under ptrace, at this point
864	 * the debugger may change all of our registers.
865	 */
866	if (get_signal(&ksig)) {
867		/*
868		 * Depending on the signal settings, we may need to revert the
869		 * decision to restart the system call, but skip this if a
870		 * debugger has chosen to restart at a different PC.
871		 */
872		if (regs->pc == restart_addr &&
873		    (retval == -ERESTARTNOHAND ||
874		     retval == -ERESTART_RESTARTBLOCK ||
875		     (retval == -ERESTARTSYS &&
876		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
877			regs->regs[0] = -EINTR;
878			regs->pc = continue_addr;
879		}
880
881		handle_signal(&ksig, regs);
882		return;
883	}
884
885	/*
886	 * Handle restarting a different system call. As above, if a debugger
887	 * has chosen to restart at a different PC, ignore the restart.
888	 */
889	if (in_syscall(regs) && regs->pc == restart_addr) {
890		if (retval == -ERESTART_RESTARTBLOCK)
891			setup_restart_syscall(regs);
892		user_rewind_single_step(current);
893	}
894
895	restore_saved_sigmask();
896}
897
898asmlinkage void do_notify_resume(struct pt_regs *regs,
899				 unsigned int thread_flags)
900{
901	/*
902	 * The assembly code enters us with IRQs off, but it hasn't
903	 * informed the tracing code of that for efficiency reasons.
904	 * Update the trace code with the current status.
905	 */
906	trace_hardirqs_off();
907
908	do {
909		/* Check valid user FS if needed */
910		addr_limit_user_check();
911
912		if (thread_flags & _TIF_NEED_RESCHED) {
913			/* Unmask Debug and SError for the next task */
914			local_daif_restore(DAIF_PROCCTX_NOIRQ);
915
916			schedule();
917		} else {
918			local_daif_restore(DAIF_PROCCTX);
919
920			if (thread_flags & _TIF_UPROBE)
921				uprobe_notify_resume(regs);
922
923			if (thread_flags & _TIF_SIGPENDING)
924				do_signal(regs);
925
926			if (thread_flags & _TIF_NOTIFY_RESUME) {
927				clear_thread_flag(TIF_NOTIFY_RESUME);
928				tracehook_notify_resume(regs);
929			}
930
931			if (thread_flags & _TIF_FOREIGN_FPSTATE)
932				fpsimd_restore_current_state();
933		}
934
935		local_daif_mask();
936		thread_flags = READ_ONCE(current_thread_info()->flags);
937	} while (thread_flags & _TIF_WORK_MASK);
938}
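
For reference, the context records that setup_sigframe() lays out in uc_mcontext.__reserved[] above (and that parse_user_sigframe() validates on sigreturn) form a chain of _aarch64_ctx headers, terminated by a record with magic == 0, with EXTRA_MAGIC redirecting to out-of-line data when the frame outgrows __reserved[]. A minimal userspace sketch of walking that chain from a SA_SIGINFO handler, assuming an aarch64 glibc whose mcontext_t exposes the __reserved[] area and the UAPI record definitions from <asm/sigcontext.h>, might look like this:

#define _GNU_SOURCE
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <ucontext.h>
#include <asm/sigcontext.h>	/* struct _aarch64_ctx, FPSIMD_MAGIC, EXTRA_MAGIC, ... */

/* Walk the __reserved[] record chain laid out by setup_sigframe(). */
static void dump_records(ucontext_t *uc)
{
	struct _aarch64_ctx *head =
		(struct _aarch64_ctx *)uc->uc_mcontext.__reserved;

	while (head->magic) {	/* a zero magic/size record terminates the chain */
		if (head->magic == EXTRA_MAGIC) {
			/* Oversized frames continue at extra->datap. */
			struct extra_context *extra = (struct extra_context *)head;

			head = (struct _aarch64_ctx *)(uintptr_t)extra->datap;
			continue;
		}

		printf("record magic 0x%x, %u bytes\n", head->magic, head->size);
		head = (struct _aarch64_ctx *)((char *)head + head->size);
	}
}

static void handler(int sig, siginfo_t *info, void *ucp)
{
	dump_records(ucp);	/* printf() is for illustration only; it is not async-signal-safe */
}

The kernel's arm64 signal selftests (tools/testing/selftests/arm64/signal) parse frames in much the same way.
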
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Based on arch/arm/kernel/signal.c
   4 *
   5 * Copyright (C) 1995-2009 Russell King
   6 * Copyright (C) 2012 ARM Ltd.
   7 */
   8
   9#include <linux/cache.h>
  10#include <linux/compat.h>
  11#include <linux/errno.h>
  12#include <linux/kernel.h>
  13#include <linux/signal.h>
  14#include <linux/freezer.h>
  15#include <linux/stddef.h>
  16#include <linux/uaccess.h>
  17#include <linux/sizes.h>
  18#include <linux/string.h>
  19#include <linux/resume_user_mode.h>
  20#include <linux/ratelimit.h>
  21#include <linux/syscalls.h>
  22
  23#include <asm/daifflags.h>
  24#include <asm/debug-monitors.h>
  25#include <asm/elf.h>
  26#include <asm/exception.h>
  27#include <asm/cacheflush.h>
  28#include <asm/ucontext.h>
  29#include <asm/unistd.h>
  30#include <asm/fpsimd.h>
  31#include <asm/ptrace.h>
  32#include <asm/syscall.h>
  33#include <asm/signal32.h>
  34#include <asm/traps.h>
  35#include <asm/vdso.h>
  36
  37/*
  38 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
  39 */
  40struct rt_sigframe {
  41	struct siginfo info;
  42	struct ucontext uc;
  43};
  44
  45struct frame_record {
  46	u64 fp;
  47	u64 lr;
  48};
  49
  50struct rt_sigframe_user_layout {
  51	struct rt_sigframe __user *sigframe;
  52	struct frame_record __user *next_frame;
  53
  54	unsigned long size;	/* size of allocated sigframe data */
  55	unsigned long limit;	/* largest allowed size */
  56
  57	unsigned long fpsimd_offset;
  58	unsigned long esr_offset;
  59	unsigned long sve_offset;
  60	unsigned long tpidr2_offset;
  61	unsigned long za_offset;
  62	unsigned long zt_offset;
  63	unsigned long extra_offset;
  64	unsigned long end_offset;
  65};
  66
  67#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
  68#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
  69#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
  70
  71static void init_user_layout(struct rt_sigframe_user_layout *user)
  72{
  73	const size_t reserved_size =
  74		sizeof(user->sigframe->uc.uc_mcontext.__reserved);
  75
  76	memset(user, 0, sizeof(*user));
  77	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
  78
  79	user->limit = user->size + reserved_size;
  80
  81	user->limit -= TERMINATOR_SIZE;
  82	user->limit -= EXTRA_CONTEXT_SIZE;
  83	/* Reserve space for extension and terminator ^ */
  84}
  85
  86static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
  87{
  88	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
  89}
  90
  91/*
  92 * Sanity limit on the approximate maximum size of signal frame we'll
  93 * try to generate.  Stack alignment padding and the frame record are
  94 * not taken into account.  This limit is not a guarantee and is
  95 * NOT ABI.
  96 */
  97#define SIGFRAME_MAXSZ SZ_256K
  98
  99static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
 100			    unsigned long *offset, size_t size, bool extend)
 101{
 102	size_t padded_size = round_up(size, 16);
 103
 104	if (padded_size > user->limit - user->size &&
 105	    !user->extra_offset &&
 106	    extend) {
 107		int ret;
 108
 109		user->limit += EXTRA_CONTEXT_SIZE;
 110		ret = __sigframe_alloc(user, &user->extra_offset,
 111				       sizeof(struct extra_context), false);
 112		if (ret) {
 113			user->limit -= EXTRA_CONTEXT_SIZE;
 114			return ret;
 115		}
 116
 117		/* Reserve space for the __reserved[] terminator */
 118		user->size += TERMINATOR_SIZE;
 119
 120		/*
 121		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
 122		 * the terminator:
 123		 */
 124		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
 125	}
 126
 127	/* Still not enough space?  Bad luck! */
 128	if (padded_size > user->limit - user->size)
 129		return -ENOMEM;
 130
 131	*offset = user->size;
 132	user->size += padded_size;
 133
 134	return 0;
 135}
 136
 137/*
 138 * Allocate space for an optional record of <size> bytes in the user
 139 * signal frame.  The offset from the signal frame base address to the
 140 * allocated block is assigned to *offset.
 141 */
 142static int sigframe_alloc(struct rt_sigframe_user_layout *user,
 143			  unsigned long *offset, size_t size)
 144{
 145	return __sigframe_alloc(user, offset, size, true);
 146}
 147
 148/* Allocate the null terminator record and prevent further allocations */
 149static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
 150{
 151	int ret;
 152
 153	/* Un-reserve the space reserved for the terminator: */
 154	user->limit += TERMINATOR_SIZE;
 155
 156	ret = sigframe_alloc(user, &user->end_offset,
 157			     sizeof(struct _aarch64_ctx));
 158	if (ret)
 159		return ret;
 160
 161	/* Prevent further allocation: */
 162	user->limit = user->size;
 163	return 0;
 164}
 165
 166static void __user *apply_user_offset(
 167	struct rt_sigframe_user_layout const *user, unsigned long offset)
 168{
 169	char __user *base = (char __user *)user->sigframe;
 170
 171	return base + offset;
 172}
 173
 174struct user_ctxs {
 175	struct fpsimd_context __user *fpsimd;
 176	u32 fpsimd_size;
 177	struct sve_context __user *sve;
 178	u32 sve_size;
 179	struct tpidr2_context __user *tpidr2;
 180	u32 tpidr2_size;
 181	struct za_context __user *za;
 182	u32 za_size;
 183	struct zt_context __user *zt;
 184	u32 zt_size;
 185};
 186
 187static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
 188{
 189	struct user_fpsimd_state const *fpsimd =
 190		&current->thread.uw.fpsimd_state;
 191	int err;
 192
 193	/* copy the FP and status/control registers */
 194	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
 195	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
 196	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);
 197
 198	/* copy the magic/size information */
 199	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
 200	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);
 201
 202	return err ? -EFAULT : 0;
 203}
 204
 205static int restore_fpsimd_context(struct user_ctxs *user)
 206{
 207	struct user_fpsimd_state fpsimd;
 208	int err = 0;
 209
 210	/* check the size information */
 211	if (user->fpsimd_size != sizeof(struct fpsimd_context))
 212		return -EINVAL;
 213
 214	/* copy the FP and status/control registers */
 215	err = __copy_from_user(fpsimd.vregs, &(user->fpsimd->vregs),
 216			       sizeof(fpsimd.vregs));
 217	__get_user_error(fpsimd.fpsr, &(user->fpsimd->fpsr), err);
 218	__get_user_error(fpsimd.fpcr, &(user->fpsimd->fpcr), err);
 219
 220	clear_thread_flag(TIF_SVE);
 221	current->thread.fp_type = FP_STATE_FPSIMD;
 222
 223	/* load the hardware registers from the fpsimd_state structure */
 224	if (!err)
 225		fpsimd_update_current_state(&fpsimd);
 226
 227	return err ? -EFAULT : 0;
 228}
 229
 230
 231#ifdef CONFIG_ARM64_SVE
 232
 233static int preserve_sve_context(struct sve_context __user *ctx)
 234{
 235	int err = 0;
 236	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
 237	u16 flags = 0;
 238	unsigned int vl = task_get_sve_vl(current);
 239	unsigned int vq = 0;
 240
 241	if (thread_sm_enabled(&current->thread)) {
 242		vl = task_get_sme_vl(current);
 243		vq = sve_vq_from_vl(vl);
 244		flags |= SVE_SIG_FLAG_SM;
 245	} else if (current->thread.fp_type == FP_STATE_SVE) {
 246		vq = sve_vq_from_vl(vl);
 247	}
 248
 249	memset(reserved, 0, sizeof(reserved));
 250
 251	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
 252	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
 253			 &ctx->head.size, err);
 254	__put_user_error(vl, &ctx->vl, err);
 255	__put_user_error(flags, &ctx->flags, err);
 256	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
 257	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
 258
 259	if (vq) {
 260		/*
 261		 * This assumes that the SVE state has already been saved to
 262		 * the task struct by calling the function
 263		 * fpsimd_signal_preserve_current_state().
 264		 */
 265		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
 266				      current->thread.sve_state,
 267				      SVE_SIG_REGS_SIZE(vq));
 268	}
 269
 270	return err ? -EFAULT : 0;
 271}
 272
 273static int restore_sve_fpsimd_context(struct user_ctxs *user)
 274{
 275	int err = 0;
 276	unsigned int vl, vq;
 277	struct user_fpsimd_state fpsimd;
 278	u16 user_vl, flags;
 279
 280	if (user->sve_size < sizeof(*user->sve))
 281		return -EINVAL;
 282
 283	__get_user_error(user_vl, &(user->sve->vl), err);
 284	__get_user_error(flags, &(user->sve->flags), err);
 285	if (err)
 286		return err;
 287
 288	if (flags & SVE_SIG_FLAG_SM) {
 289		if (!system_supports_sme())
 290			return -EINVAL;
 291
 292		vl = task_get_sme_vl(current);
 293	} else {
 294		/*
 295	 * An SME-only system uses SVE for streaming mode, so it can
 296	 * have an SVE-formatted context with a zero VL and no
 297	 * payload data.
 298		 */
 299		if (!system_supports_sve() && !system_supports_sme())
 300			return -EINVAL;
 301
 302		vl = task_get_sve_vl(current);
 303	}
 304
 305	if (user_vl != vl)
 306		return -EINVAL;
 307
 308	if (user->sve_size == sizeof(*user->sve)) {
 309		clear_thread_flag(TIF_SVE);
 310		current->thread.svcr &= ~SVCR_SM_MASK;
 311		current->thread.fp_type = FP_STATE_FPSIMD;
 312		goto fpsimd_only;
 313	}
 314
 315	vq = sve_vq_from_vl(vl);
 316
 317	if (user->sve_size < SVE_SIG_CONTEXT_SIZE(vq))
 318		return -EINVAL;
 319
 320	/*
 321	 * Careful: we are about to __copy_from_user() directly into
 322	 * thread.sve_state with preemption enabled, so protection is
 323	 * needed to prevent a racing context switch from writing stale
 324	 * registers back over the new data.
 325	 */
 326
 327	fpsimd_flush_task_state(current);
 328	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
 329
 330	sve_alloc(current, true);
 331	if (!current->thread.sve_state) {
 332		clear_thread_flag(TIF_SVE);
 333		return -ENOMEM;
 334	}
 335
 336	err = __copy_from_user(current->thread.sve_state,
 337			       (char __user const *)user->sve +
 338					SVE_SIG_REGS_OFFSET,
 339			       SVE_SIG_REGS_SIZE(vq));
 340	if (err)
 341		return -EFAULT;
 342
 343	if (flags & SVE_SIG_FLAG_SM)
 344		current->thread.svcr |= SVCR_SM_MASK;
 345	else
 346		set_thread_flag(TIF_SVE);
 347	current->thread.fp_type = FP_STATE_SVE;
 348
 349fpsimd_only:
 350	/* copy the FP and status/control registers */
 351	/* restore_sigframe() already checked that user->fpsimd != NULL. */
 352	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
 353			       sizeof(fpsimd.vregs));
 354	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
 355	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
 356
 357	/* load the hardware registers from the fpsimd_state structure */
 358	if (!err)
 359		fpsimd_update_current_state(&fpsimd);
 360
 361	return err ? -EFAULT : 0;
 362}
 363
 364#else /* ! CONFIG_ARM64_SVE */
 365
 366static int restore_sve_fpsimd_context(struct user_ctxs *user)
 367{
 368	WARN_ON_ONCE(1);
 369	return -EINVAL;
 370}
 371
 372/* Turn any non-optimised out attempts to use this into a link error: */
 373extern int preserve_sve_context(void __user *ctx);
 374
 375#endif /* ! CONFIG_ARM64_SVE */
 376
 377#ifdef CONFIG_ARM64_SME
 378
 379static int preserve_tpidr2_context(struct tpidr2_context __user *ctx)
 380{
 381	int err = 0;
 382
 383	current->thread.tpidr2_el0 = read_sysreg_s(SYS_TPIDR2_EL0);
 384
 385	__put_user_error(TPIDR2_MAGIC, &ctx->head.magic, err);
 386	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
 387	__put_user_error(current->thread.tpidr2_el0, &ctx->tpidr2, err);
 388
 389	return err;
 390}
 391
 392static int restore_tpidr2_context(struct user_ctxs *user)
 393{
 394	u64 tpidr2_el0;
 395	int err = 0;
 396
 397	if (user->tpidr2_size != sizeof(*user->tpidr2))
 398		return -EINVAL;
 399
 400	__get_user_error(tpidr2_el0, &user->tpidr2->tpidr2, err);
 401	if (!err)
 402		write_sysreg_s(tpidr2_el0, SYS_TPIDR2_EL0);
 403
 404	return err;
 405}
 406
 407static int preserve_za_context(struct za_context __user *ctx)
 408{
 409	int err = 0;
 410	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
 411	unsigned int vl = task_get_sme_vl(current);
 412	unsigned int vq;
 413
 414	if (thread_za_enabled(&current->thread))
 415		vq = sve_vq_from_vl(vl);
 416	else
 417		vq = 0;
 418
 419	memset(reserved, 0, sizeof(reserved));
 420
 421	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
 422	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
 423			 &ctx->head.size, err);
 424	__put_user_error(vl, &ctx->vl, err);
 425	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
 426	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
 427
 428	if (vq) {
 429		/*
 430		 * This assumes that the ZA state has already been saved to
 431		 * the task struct by calling the function
 432		 * fpsimd_signal_preserve_current_state().
 433		 */
 434		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
 435				      current->thread.sme_state,
 436				      ZA_SIG_REGS_SIZE(vq));
 437	}
 438
 439	return err ? -EFAULT : 0;
 440}
 441
 442static int restore_za_context(struct user_ctxs *user)
 443{
 444	int err = 0;
 445	unsigned int vq;
 446	u16 user_vl;
 447
 448	if (user->za_size < sizeof(*user->za))
 449		return -EINVAL;
 450
 451	__get_user_error(user_vl, &(user->za->vl), err);
 452	if (err)
 453		return err;
 454
 455	if (user_vl != task_get_sme_vl(current))
 456		return -EINVAL;
 457
 458	if (user->za_size == sizeof(*user->za)) {
 459		current->thread.svcr &= ~SVCR_ZA_MASK;
 460		return 0;
 461	}
 462
 463	vq = sve_vq_from_vl(user_vl);
 464
 465	if (user->za_size < ZA_SIG_CONTEXT_SIZE(vq))
 466		return -EINVAL;
 467
 468	/*
 469	 * Careful: we are about to __copy_from_user() directly into
 470	 * thread.sme_state with preemption enabled, so protection is
 471	 * needed to prevent a racing context switch from writing stale
 472	 * registers back over the new data.
 473	 */
 474
 475	fpsimd_flush_task_state(current);
 476	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */
 477
 478	sme_alloc(current, true);
 479	if (!current->thread.sme_state) {
 480		current->thread.svcr &= ~SVCR_ZA_MASK;
 481		clear_thread_flag(TIF_SME);
 482		return -ENOMEM;
 483	}
 484
 485	err = __copy_from_user(current->thread.sme_state,
 486			       (char __user const *)user->za +
 487					ZA_SIG_REGS_OFFSET,
 488			       ZA_SIG_REGS_SIZE(vq));
 489	if (err)
 490		return -EFAULT;
 491
 492	set_thread_flag(TIF_SME);
 493	current->thread.svcr |= SVCR_ZA_MASK;
 494
 495	return 0;
 496}
 497
 498static int preserve_zt_context(struct zt_context __user *ctx)
 499{
 500	int err = 0;
 501	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
 502
 503	if (WARN_ON(!thread_za_enabled(&current->thread)))
 504		return -EINVAL;
 505
 506	memset(reserved, 0, sizeof(reserved));
 507
 508	__put_user_error(ZT_MAGIC, &ctx->head.magic, err);
 509	__put_user_error(round_up(ZT_SIG_CONTEXT_SIZE(1), 16),
 510			 &ctx->head.size, err);
 511	__put_user_error(1, &ctx->nregs, err);
 512	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
 513	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
 514
 515	/*
 516	 * This assumes that the ZT state has already been saved to
 517	 * the task struct by calling the function
 518	 * fpsimd_signal_preserve_current_state().
 519	 */
 520	err |= __copy_to_user((char __user *)ctx + ZT_SIG_REGS_OFFSET,
 521			      thread_zt_state(&current->thread),
 522			      ZT_SIG_REGS_SIZE(1));
 523
 524	return err ? -EFAULT : 0;
 525}
 526
 527static int restore_zt_context(struct user_ctxs *user)
 528{
 529	int err;
 530	u16 nregs;
 531
 532	/* ZA must be restored first for this check to be valid */
 533	if (!thread_za_enabled(&current->thread))
 534		return -EINVAL;
 535
 536	if (user->zt_size != ZT_SIG_CONTEXT_SIZE(1))
 537		return -EINVAL;
 538
 539	if (__copy_from_user(&nregs, &(user->zt->nregs), sizeof(nregs)))
 540		return -EFAULT;
 541
 542	if (nregs != 1)
 543		return -EINVAL;
 544
 545	/*
 546	 * Careful: we are about to __copy_from_user() directly into
 547	 * thread.zt_state with preemption enabled, so protection is
 548	 * needed to prevent a racing context switch from writing stale
 549	 * registers back over the new data.
 550	 */
 551
 552	fpsimd_flush_task_state(current);
 553	/* From now, fpsimd_thread_switch() won't touch ZT in thread state */
 554
 555	err = __copy_from_user(thread_zt_state(&current->thread),
 556			       (char __user const *)user->zt +
 557					ZT_SIG_REGS_OFFSET,
 558			       ZT_SIG_REGS_SIZE(1));
 559	if (err)
 560		return -EFAULT;
 561
 562	return 0;
 563}
 564
 565#else /* ! CONFIG_ARM64_SME */
 566
 567/* Turn any non-optimised out attempts to use these into a link error: */
 568extern int preserve_tpidr2_context(void __user *ctx);
 569extern int restore_tpidr2_context(struct user_ctxs *user);
 570extern int preserve_za_context(void __user *ctx);
 571extern int restore_za_context(struct user_ctxs *user);
 572extern int preserve_zt_context(void __user *ctx);
 573extern int restore_zt_context(struct user_ctxs *user);
 574
 575#endif /* ! CONFIG_ARM64_SME */
 576
 577static int parse_user_sigframe(struct user_ctxs *user,
 578			       struct rt_sigframe __user *sf)
 579{
 580	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
 581	struct _aarch64_ctx __user *head;
 582	char __user *base = (char __user *)&sc->__reserved;
 583	size_t offset = 0;
 584	size_t limit = sizeof(sc->__reserved);
 585	bool have_extra_context = false;
 586	char const __user *const sfp = (char const __user *)sf;
 587
 588	user->fpsimd = NULL;
 589	user->sve = NULL;
 590	user->tpidr2 = NULL;
 591	user->za = NULL;
 592	user->zt = NULL;
 593
 594	if (!IS_ALIGNED((unsigned long)base, 16))
 595		goto invalid;
 596
 597	while (1) {
 598		int err = 0;
 599		u32 magic, size;
 600		char const __user *userp;
 601		struct extra_context const __user *extra;
 602		u64 extra_datap;
 603		u32 extra_size;
 604		struct _aarch64_ctx const __user *end;
 605		u32 end_magic, end_size;
 606
 607		if (limit - offset < sizeof(*head))
 608			goto invalid;
 609
 610		if (!IS_ALIGNED(offset, 16))
 611			goto invalid;
 612
 613		head = (struct _aarch64_ctx __user *)(base + offset);
 614		__get_user_error(magic, &head->magic, err);
 615		__get_user_error(size, &head->size, err);
 616		if (err)
 617			return err;
 618
 619		if (limit - offset < size)
 620			goto invalid;
 621
 622		switch (magic) {
 623		case 0:
 624			if (size)
 625				goto invalid;
 626
 627			goto done;
 628
 629		case FPSIMD_MAGIC:
 630			if (!system_supports_fpsimd())
 631				goto invalid;
 632			if (user->fpsimd)
 633				goto invalid;
 634
 635			user->fpsimd = (struct fpsimd_context __user *)head;
 636			user->fpsimd_size = size;
 637			break;
 638
 639		case ESR_MAGIC:
 640			/* ignore */
 641			break;
 642
 643		case SVE_MAGIC:
 644			if (!system_supports_sve() && !system_supports_sme())
 645				goto invalid;
 646
 647			if (user->sve)
 648				goto invalid;
 649
 650			user->sve = (struct sve_context __user *)head;
 651			user->sve_size = size;
 652			break;
 653
 654		case TPIDR2_MAGIC:
 655			if (!system_supports_tpidr2())
 656				goto invalid;
 657
 658			if (user->tpidr2)
 659				goto invalid;
 660
 661			user->tpidr2 = (struct tpidr2_context __user *)head;
 662			user->tpidr2_size = size;
 663			break;
 664
 665		case ZA_MAGIC:
 666			if (!system_supports_sme())
 667				goto invalid;
 668
 669			if (user->za)
 670				goto invalid;
 671
 672			user->za = (struct za_context __user *)head;
 673			user->za_size = size;
 674			break;
 675
 676		case ZT_MAGIC:
 677			if (!system_supports_sme2())
 678				goto invalid;
 679
 680			if (user->zt)
 681				goto invalid;
 682
 683			user->zt = (struct zt_context __user *)head;
 684			user->zt_size = size;
 685			break;
 686
 687		case EXTRA_MAGIC:
 688			if (have_extra_context)
 689				goto invalid;
 690
 691			if (size < sizeof(*extra))
 692				goto invalid;
 693
 694			userp = (char const __user *)head;
 695
 696			extra = (struct extra_context const __user *)userp;
 697			userp += size;
 698
 699			__get_user_error(extra_datap, &extra->datap, err);
 700			__get_user_error(extra_size, &extra->size, err);
 701			if (err)
 702				return err;
 703
 704			/* Check for the dummy terminator in __reserved[]: */
 705
 706			if (limit - offset - size < TERMINATOR_SIZE)
 707				goto invalid;
 708
 709			end = (struct _aarch64_ctx const __user *)userp;
 710			userp += TERMINATOR_SIZE;
 711
 712			__get_user_error(end_magic, &end->magic, err);
 713			__get_user_error(end_size, &end->size, err);
 714			if (err)
 715				return err;
 716
 717			if (end_magic || end_size)
 718				goto invalid;
 719
 720			/* Prevent looping/repeated parsing of extra_context */
 721			have_extra_context = true;
 722
 723			base = (__force void __user *)extra_datap;
 724			if (!IS_ALIGNED((unsigned long)base, 16))
 725				goto invalid;
 726
 727			if (!IS_ALIGNED(extra_size, 16))
 728				goto invalid;
 729
 730			if (base != userp)
 731				goto invalid;
 732
 733			/* Reject "unreasonably large" frames: */
 734			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
 735				goto invalid;
 736
 737			/*
 738			 * Ignore trailing terminator in __reserved[]
 739			 * and start parsing extra data:
 740			 */
 741			offset = 0;
 742			limit = extra_size;
 743
 744			if (!access_ok(base, limit))
 745				goto invalid;
 746
 747			continue;
 748
 749		default:
 750			goto invalid;
 751		}
 752
 753		if (size < sizeof(*head))
 754			goto invalid;
 755
 756		if (limit - offset < size)
 757			goto invalid;
 758
 759		offset += size;
 760	}
 761
 762done:
 763	return 0;
 764
 765invalid:
 766	return -EINVAL;
 767}
 768
 769static int restore_sigframe(struct pt_regs *regs,
 770			    struct rt_sigframe __user *sf)
 771{
 772	sigset_t set;
 773	int i, err;
 774	struct user_ctxs user;
 775
 776	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
 777	if (err == 0)
 778		set_current_blocked(&set);
 779
 780	for (i = 0; i < 31; i++)
 781		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
 782				 err);
 783	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
 784	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
 785	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
 786
 787	/*
 788	 * Avoid sys_rt_sigreturn() restarting.
 789	 */
 790	forget_syscall(regs);
 791
 792	err |= !valid_user_regs(&regs->user_regs, current);
 793	if (err == 0)
 794		err = parse_user_sigframe(&user, sf);
 795
 796	if (err == 0 && system_supports_fpsimd()) {
 797		if (!user.fpsimd)
 798			return -EINVAL;
 799
 800		if (user.sve)
 801			err = restore_sve_fpsimd_context(&user);
 802		else
 803			err = restore_fpsimd_context(&user);
 804	}
 805
 806	if (err == 0 && system_supports_tpidr2() && user.tpidr2)
 807		err = restore_tpidr2_context(&user);
 808
 809	if (err == 0 && system_supports_sme() && user.za)
 810		err = restore_za_context(&user);
 811
 812	if (err == 0 && system_supports_sme2() && user.zt)
 813		err = restore_zt_context(&user);
 814
 815	return err;
 816}
 817
 818SYSCALL_DEFINE0(rt_sigreturn)
 819{
 820	struct pt_regs *regs = current_pt_regs();
 821	struct rt_sigframe __user *frame;
 822
 823	/* Always make any pending restarted system calls return -EINTR */
 824	current->restart_block.fn = do_no_restart_syscall;
 825
 826	/*
 827	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
 828	 * be word aligned here.
 829	 */
 830	if (regs->sp & 15)
 831		goto badframe;
 832
 833	frame = (struct rt_sigframe __user *)regs->sp;
 834
 835	if (!access_ok(frame, sizeof (*frame)))
 836		goto badframe;
 837
 838	if (restore_sigframe(regs, frame))
 839		goto badframe;
 840
 841	if (restore_altstack(&frame->uc.uc_stack))
 842		goto badframe;
 843
 844	return regs->regs[0];
 845
 846badframe:
 847	arm64_notify_segfault(regs->sp);
 848	return 0;
 849}
 850
 851/*
 852 * Determine the layout of optional records in the signal frame
 853 *
 854 * add_all: if true, lays out the biggest possible signal frame for
 855 *	this task; otherwise, generates a layout for the current state
 856 *	of the task.
 857 */
 858static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
 859				 bool add_all)
 860{
 861	int err;
 862
 863	if (system_supports_fpsimd()) {
 864		err = sigframe_alloc(user, &user->fpsimd_offset,
 865				     sizeof(struct fpsimd_context));
 866		if (err)
 867			return err;
 868	}
 869
 870	/* fault information, if valid */
 871	if (add_all || current->thread.fault_code) {
 872		err = sigframe_alloc(user, &user->esr_offset,
 873				     sizeof(struct esr_context));
 874		if (err)
 875			return err;
 876	}
 877
 878	if (system_supports_sve() || system_supports_sme()) {
 879		unsigned int vq = 0;
 880
 881		if (add_all || current->thread.fp_type == FP_STATE_SVE ||
 882		    thread_sm_enabled(&current->thread)) {
 883			int vl = max(sve_max_vl(), sme_max_vl());
 884
 885			if (!add_all)
 886				vl = thread_get_cur_vl(&current->thread);
 887
 888			vq = sve_vq_from_vl(vl);
 889		}
 890
 891		err = sigframe_alloc(user, &user->sve_offset,
 892				     SVE_SIG_CONTEXT_SIZE(vq));
 893		if (err)
 894			return err;
 895	}
 896
 897	if (system_supports_tpidr2()) {
 898		err = sigframe_alloc(user, &user->tpidr2_offset,
 899				     sizeof(struct tpidr2_context));
 900		if (err)
 901			return err;
 902	}
 903
 904	if (system_supports_sme()) {
 905		unsigned int vl;
 906		unsigned int vq = 0;
 907
 908		if (add_all)
 909			vl = sme_max_vl();
 910		else
 911			vl = task_get_sme_vl(current);
 912
 913		if (thread_za_enabled(&current->thread))
 914			vq = sve_vq_from_vl(vl);
 915
 916		err = sigframe_alloc(user, &user->za_offset,
 917				     ZA_SIG_CONTEXT_SIZE(vq));
 918		if (err)
 919			return err;
 920	}
 921
 922	if (system_supports_sme2()) {
 923		if (add_all || thread_za_enabled(&current->thread)) {
 924			err = sigframe_alloc(user, &user->zt_offset,
 925					     ZT_SIG_CONTEXT_SIZE(1));
 926			if (err)
 927				return err;
 928		}
 929	}
 930
 931	return sigframe_alloc_end(user);
 932}
 933
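
setup_sigframe_layout() with add_all set computes the largest frame the kernel could generate for this task, and on arm64 that worst-case size is reported to userspace through the AT_MINSIGSTKSZ auxiliary vector entry, since SVE/SME state can push frames well past the legacy MINSIGSTKSZ constant. A hedged sketch of sizing an alternate signal stack from that value (the AT_MINSIGSTKSZ constant may be missing from older libc headers, hence the fallback define):

#include <signal.h>
#include <stdlib.h>
#include <sys/auxv.h>

#ifndef AT_MINSIGSTKSZ
#define AT_MINSIGSTKSZ 51	/* from <linux/auxvec.h>; absent in older libc headers */
#endif

/* Install an alternate signal stack large enough for the biggest
 * signal frame the kernel may generate (SVE/SME state included). */
static int install_sigaltstack(void)
{
	unsigned long min = getauxval(AT_MINSIGSTKSZ);
	stack_t ss = { 0 };

	if (!min)
		min = MINSIGSTKSZ;	/* kernel too old to report AT_MINSIGSTKSZ */

	ss.ss_size = min + SIGSTKSZ;	/* headroom for the handler's own use */
	ss.ss_sp = malloc(ss.ss_size);
	if (!ss.ss_sp)
		return -1;

	return sigaltstack(&ss, NULL);
}
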
 934static int setup_sigframe(struct rt_sigframe_user_layout *user,
 935			  struct pt_regs *regs, sigset_t *set)
 936{
 937	int i, err = 0;
 938	struct rt_sigframe __user *sf = user->sigframe;
 939
 940	/* set up the stack frame for unwinding */
 941	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
 942	__put_user_error(regs->regs[30], &user->next_frame->lr, err);
 943
 944	for (i = 0; i < 31; i++)
 945		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
 946				 err);
 947	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
 948	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
 949	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
 950
 951	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
 952
 953	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
 954
 955	if (err == 0 && system_supports_fpsimd()) {
 956		struct fpsimd_context __user *fpsimd_ctx =
 957			apply_user_offset(user, user->fpsimd_offset);
 958		err |= preserve_fpsimd_context(fpsimd_ctx);
 959	}
 960
 961	/* fault information, if valid */
 962	if (err == 0 && user->esr_offset) {
 963		struct esr_context __user *esr_ctx =
 964			apply_user_offset(user, user->esr_offset);
 965
 966		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
 967		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
 968		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
 969	}
 970
 971	/* Scalable Vector Extension state (including streaming), if present */
 972	if ((system_supports_sve() || system_supports_sme()) &&
 973	    err == 0 && user->sve_offset) {
 974		struct sve_context __user *sve_ctx =
 975			apply_user_offset(user, user->sve_offset);
 976		err |= preserve_sve_context(sve_ctx);
 977	}
 978
 979	/* TPIDR2 if supported */
 980	if (system_supports_tpidr2() && err == 0) {
 981		struct tpidr2_context __user *tpidr2_ctx =
 982			apply_user_offset(user, user->tpidr2_offset);
 983		err |= preserve_tpidr2_context(tpidr2_ctx);
 984	}
 985
 986	/* ZA state if present */
 987	if (system_supports_sme() && err == 0 && user->za_offset) {
 988		struct za_context __user *za_ctx =
 989			apply_user_offset(user, user->za_offset);
 990		err |= preserve_za_context(za_ctx);
 991	}
 992
 993	/* ZT state if present */
 994	if (system_supports_sme2() && err == 0 && user->zt_offset) {
 995		struct zt_context __user *zt_ctx =
 996			apply_user_offset(user, user->zt_offset);
 997		err |= preserve_zt_context(zt_ctx);
 998	}
 999
1000	if (err == 0 && user->extra_offset) {
1001		char __user *sfp = (char __user *)user->sigframe;
1002		char __user *userp =
1003			apply_user_offset(user, user->extra_offset);
1004
1005		struct extra_context __user *extra;
1006		struct _aarch64_ctx __user *end;
1007		u64 extra_datap;
1008		u32 extra_size;
1009
1010		extra = (struct extra_context __user *)userp;
1011		userp += EXTRA_CONTEXT_SIZE;
1012
1013		end = (struct _aarch64_ctx __user *)userp;
1014		userp += TERMINATOR_SIZE;
1015
1016		/*
1017		 * extra_datap is just written to the signal frame.
1018		 * The value gets cast back to a void __user *
1019		 * during sigreturn.
1020		 */
1021		extra_datap = (__force u64)userp;
1022		extra_size = sfp + round_up(user->size, 16) - userp;
1023
1024		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
1025		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
1026		__put_user_error(extra_datap, &extra->datap, err);
1027		__put_user_error(extra_size, &extra->size, err);
1028
1029		/* Add the terminator */
1030		__put_user_error(0, &end->magic, err);
1031		__put_user_error(0, &end->size, err);
1032	}
1033
1034	/* set the "end" magic */
1035	if (err == 0) {
1036		struct _aarch64_ctx __user *end =
1037			apply_user_offset(user, user->end_offset);
1038
1039		__put_user_error(0, &end->magic, err);
1040		__put_user_error(0, &end->size, err);
1041	}
1042
1043	return err;
1044}
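
/*
 * Illustrative userspace sketch (not part of this file): walking the
 * record chain that setup_sigframe() writes into uc_mcontext.__reserved
 * from an SA_SIGINFO handler. Assumes the uapi <asm/sigcontext.h>
 * definitions; names and error handling are for illustration only.
 *
 *	#include <asm/sigcontext.h>
 *	#include <signal.h>
 *	#include <stdint.h>
 *	#include <ucontext.h>
 *
 *	static void handler(int sig, siginfo_t *info, void *ucv)
 *	{
 *		ucontext_t *uc = ucv;
 *		uint8_t *p = (uint8_t *)uc->uc_mcontext.__reserved;
 *
 *		for (;;) {
 *			struct _aarch64_ctx *head = (struct _aarch64_ctx *)p;
 *
 *			if (head->magic == 0)		// terminator record
 *				break;
 *
 *			if (head->magic == EXTRA_MAGIC) {
 *				// chain continues outside __reserved[]
 *				struct extra_context *extra = (void *)p;
 *
 *				p = (uint8_t *)(uintptr_t)extra->datap;
 *				continue;
 *			}
 *
 *			// FPSIMD_MAGIC, ESR_MAGIC, SVE_MAGIC, ... records
 *			p += head->size;
 *		}
 *	}
 */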
1045
1046static int get_sigframe(struct rt_sigframe_user_layout *user,
1047			 struct ksignal *ksig, struct pt_regs *regs)
1048{
1049	unsigned long sp, sp_top;
1050	int err;
1051
1052	init_user_layout(user);
1053	err = setup_sigframe_layout(user, false);
1054	if (err)
1055		return err;
1056
1057	sp = sp_top = sigsp(regs->sp, ksig);
1058
1059	sp = round_down(sp - sizeof(struct frame_record), 16);
1060	user->next_frame = (struct frame_record __user *)sp;
1061
1062	sp = round_down(sp, 16) - sigframe_size(user);
1063	user->sigframe = (struct rt_sigframe __user *)sp;
1064
1065	/*
1066	 * Check that we can actually write to the signal frame.
1067	 */
1068	if (!access_ok(user->sigframe, sp_top - sp))
1069		return -EFAULT;
1070
1071	return 0;
1072}
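
/*
 * Rough picture of what get_sigframe() (and setup_return() below) leave
 * on the chosen stack, higher addresses first:
 *
 *	sp_top:      original sp, or the top of the SA_ONSTACK stack
 *	next_frame:  struct frame_record { fp, lr }  <- new regs[29]
 *	sigframe:    struct rt_sigframe              <- new sp, 16-byte aligned
 */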
1073
1074static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
1075			 struct rt_sigframe_user_layout *user, int usig)
1076{
1077	__sigrestore_t sigtramp;
1078
1079	regs->regs[0] = usig;
1080	regs->sp = (unsigned long)user->sigframe;
1081	regs->regs[29] = (unsigned long)&user->next_frame->fp;
1082	regs->pc = (unsigned long)ka->sa.sa_handler;
1083
1084	/*
1085	 * Signal delivery is a (wacky) indirect function call in
1086	 * userspace, so simulate the same setting of BTYPE as a BLR
1087	 * <register containing the signal handler entry point>.
1088	 * Signal delivery to a location in a PROT_BTI guarded page
1089	 * that is not a function entry point will now trigger a
1090	 * SIGILL in userspace.
1091	 *
1092	 * If the signal handler entry point is not in a PROT_BTI
1093	 * guarded page, this is harmless.
1094	 */
1095	if (system_supports_bti()) {
1096		regs->pstate &= ~PSR_BTYPE_MASK;
1097		regs->pstate |= PSR_BTYPE_C;
1098	}
1099
1100	/* TCO (Tag Check Override) always cleared for signal handlers */
1101	regs->pstate &= ~PSR_TCO_BIT;
1102
1103	/* Signal handlers are invoked with ZA and streaming mode disabled */
1104	if (system_supports_sme()) {
1105		/*
1106		 * If we were in streaming mode the saved register
1107		 * state was SVE but we will exit SM and use the
1108		 * FPSIMD register state - flush the saved FPSIMD
1109		 * register state in case it gets loaded.
1110		 */
1111		if (current->thread.svcr & SVCR_SM_MASK) {
1112			memset(&current->thread.uw.fpsimd_state, 0,
1113			       sizeof(current->thread.uw.fpsimd_state));
1114			current->thread.fp_type = FP_STATE_FPSIMD;
1115		}
1116
1117		current->thread.svcr &= ~(SVCR_ZA_MASK |
1118					  SVCR_SM_MASK);
1119		sme_smstop();
1120	}
1121
1122	if (ka->sa.sa_flags & SA_RESTORER)
1123		sigtramp = ka->sa.sa_restorer;
1124	else
1125		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
1126
1127	regs->regs[30] = (unsigned long)sigtramp;
1128}
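
/*
 * Because BTYPE is set above as if the handler had been entered via BLR,
 * a handler that lives in a PROT_BTI guarded page must start with a BTI
 * landing pad or it takes a SIGILL. Illustrative sketch only (file and
 * function names are hypothetical), assuming a toolchain with AArch64
 * branch protection support:
 *
 *	// built with: cc -mbranch-protection=standard -c handler.c
 *	// the compiler emits a "bti c" landing pad at function entry,
 *	// which is a valid target for the PSR_BTYPE_C state set up here
 *	void my_handler(int sig)
 *	{
 *		// handler body
 *	}
 */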
1129
1130static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
1131			  struct pt_regs *regs)
1132{
1133	struct rt_sigframe_user_layout user;
1134	struct rt_sigframe __user *frame;
1135	int err = 0;
1136
1137	fpsimd_signal_preserve_current_state();
1138
1139	if (get_sigframe(&user, ksig, regs))
1140		return 1;
1141
1142	frame = user.sigframe;
1143
1144	__put_user_error(0, &frame->uc.uc_flags, err);
1145	__put_user_error(NULL, &frame->uc.uc_link, err);
1146
1147	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
1148	err |= setup_sigframe(&user, regs, set);
1149	if (err == 0) {
1150		setup_return(regs, &ksig->ka, &user, usig);
1151		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
1152			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
1153			regs->regs[1] = (unsigned long)&frame->info;
1154			regs->regs[2] = (unsigned long)&frame->uc;
1155		}
1156	}
1157
1158	return err;
1159}
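
/*
 * Illustrative userspace counterpart (not part of this file): the
 * x0/x1/x2 values set up above are what a three-argument SA_SIGINFO
 * handler expects. A minimal registration sketch, with a hypothetical
 * handler name:
 *
 *	#include <signal.h>
 *
 *	static void handler(int sig, siginfo_t *info, void *uc)
 *	{
 *		// sig arrives in x0, info in x1, uc in x2 (&frame->uc)
 *	}
 *
 *	static int install(void)
 *	{
 *		struct sigaction sa = {
 *			.sa_sigaction	= handler,
 *			.sa_flags	= SA_SIGINFO,
 *		};
 *
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGUSR1, &sa, NULL);
 *	}
 */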
1160
1161static void setup_restart_syscall(struct pt_regs *regs)
1162{
1163	if (is_compat_task())
1164		compat_setup_restart_syscall(regs);
1165	else
1166		regs->regs[8] = __NR_restart_syscall;
1167}
1168
1169/*
1170 * OK, we're invoking a handler
1171 */
1172static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
1173{
1174	sigset_t *oldset = sigmask_to_save();
1175	int usig = ksig->sig;
1176	int ret;
1177
1178	rseq_signal_deliver(ksig, regs);
1179
1180	/*
1181	 * Set up the stack frame
1182	 */
1183	if (is_compat_task()) {
1184		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
1185			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
1186		else
1187			ret = compat_setup_frame(usig, ksig, oldset, regs);
1188	} else {
1189		ret = setup_rt_frame(usig, ksig, oldset, regs);
1190	}
1191
1192	/*
1193	 * Check that the resulting registers are actually sane.
1194	 */
1195	ret |= !valid_user_regs(&regs->user_regs, current);
1196
1197	/* Step into the signal handler if we are stepping */
1198	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
1199}
1200
1201/*
1202 * Note that 'init' is a special process: it doesn't get signals it doesn't
1203 * want to handle. Thus you cannot kill init even with a SIGKILL, even
1204 * by mistake.
1205 *
1206 * Note that we go through the signals twice: once to check the signals that
1207 * the kernel can handle, and then we build all the user-level signal handling
1208 * stack-frames in one go after that.
1209 */
1210static void do_signal(struct pt_regs *regs)
1211{
1212	unsigned long continue_addr = 0, restart_addr = 0;
1213	int retval = 0;
1214	struct ksignal ksig;
1215	bool syscall = in_syscall(regs);
1216
1217	/*
1218	 * If we came from a system call, check for system call restarting...
1219	 */
1220	if (syscall) {
1221		continue_addr = regs->pc;
1222		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
1223		retval = regs->regs[0];
1224
1225		/*
1226		 * Avoid additional syscall restarting via ret_to_user.
1227		 */
1228		forget_syscall(regs);
1229
1230		/*
1231		 * Prepare for system call restart. We do this here so that a
1232		 * debugger will see the already changed PC.
1233		 */
1234		switch (retval) {
1235		case -ERESTARTNOHAND:
1236		case -ERESTARTSYS:
1237		case -ERESTARTNOINTR:
1238		case -ERESTART_RESTARTBLOCK:
1239			regs->regs[0] = regs->orig_x0;
1240			regs->pc = restart_addr;
1241			break;
1242		}
1243	}
1244
1245	/*
1246	 * Get the signal to deliver. When running under ptrace, at this point
1247	 * the debugger may change all of our registers.
1248	 */
1249	if (get_signal(&ksig)) {
1250		/*
1251		 * Depending on the signal settings, we may need to revert the
1252		 * decision to restart the system call, but skip this if a
1253		 * debugger has chosen to restart at a different PC.
1254		 */
1255		if (regs->pc == restart_addr &&
1256		    (retval == -ERESTARTNOHAND ||
1257		     retval == -ERESTART_RESTARTBLOCK ||
1258		     (retval == -ERESTARTSYS &&
1259		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
1260			syscall_set_return_value(current, regs, -EINTR, 0);
1261			regs->pc = continue_addr;
1262		}
1263
1264		handle_signal(&ksig, regs);
1265		return;
1266	}
1267
1268	/*
1269	 * Handle restarting a different system call. As above, if a debugger
1270	 * has chosen to restart at a different PC, ignore the restart.
1271	 */
1272	if (syscall && regs->pc == restart_addr) {
1273		if (retval == -ERESTART_RESTARTBLOCK)
1274			setup_restart_syscall(regs);
1275		user_rewind_single_step(current);
1276	}
1277
1278	restore_saved_sigmask();
1279}
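
/*
 * Userspace-visible effect of the restart handling above (illustrative,
 * not part of this file): without SA_RESTART a handled signal makes a
 * blocking read() fail with EINTR, while with SA_RESTART the PC is left
 * rewound so the read() is re-issued transparently. Sketch with a
 * hypothetical handler:
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_alarm(int sig) { }
 *
 *	static ssize_t interruptible_read(int fd, void *buf, size_t len)
 *	{
 *		struct sigaction sa = { .sa_handler = on_alarm };
 *
 *		sigemptyset(&sa.sa_mask);
 *		sa.sa_flags = 0;		// omit SA_RESTART: may see EINTR
 *		// sa.sa_flags = SA_RESTART;	// read() restarted instead
 *		sigaction(SIGALRM, &sa, NULL);
 *
 *		alarm(1);
 *		return read(fd, buf, len);	// -1/EINTR only without SA_RESTART
 *	}
 */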
1280
1281void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
1282{
1283	do {
1284		if (thread_flags & _TIF_NEED_RESCHED) {
1285			/* Unmask Debug and SError for the next task */
1286			local_daif_restore(DAIF_PROCCTX_NOIRQ);
1287
1288			schedule();
1289		} else {
1290			local_daif_restore(DAIF_PROCCTX);
1291
1292			if (thread_flags & _TIF_UPROBE)
1293				uprobe_notify_resume(regs);
1294
1295			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
1296				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
1297				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
1298					       (void __user *)NULL, current);
1299			}
1300
1301			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
1302				do_signal(regs);
1303
1304			if (thread_flags & _TIF_NOTIFY_RESUME)
1305				resume_user_mode_work(regs);
1306
1307			if (thread_flags & _TIF_FOREIGN_FPSTATE)
1308				fpsimd_restore_current_state();
1309		}
1310
1311		local_daif_mask();
1312		thread_flags = read_thread_flags();
1313	} while (thread_flags & _TIF_WORK_MASK);
1314}
1315
1316unsigned long __ro_after_init signal_minsigstksz;
1317
1318/*
1319 * Determine the stack space required for guaranteed signal delivery.
1320 * This function is used to populate AT_MINSIGSTKSZ at process startup.
1321 * cpufeatures setup is assumed to be complete.
1322 */
1323void __init minsigstksz_setup(void)
1324{
1325	struct rt_sigframe_user_layout user;
1326
1327	init_user_layout(&user);
1328
1329	/*
1330	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
1331	 * be big enough, but it's our best guess:
1332	 */
1333	if (WARN_ON(setup_sigframe_layout(&user, true)))
1334		return;
1335
1336	signal_minsigstksz = sigframe_size(&user) +
1337		round_up(sizeof(struct frame_record), 16) +
1338		16; /* max alignment padding */
1339}
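
/*
 * Illustrative userspace sketch (not part of this file): sizing an
 * alternate signal stack from the AT_MINSIGSTKSZ entry through which
 * signal_minsigstksz is reported. Assumes a libc that exposes
 * getauxval() and the AT_MINSIGSTKSZ constant; error handling omitted.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *	#include <sys/auxv.h>
 *
 *	static void setup_altstack(void)
 *	{
 *		unsigned long min = getauxval(AT_MINSIGSTKSZ);
 *		stack_t ss = {
 *			.ss_size = (min > SIGSTKSZ ? min : SIGSTKSZ) + 4096,
 *		};
 *
 *		ss.ss_sp = malloc(ss.ss_size);
 *		sigaltstack(&ss, NULL);		// check returns in real code
 *	}
 */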
1340
1341/*
1342 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
1343 * changes likely come with new fields that should be added below.
1344 */
1345static_assert(NSIGILL	== 11);
1346static_assert(NSIGFPE	== 15);
1347static_assert(NSIGSEGV	== 10);
1348static_assert(NSIGBUS	== 5);
1349static_assert(NSIGTRAP	== 6);
1350static_assert(NSIGCHLD	== 6);
1351static_assert(NSIGSYS	== 2);
1352static_assert(sizeof(siginfo_t) == 128);
1353static_assert(__alignof__(siginfo_t) == 8);
1354static_assert(offsetof(siginfo_t, si_signo)	== 0x00);
1355static_assert(offsetof(siginfo_t, si_errno)	== 0x04);
1356static_assert(offsetof(siginfo_t, si_code)	== 0x08);
1357static_assert(offsetof(siginfo_t, si_pid)	== 0x10);
1358static_assert(offsetof(siginfo_t, si_uid)	== 0x14);
1359static_assert(offsetof(siginfo_t, si_tid)	== 0x10);
1360static_assert(offsetof(siginfo_t, si_overrun)	== 0x14);
1361static_assert(offsetof(siginfo_t, si_status)	== 0x18);
1362static_assert(offsetof(siginfo_t, si_utime)	== 0x20);
1363static_assert(offsetof(siginfo_t, si_stime)	== 0x28);
1364static_assert(offsetof(siginfo_t, si_value)	== 0x18);
1365static_assert(offsetof(siginfo_t, si_int)	== 0x18);
1366static_assert(offsetof(siginfo_t, si_ptr)	== 0x18);
1367static_assert(offsetof(siginfo_t, si_addr)	== 0x10);
1368static_assert(offsetof(siginfo_t, si_addr_lsb)	== 0x18);
1369static_assert(offsetof(siginfo_t, si_lower)	== 0x20);
1370static_assert(offsetof(siginfo_t, si_upper)	== 0x28);
1371static_assert(offsetof(siginfo_t, si_pkey)	== 0x20);
1372static_assert(offsetof(siginfo_t, si_perf_data)	== 0x18);
1373static_assert(offsetof(siginfo_t, si_perf_type)	== 0x20);
1374static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
1375static_assert(offsetof(siginfo_t, si_band)	== 0x10);
1376static_assert(offsetof(siginfo_t, si_fd)	== 0x18);
1377static_assert(offsetof(siginfo_t, si_call_addr)	== 0x10);
1378static_assert(offsetof(siginfo_t, si_syscall)	== 0x18);
1379static_assert(offsetof(siginfo_t, si_arch)	== 0x1c);