// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/freezer.h>
#include <linux/stddef.h>
#include <linux/uaccess.h>
#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/resume_user_mode.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>

#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/signal32.h>
#include <asm/traps.h>
#include <asm/vdso.h>

/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
struct rt_sigframe {
	struct siginfo info;
	struct ucontext uc;
};

struct frame_record {
	u64 fp;
	u64 lr;
};

struct rt_sigframe_user_layout {
	struct rt_sigframe __user *sigframe;
	struct frame_record __user *next_frame;

	unsigned long size;	/* size of allocated sigframe data */
	unsigned long limit;	/* largest allowed size */

	unsigned long fpsimd_offset;
	unsigned long esr_offset;
	unsigned long sve_offset;
	unsigned long za_offset;
	unsigned long extra_offset;
	unsigned long end_offset;
};
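
/*
 * Illustrative sketch (derived from the code below, not normative ABI
 * documentation): the user-visible frame that setup_sigframe() builds on
 * the signal stack looks roughly like this, with each optional record in
 * uc.uc_mcontext.__reserved[] introduced by a struct _aarch64_ctx
 * {magic, size} header:
 *
 *	struct rt_sigframe
 *	  siginfo
 *	  ucontext
 *	    mcontext (GPRs, sp, pc, pstate)
 *	    __reserved[]:
 *	      fpsimd_context  (FPSIMD_MAGIC)
 *	      [esr_context]   (ESR_MAGIC)
 *	      [sve_context]   (SVE_MAGIC)
 *	      [za_context]    (ZA_MAGIC)
 *	      [extra_context  (EXTRA_MAGIC) -> records beyond __reserved[]]
 *	      terminator      {0, 0}
 *	struct frame_record (fp, lr) for the unwinder
 *
 * The *_offset fields above locate each record relative to sigframe.
 */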

#define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)

static void init_user_layout(struct rt_sigframe_user_layout *user)
{
	const size_t reserved_size =
		sizeof(user->sigframe->uc.uc_mcontext.__reserved);

	memset(user, 0, sizeof(*user));
	user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);

	user->limit = user->size + reserved_size;

	user->limit -= TERMINATOR_SIZE;
	user->limit -= EXTRA_CONTEXT_SIZE;
	/* Reserve space for extension and terminator ^ */
}

static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
{
	return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
}

/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_256K

static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
			    unsigned long *offset, size_t size, bool extend)
{
	size_t padded_size = round_up(size, 16);

	if (padded_size > user->limit - user->size &&
	    !user->extra_offset &&
	    extend) {
		int ret;

		user->limit += EXTRA_CONTEXT_SIZE;
		ret = __sigframe_alloc(user, &user->extra_offset,
				       sizeof(struct extra_context), false);
		if (ret) {
			user->limit -= EXTRA_CONTEXT_SIZE;
			return ret;
		}

		/* Reserve space for the __reserved[] terminator */
		user->size += TERMINATOR_SIZE;

		/*
		 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
		 * the terminator:
		 */
		user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
	}

	/* Still not enough space?  Bad luck! */
	if (padded_size > user->limit - user->size)
		return -ENOMEM;

	*offset = user->size;
	user->size += padded_size;

	return 0;
}

/*
 * Allocate space for an optional record of <size> bytes in the user
 * signal frame.  The offset from the signal frame base address to the
 * allocated block is assigned to *offset.
 */
static int sigframe_alloc(struct rt_sigframe_user_layout *user,
			  unsigned long *offset, size_t size)
{
	return __sigframe_alloc(user, offset, size, true);
}

/* Allocate the null terminator record and prevent further allocations */
static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
{
	int ret;

	/* Un-reserve the space reserved for the terminator: */
	user->limit += TERMINATOR_SIZE;

	ret = sigframe_alloc(user, &user->end_offset,
			     sizeof(struct _aarch64_ctx));
	if (ret)
		return ret;

	/* Prevent further allocation: */
	user->limit = user->size;
	return 0;
}

static void __user *apply_user_offset(
	struct rt_sigframe_user_layout const *user, unsigned long offset)
{
	char __user *base = (char __user *)user->sigframe;

	return base + offset;
}
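
/*
 * Typical pairing (a sketch only): a record is first reserved during
 * layout with sigframe_alloc(user, &user->foo_offset, sizeof(...)),
 * then, once user->sigframe is known, resolved to a user pointer with
 * apply_user_offset(user, user->foo_offset) before being written.
 * "foo_offset" here is a placeholder name, not a real field.
 */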

static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state const *fpsimd =
		&current->thread.uw.fpsimd_state;
	int err;

	/* copy the FP and status/control registers */
	err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
	__put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
	__put_user_error(fpsimd->fpcr, &ctx->fpcr, err);

	/* copy the magic/size information */
	__put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
	__put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);

	return err ? -EFAULT : 0;
}

static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct user_fpsimd_state fpsimd;
	__u32 magic, size;
	int err = 0;

	/* check the magic/size information */
	__get_user_error(magic, &ctx->head.magic, err);
	__get_user_error(size, &ctx->head.size, err);
	if (err)
		return -EFAULT;
	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, ctx->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

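	/*
	 * A plain FPSIMD record implies no live SVE state for the task,
	 * so drop any stale SVE view before loading the new data.
	 */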
	clear_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_FPSIMD;

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

struct user_ctxs {
	struct fpsimd_context __user *fpsimd;
	struct sve_context __user *sve;
	struct za_context __user *za;
};

#ifdef CONFIG_ARM64_SVE

static int preserve_sve_context(struct sve_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	u16 flags = 0;
	unsigned int vl = task_get_sve_vl(current);
	unsigned int vq = 0;

	if (thread_sm_enabled(&current->thread)) {
		vl = task_get_sme_vl(current);
		vq = sve_vq_from_vl(vl);
		flags |= SVE_SIG_FLAG_SM;
	} else if (test_thread_flag(TIF_SVE)) {
		vq = sve_vq_from_vl(vl);
	}

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(SVE_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	__put_user_error(flags, &ctx->flags, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the SVE state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
				      current->thread.sve_state,
				      SVE_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	int err;
	unsigned int vl, vq;
	struct user_fpsimd_state fpsimd;
	struct sve_context sve;

	if (__copy_from_user(&sve, user->sve, sizeof(sve)))
		return -EFAULT;

	if (sve.flags & SVE_SIG_FLAG_SM) {
		if (!system_supports_sme())
			return -EINVAL;

		vl = task_get_sme_vl(current);
	} else {
		/*
		 * An SME-only system uses SVE for streaming mode, so it
		 * can have an SVE-formatted context with a zero VL and
		 * no payload data.
		 */
		if (!system_supports_sve() && !system_supports_sme())
			return -EINVAL;

		vl = task_get_sve_vl(current);
	}

	if (sve.vl != vl)
		return -EINVAL;

	if (sve.head.size <= sizeof(*user->sve)) {
		clear_thread_flag(TIF_SVE);
		current->thread.svcr &= ~SVCR_SM_MASK;
		current->thread.fp_type = FP_STATE_FPSIMD;
		goto fpsimd_only;
	}

	vq = sve_vq_from_vl(sve.vl);

	if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.sve_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.sve_state */

	sve_alloc(current, true);
	if (!current->thread.sve_state) {
		clear_thread_flag(TIF_SVE);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.sve_state,
			       (char __user const *)user->sve +
					SVE_SIG_REGS_OFFSET,
			       SVE_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	if (sve.flags & SVE_SIG_FLAG_SM)
		current->thread.svcr |= SVCR_SM_MASK;
	else
		set_thread_flag(TIF_SVE);
	current->thread.fp_type = FP_STATE_SVE;

fpsimd_only:
	/* copy the FP and status/control registers */
	/* restore_sigframe() already checked that user->fpsimd != NULL. */
	err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
	__get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}

#else /* ! CONFIG_ARM64_SVE */

static int restore_sve_fpsimd_context(struct user_ctxs *user)
{
	WARN_ON_ONCE(1);
	return -EINVAL;
}

/* Turn any non-optimised out attempts to use this into a link error: */
extern int preserve_sve_context(void __user *ctx);

#endif /* ! CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_SME

static int preserve_za_context(struct za_context __user *ctx)
{
	int err = 0;
	u16 reserved[ARRAY_SIZE(ctx->__reserved)];
	unsigned int vl = task_get_sme_vl(current);
	unsigned int vq;

	if (thread_za_enabled(&current->thread))
		vq = sve_vq_from_vl(vl);
	else
		vq = 0;

	memset(reserved, 0, sizeof(reserved));

	__put_user_error(ZA_MAGIC, &ctx->head.magic, err);
	__put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
			 &ctx->head.size, err);
	__put_user_error(vl, &ctx->vl, err);
	BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
	err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));

	if (vq) {
		/*
		 * This assumes that the ZA state has already been saved to
		 * the task struct by calling the function
		 * fpsimd_signal_preserve_current_state().
		 */
		err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
				      current->thread.za_state,
				      ZA_SIG_REGS_SIZE(vq));
	}

	return err ? -EFAULT : 0;
}

static int restore_za_context(struct user_ctxs *user)
{
	int err;
	unsigned int vq;
	struct za_context za;

	if (__copy_from_user(&za, user->za, sizeof(za)))
		return -EFAULT;

	if (za.vl != task_get_sme_vl(current))
		return -EINVAL;

	if (za.head.size <= sizeof(*user->za)) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		return 0;
	}

	vq = sve_vq_from_vl(za.vl);

	if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq))
		return -EINVAL;

	/*
	 * Careful: we are about to __copy_from_user() directly into
	 * thread.za_state with preemption enabled, so protection is
	 * needed to prevent a racing context switch from writing stale
	 * registers back over the new data.
	 */

	fpsimd_flush_task_state(current);
	/* From now, fpsimd_thread_switch() won't touch thread.za_state */

	sme_alloc(current);
	if (!current->thread.za_state) {
		current->thread.svcr &= ~SVCR_ZA_MASK;
		clear_thread_flag(TIF_SME);
		return -ENOMEM;
	}

	err = __copy_from_user(current->thread.za_state,
			       (char __user const *)user->za +
					ZA_SIG_REGS_OFFSET,
			       ZA_SIG_REGS_SIZE(vq));
	if (err)
		return -EFAULT;

	set_thread_flag(TIF_SME);
	current->thread.svcr |= SVCR_ZA_MASK;

	return 0;
}
#else /* ! CONFIG_ARM64_SME */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_za_context(void __user *ctx);
extern int restore_za_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SME */

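/*
 * Rough shape of the record stream walked below (a sketch, not normative
 * ABI documentation): starting at uc.uc_mcontext.__reserved[], each record
 * begins with a 16-byte-aligned struct _aarch64_ctx { u32 magic; u32 size; }
 * header.  A {0, 0} header terminates the stream; an EXTRA_MAGIC record
 * redirects parsing to a continuation buffer outside __reserved[].  A
 * userspace walker would follow essentially the same loop:
 *
 *	struct _aarch64_ctx *h = (void *)uc->uc_mcontext.__reserved;
 *	while (h->magic) {
 *		... dispatch on h->magic ...
 *		h = (void *)((char *)h + h->size);
 *	}
 */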
static int parse_user_sigframe(struct user_ctxs *user,
			       struct rt_sigframe __user *sf)
{
	struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
	struct _aarch64_ctx __user *head;
	char __user *base = (char __user *)&sc->__reserved;
	size_t offset = 0;
	size_t limit = sizeof(sc->__reserved);
	bool have_extra_context = false;
	char const __user *const sfp = (char const __user *)sf;

	user->fpsimd = NULL;
	user->sve = NULL;
	user->za = NULL;

	if (!IS_ALIGNED((unsigned long)base, 16))
		goto invalid;

	while (1) {
		int err = 0;
		u32 magic, size;
		char const __user *userp;
		struct extra_context const __user *extra;
		u64 extra_datap;
		u32 extra_size;
		struct _aarch64_ctx const __user *end;
		u32 end_magic, end_size;

		if (limit - offset < sizeof(*head))
			goto invalid;

		if (!IS_ALIGNED(offset, 16))
			goto invalid;

		head = (struct _aarch64_ctx __user *)(base + offset);
		__get_user_error(magic, &head->magic, err);
		__get_user_error(size, &head->size, err);
		if (err)
			return err;

		if (limit - offset < size)
			goto invalid;

		switch (magic) {
		case 0:
			if (size)
				goto invalid;

			goto done;

		case FPSIMD_MAGIC:
			if (!system_supports_fpsimd())
				goto invalid;
			if (user->fpsimd)
				goto invalid;

			if (size < sizeof(*user->fpsimd))
				goto invalid;

			user->fpsimd = (struct fpsimd_context __user *)head;
			break;

		case ESR_MAGIC:
			/* ignore */
			break;

		case SVE_MAGIC:
			if (!system_supports_sve() && !system_supports_sme())
				goto invalid;

			if (user->sve)
				goto invalid;

			if (size < sizeof(*user->sve))
				goto invalid;

			user->sve = (struct sve_context __user *)head;
			break;

		case ZA_MAGIC:
			if (!system_supports_sme())
				goto invalid;

			if (user->za)
				goto invalid;

			if (size < sizeof(*user->za))
				goto invalid;

			user->za = (struct za_context __user *)head;
			break;

		case EXTRA_MAGIC:
			if (have_extra_context)
				goto invalid;

			if (size < sizeof(*extra))
				goto invalid;

			userp = (char const __user *)head;

			extra = (struct extra_context const __user *)userp;
			userp += size;

			__get_user_error(extra_datap, &extra->datap, err);
			__get_user_error(extra_size, &extra->size, err);
			if (err)
				return err;

			/* Check for the dummy terminator in __reserved[]: */

			if (limit - offset - size < TERMINATOR_SIZE)
				goto invalid;

			end = (struct _aarch64_ctx const __user *)userp;
			userp += TERMINATOR_SIZE;

			__get_user_error(end_magic, &end->magic, err);
			__get_user_error(end_size, &end->size, err);
			if (err)
				return err;

			if (end_magic || end_size)
				goto invalid;

			/* Prevent looping/repeated parsing of extra_context */
			have_extra_context = true;

			base = (__force void __user *)extra_datap;
			if (!IS_ALIGNED((unsigned long)base, 16))
				goto invalid;

			if (!IS_ALIGNED(extra_size, 16))
				goto invalid;

			if (base != userp)
				goto invalid;

			/* Reject "unreasonably large" frames: */
			if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
				goto invalid;

			/*
			 * Ignore trailing terminator in __reserved[]
			 * and start parsing extra data:
			 */
			offset = 0;
			limit = extra_size;

			if (!access_ok(base, limit))
				goto invalid;

			continue;

		default:
			goto invalid;
		}

		if (size < sizeof(*head))
			goto invalid;

		if (limit - offset < size)
			goto invalid;

		offset += size;
	}

done:
	return 0;

invalid:
	return -EINVAL;
}

static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct user_ctxs user;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	err |= !valid_user_regs(&regs->user_regs, current);
	if (err == 0)
		err = parse_user_sigframe(&user, sf);

	if (err == 0 && system_supports_fpsimd()) {
		if (!user.fpsimd)
			return -EINVAL;

		if (user.sve)
			err = restore_sve_fpsimd_context(&user);
		else
			err = restore_fpsimd_context(user.fpsimd);
	}

	if (err == 0 && system_supports_sme() && user.za)
		err = restore_za_context(&user);

	return err;
}

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 128-bit boundary, 'sp' should
	 * be 128-bit aligned here.
	 */
	if (regs->sp & 15)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->sp;

	if (!access_ok(frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	if (restore_altstack(&frame->uc.uc_stack))
		goto badframe;

	return regs->regs[0];

badframe:
	arm64_notify_segfault(regs->sp);
	return 0;
}

/*
 * Determine the layout of optional records in the signal frame
 *
 * add_all: if true, lays out the biggest possible signal frame for
 * this task; otherwise, generates a layout for the current state
 * of the task.
 */
static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
				 bool add_all)
{
	int err;

	if (system_supports_fpsimd()) {
		err = sigframe_alloc(user, &user->fpsimd_offset,
				     sizeof(struct fpsimd_context));
		if (err)
			return err;
	}

	/* fault information, if valid */
	if (add_all || current->thread.fault_code) {
		err = sigframe_alloc(user, &user->esr_offset,
				     sizeof(struct esr_context));
		if (err)
			return err;
	}

	if (system_supports_sve() || system_supports_sme()) {
		unsigned int vq = 0;

		if (add_all || test_thread_flag(TIF_SVE) ||
		    thread_sm_enabled(&current->thread)) {
			int vl = max(sve_max_vl(), sme_max_vl());

			if (!add_all)
				vl = thread_get_cur_vl(&current->thread);

			vq = sve_vq_from_vl(vl);
		}

		err = sigframe_alloc(user, &user->sve_offset,
				     SVE_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	if (system_supports_sme()) {
		unsigned int vl;
		unsigned int vq = 0;

		if (add_all)
			vl = sme_max_vl();
		else
			vl = task_get_sme_vl(current);

		if (thread_za_enabled(&current->thread))
			vq = sve_vq_from_vl(vl);

		err = sigframe_alloc(user, &user->za_offset,
				     ZA_SIG_CONTEXT_SIZE(vq));
		if (err)
			return err;
	}

	return sigframe_alloc_end(user);
}

static int setup_sigframe(struct rt_sigframe_user_layout *user,
			  struct pt_regs *regs, sigset_t *set)
{
	int i, err = 0;
	struct rt_sigframe __user *sf = user->sigframe;

	/* set up the stack frame for unwinding */
	__put_user_error(regs->regs[29], &user->next_frame->fp, err);
	__put_user_error(regs->regs[30], &user->next_frame->lr, err);

	for (i = 0; i < 31; i++)
		__put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	if (err == 0 && system_supports_fpsimd()) {
		struct fpsimd_context __user *fpsimd_ctx =
			apply_user_offset(user, user->fpsimd_offset);
		err |= preserve_fpsimd_context(fpsimd_ctx);
	}

	/* fault information, if valid */
	if (err == 0 && user->esr_offset) {
		struct esr_context __user *esr_ctx =
			apply_user_offset(user, user->esr_offset);

		__put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
		__put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
		__put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
	}

	/* Scalable Vector Extension state (including streaming), if present */
	if ((system_supports_sve() || system_supports_sme()) &&
	    err == 0 && user->sve_offset) {
		struct sve_context __user *sve_ctx =
			apply_user_offset(user, user->sve_offset);
		err |= preserve_sve_context(sve_ctx);
	}

	/* ZA state if present */
	if (system_supports_sme() && err == 0 && user->za_offset) {
		struct za_context __user *za_ctx =
			apply_user_offset(user, user->za_offset);
		err |= preserve_za_context(za_ctx);
	}

	if (err == 0 && user->extra_offset) {
		char __user *sfp = (char __user *)user->sigframe;
		char __user *userp =
			apply_user_offset(user, user->extra_offset);

		struct extra_context __user *extra;
		struct _aarch64_ctx __user *end;
		u64 extra_datap;
		u32 extra_size;

		extra = (struct extra_context __user *)userp;
		userp += EXTRA_CONTEXT_SIZE;

		end = (struct _aarch64_ctx __user *)userp;
		userp += TERMINATOR_SIZE;

		/*
		 * extra_datap is just written to the signal frame.
		 * The value gets cast back to a void __user *
		 * during sigreturn.
		 */
		extra_datap = (__force u64)userp;
		extra_size = sfp + round_up(user->size, 16) - userp;

		__put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
		__put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
		__put_user_error(extra_datap, &extra->datap, err);
		__put_user_error(extra_size, &extra->size, err);

		/* Add the terminator */
		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	/* set the "end" magic */
	if (err == 0) {
		struct _aarch64_ctx __user *end =
			apply_user_offset(user, user->end_offset);

		__put_user_error(0, &end->magic, err);
		__put_user_error(0, &end->size, err);
	}

	return err;
}

static int get_sigframe(struct rt_sigframe_user_layout *user,
			struct ksignal *ksig, struct pt_regs *regs)
{
	unsigned long sp, sp_top;
	int err;

	init_user_layout(user);
	err = setup_sigframe_layout(user, false);
	if (err)
		return err;

	sp = sp_top = sigsp(regs->sp, ksig);

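	/*
	 * The frame record is placed above (at a higher address than)
	 * the sigframe, so an unwinder stepping out of the handler
	 * finds fp/lr first.
	 */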
	sp = round_down(sp - sizeof(struct frame_record), 16);
	user->next_frame = (struct frame_record __user *)sp;

	sp = round_down(sp, 16) - sigframe_size(user);
	user->sigframe = (struct rt_sigframe __user *)sp;

	/*
	 * Check that we can actually write to the signal frame.
	 */
	if (!access_ok(user->sigframe, sp_top - sp))
		return -EFAULT;

	return 0;
}

static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
			 struct rt_sigframe_user_layout *user, int usig)
{
	__sigrestore_t sigtramp;

	regs->regs[0] = usig;
	regs->sp = (unsigned long)user->sigframe;
	regs->regs[29] = (unsigned long)&user->next_frame->fp;
	regs->pc = (unsigned long)ka->sa.sa_handler;

	/*
	 * Signal delivery is a (wacky) indirect function call in
	 * userspace, so simulate the same setting of BTYPE as a BLR
	 * <register containing the signal handler entry point>.
	 * Signal delivery to a location in a PROT_BTI guarded page
	 * that is not a function entry point will now trigger a
	 * SIGILL in userspace.
	 *
	 * If the signal handler entry point is not in a PROT_BTI
	 * guarded page, this is harmless.
	 */
	if (system_supports_bti()) {
		regs->pstate &= ~PSR_BTYPE_MASK;
		regs->pstate |= PSR_BTYPE_C;
	}

	/* TCO (Tag Check Override) always cleared for signal handlers */
	regs->pstate &= ~PSR_TCO_BIT;

	/* Signal handlers are invoked with ZA and streaming mode disabled */
	if (system_supports_sme()) {
		/*
		 * If we were in streaming mode the saved register
		 * state was SVE but we will exit SM and use the
		 * FPSIMD register state - flush the saved FPSIMD
		 * register state in case it gets loaded.
		 */
		if (current->thread.svcr & SVCR_SM_MASK) {
			memset(&current->thread.uw.fpsimd_state, 0,
			       sizeof(current->thread.uw.fpsimd_state));
			current->thread.fp_type = FP_STATE_FPSIMD;
		}

		current->thread.svcr &= ~(SVCR_ZA_MASK |
					  SVCR_SM_MASK);
		sme_smstop();
	}

	if (ka->sa.sa_flags & SA_RESTORER)
		sigtramp = ka->sa.sa_restorer;
	else
		sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);

	regs->regs[30] = (unsigned long)sigtramp;
}

static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
			  struct pt_regs *regs)
{
	struct rt_sigframe_user_layout user;
	struct rt_sigframe __user *frame;
	int err = 0;

	fpsimd_signal_preserve_current_state();

	if (get_sigframe(&user, ksig, regs))
		return 1;

	frame = user.sigframe;

	__put_user_error(0, &frame->uc.uc_flags, err);
	__put_user_error(NULL, &frame->uc.uc_link, err);

	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
	err |= setup_sigframe(&user, regs, set);
	if (err == 0) {
		setup_return(regs, &ksig->ka, &user, usig);
		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
			regs->regs[1] = (unsigned long)&frame->info;
			regs->regs[2] = (unsigned long)&frame->uc;
		}
	}

	return err;
}

static void setup_restart_syscall(struct pt_regs *regs)
{
	if (is_compat_task())
		compat_setup_restart_syscall(regs);
	else
		regs->regs[8] = __NR_restart_syscall;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	sigset_t *oldset = sigmask_to_save();
	int usig = ksig->sig;
	int ret;

	rseq_signal_deliver(ksig, regs);

	/*
	 * Set up the stack frame
	 */
	if (is_compat_task()) {
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
		else
			ret = compat_setup_frame(usig, ksig, oldset, regs);
	} else {
		ret = setup_rt_frame(usig, ksig, oldset, regs);
	}

	/*
	 * Check that the resulting registers are actually sane.
	 */
	ret |= !valid_user_regs(&regs->user_regs, current);

	/* Step into the signal handler if we are stepping */
	signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static void do_signal(struct pt_regs *regs)
{
	unsigned long continue_addr = 0, restart_addr = 0;
	int retval = 0;
	struct ksignal ksig;
	bool syscall = in_syscall(regs);

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->pc;
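		/*
		 * The SVC instruction that entered the kernel immediately
		 * precedes the saved PC (2 bytes for a compat Thumb SVC,
		 * 4 bytes otherwise), so restarting the syscall means
		 * rewinding the PC by one instruction.
		 */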
		restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
		retval = regs->regs[0];

		/*
		 * Avoid additional syscall restarting via ret_to_user.
		 */
		forget_syscall(regs);

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PC.
		 */
		switch (retval) {
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
		case -ERESTART_RESTARTBLOCK:
			regs->regs[0] = regs->orig_x0;
			regs->pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this point
	 * the debugger may change all of our registers.
	 */
	if (get_signal(&ksig)) {
		/*
		 * Depending on the signal settings, we may need to revert the
		 * decision to restart the system call, but skip this if a
		 * debugger has chosen to restart at a different PC.
		 */
		if (regs->pc == restart_addr &&
		    (retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK ||
		     (retval == -ERESTARTSYS &&
		      !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
			syscall_set_return_value(current, regs, -EINTR, 0);
			regs->pc = continue_addr;
		}

		handle_signal(&ksig, regs);
		return;
	}

	/*
	 * Handle restarting a different system call. As above, if a debugger
	 * has chosen to restart at a different PC, ignore the restart.
	 */
	if (syscall && regs->pc == restart_addr) {
		if (retval == -ERESTART_RESTARTBLOCK)
			setup_restart_syscall(regs);
		user_rewind_single_step(current);
	}

	restore_saved_sigmask();
}

void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
{
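	/*
	 * Re-check the work flags with interrupts masked on each pass:
	 * new work may have been queued while the handlers below ran
	 * with interrupts enabled.
	 */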
	do {
		if (thread_flags & _TIF_NEED_RESCHED) {
			/* Unmask Debug and SError for the next task */
			local_daif_restore(DAIF_PROCCTX_NOIRQ);

			schedule();
		} else {
			local_daif_restore(DAIF_PROCCTX);

			if (thread_flags & _TIF_UPROBE)
				uprobe_notify_resume(regs);

			if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
				clear_thread_flag(TIF_MTE_ASYNC_FAULT);
				send_sig_fault(SIGSEGV, SEGV_MTEAERR,
					       (void __user *)NULL, current);
			}

			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
				do_signal(regs);

			if (thread_flags & _TIF_NOTIFY_RESUME)
				resume_user_mode_work(regs);

			if (thread_flags & _TIF_FOREIGN_FPSTATE)
				fpsimd_restore_current_state();
		}

		local_daif_mask();
		thread_flags = read_thread_flags();
	} while (thread_flags & _TIF_WORK_MASK);
}

unsigned long __ro_after_init signal_minsigstksz;

/*
 * Determine the stack space required for guaranteed signal delivery.
 * This function is used to populate AT_MINSIGSTKSZ at process startup.
 * cpufeatures setup is assumed to be complete.
 */
void __init minsigstksz_setup(void)
{
	struct rt_sigframe_user_layout user;

	init_user_layout(&user);

	/*
	 * If this fails, SIGFRAME_MAXSZ needs to be enlarged.  It won't
	 * be big enough, but it's our best guess:
	 */
	if (WARN_ON(setup_sigframe_layout(&user, true)))
		return;

	signal_minsigstksz = sigframe_size(&user) +
		round_up(sizeof(struct frame_record), 16) +
		16; /* max alignment padding */
}
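
/*
 * Illustrative consumer (a sketch, not part of this file): userspace
 * sizing an alternate signal stack from the minimum advertised via the
 * auxiliary vector:
 *
 *	#include <sys/auxv.h>
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	size_t min = getauxval(AT_MINSIGSTKSZ);
 *	if (min < MINSIGSTKSZ)
 *		min = MINSIGSTKSZ;
 *	stack_t ss = { .ss_sp = malloc(min), .ss_size = min };
 *	sigaltstack(&ss, NULL);
 */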

/*
 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
 * changes likely come with new fields that should be added below.
 */
static_assert(NSIGILL == 11);
static_assert(NSIGFPE == 15);
static_assert(NSIGSEGV == 9);
static_assert(NSIGBUS == 5);
static_assert(NSIGTRAP == 6);
static_assert(NSIGCHLD == 6);
static_assert(NSIGSYS == 2);
static_assert(sizeof(siginfo_t) == 128);
static_assert(__alignof__(siginfo_t) == 8);
static_assert(offsetof(siginfo_t, si_signo) == 0x00);
static_assert(offsetof(siginfo_t, si_errno) == 0x04);
static_assert(offsetof(siginfo_t, si_code) == 0x08);
static_assert(offsetof(siginfo_t, si_pid) == 0x10);
static_assert(offsetof(siginfo_t, si_uid) == 0x14);
static_assert(offsetof(siginfo_t, si_tid) == 0x10);
static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
static_assert(offsetof(siginfo_t, si_status) == 0x18);
static_assert(offsetof(siginfo_t, si_utime) == 0x20);
static_assert(offsetof(siginfo_t, si_stime) == 0x28);
static_assert(offsetof(siginfo_t, si_value) == 0x18);
static_assert(offsetof(siginfo_t, si_int) == 0x18);
static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
static_assert(offsetof(siginfo_t, si_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
static_assert(offsetof(siginfo_t, si_lower) == 0x20);
static_assert(offsetof(siginfo_t, si_upper) == 0x28);
static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
static_assert(offsetof(siginfo_t, si_band) == 0x10);
static_assert(offsetof(siginfo_t, si_fd) == 0x18);
static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
static_assert(offsetof(siginfo_t, si_arch) == 0x1c);