Linux Audio

Check our new training course

Loading...
v5.9
  1/*
  2 * Common signal handling code for both 32 and 64 bits
  3 *
  4 *    Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
  5 *    Extracted from signal_32.c and signal_64.c
  6 *
  7 * This file is subject to the terms and conditions of the GNU General
  8 * Public License.  See the file README.legal in the main directory of
  9 * this archive for more details.
 10 */
 11
 12#include <linux/tracehook.h>
 13#include <linux/signal.h>
 14#include <linux/uprobes.h>
 15#include <linux/key.h>
 16#include <linux/context_tracking.h>
 17#include <linux/livepatch.h>
 18#include <linux/syscalls.h>
 19#include <asm/hw_breakpoint.h>
 20#include <linux/uaccess.h>
 21#include <asm/switch_to.h>
 22#include <asm/unistd.h>
 23#include <asm/debug.h>
 24#include <asm/tm.h>
 25
 26#include "signal.h"
 27
 28#ifdef CONFIG_VSX
 29unsigned long copy_fpr_to_user(void __user *to,
 30			       struct task_struct *task)
 31{
 32	u64 buf[ELF_NFPREG];
 33	int i;
 34
 35	/* save FPR copy to local buffer then write to the thread_struct */
 36	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 37		buf[i] = task->thread.TS_FPR(i);
 38	buf[i] = task->thread.fp_state.fpscr;
 39	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 40}
 41
 42unsigned long copy_fpr_from_user(struct task_struct *task,
 43				 void __user *from)
 44{
 45	u64 buf[ELF_NFPREG];
 46	int i;
 47
 48	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
 49		return 1;
 50	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 51		task->thread.TS_FPR(i) = buf[i];
 52	task->thread.fp_state.fpscr = buf[i];
 53
 54	return 0;
 55}
 56
 57unsigned long copy_vsx_to_user(void __user *to,
 58			       struct task_struct *task)
 59{
 60	u64 buf[ELF_NVSRHALFREG];
 61	int i;
 62
 63	/* save FPR copy to local buffer then write to the thread_struct */
 64	for (i = 0; i < ELF_NVSRHALFREG; i++)
 65		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
 66	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
 67}
 68
 69unsigned long copy_vsx_from_user(struct task_struct *task,
 70				 void __user *from)
 71{
 72	u64 buf[ELF_NVSRHALFREG];
 73	int i;
 74
 75	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
 76		return 1;
 77	for (i = 0; i < ELF_NVSRHALFREG ; i++)
 78		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 79	return 0;
 80}
 81
 82#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 83unsigned long copy_ckfpr_to_user(void __user *to,
 84				  struct task_struct *task)
 85{
 86	u64 buf[ELF_NFPREG];
 87	int i;
 88
 89	/* save FPR copy to local buffer then write to the thread_struct */
 90	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 91		buf[i] = task->thread.TS_CKFPR(i);
 92	buf[i] = task->thread.ckfp_state.fpscr;
 93	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 94}
 95
 96unsigned long copy_ckfpr_from_user(struct task_struct *task,
 97					  void __user *from)
 98{
 99	u64 buf[ELF_NFPREG];
100	int i;
101
102	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
103		return 1;
104	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
105		task->thread.TS_CKFPR(i) = buf[i];
106	task->thread.ckfp_state.fpscr = buf[i];
107
108	return 0;
109}
110
111unsigned long copy_ckvsx_to_user(void __user *to,
112				  struct task_struct *task)
113{
114	u64 buf[ELF_NVSRHALFREG];
115	int i;
116
117	/* save FPR copy to local buffer then write to the thread_struct */
118	for (i = 0; i < ELF_NVSRHALFREG; i++)
119		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
120	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
121}
122
123unsigned long copy_ckvsx_from_user(struct task_struct *task,
124					  void __user *from)
125{
126	u64 buf[ELF_NVSRHALFREG];
127	int i;
128
129	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
130		return 1;
131	for (i = 0; i < ELF_NVSRHALFREG ; i++)
132		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
133	return 0;
134}
135#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
136#else
/*
 * Non-VSX variant: the in-kernel FP register image is already laid out the
 * way userspace expects, so copy it out directly with no staging buffer.
 * NOTE(review): copying ELF_NFPREG doubles starting at fpr[] assumes fpscr
 * immediately follows fpr in fp_state — confirm against struct layout.
 * Returns the number of bytes NOT copied (0 on success).
 */
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}
143
/*
 * Non-VSX variant: restore the FP register image straight from userspace
 * into the thread struct in a single copy.
 * NOTE(review): assumes fpscr directly follows fpr in fp_state so that
 * ELF_NFPREG doubles cover both — confirm against struct layout.
 * Returns the number of bytes NOT copied (0 on success).
 */
inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
			      ELF_NFPREG * sizeof(double));
}
150
151#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Non-VSX variant for checkpointed (TM) FP state: copy the image directly
 * from ckfp_state to userspace.
 * NOTE(review): like the non-ck variant, this relies on the fpscr field
 * following fpr[] within ckfp_state — confirm against struct layout.
 */
inline unsigned long copy_ckfpr_to_user(void __user *to,
					 struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}
158
/*
 * Non-VSX variant for checkpointed (TM) FP state: restore the image from
 * userspace straight into ckfp_state in one copy.
 * Returns the number of bytes NOT copied (0 on success).
 */
inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
						 void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
165#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
166#endif
167
/* Log an error when sending an unhandled signal to a process. Controlled
 * through debug.exception-trace sysctl.
 */

/* Non-zero enables the unhandled-signal log message; on by default. */
int show_unhandled_signals = 1;
173
174/*
175 * Allocate space for the signal frame
176 */
177void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
178			   size_t frame_size, int is_32)
179{
180        unsigned long oldsp, newsp;
181
182        /* Default to using normal stack */
183        oldsp = get_clean_sp(sp, is_32);
184	oldsp = sigsp(oldsp, ksig);
185	newsp = (oldsp - frame_size) & ~0xFUL;
186
187	/* Check access */
188	if (!access_ok((void __user *)newsp, oldsp - newsp))
189		return NULL;
190
191        return (void __user *)newsp;
192}
193
/*
 * Decide whether an interrupted system call should be restarted after
 * signal handling, and patch the register state accordingly.  Mutates
 * regs (gpr[0], gpr[3], nip, result, ccr) in place.
 */
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
				  int has_handler)
{
	unsigned long ret = regs->gpr[3];
	int restart = 1;

	/* syscall ? */
	if (!trap_is_syscall(regs))
		return;

	/* Restart already suppressed for this frame — nothing to do. */
	if (trap_norestart(regs))
		return;

	/* error signalled ? */
	if (trap_is_scv(regs)) {
		/* scv returns a negative errno directly in r3.
		 * 32-bit compat mode sign extend? */
		if (!IS_ERR_VALUE(ret))
			return;
		/* Work with the positive errno value below. */
		ret = -ret;
	} else if (!(regs->ccr & 0x10000000)) {
		/* sc flags failure via CR0[SO]; clear means success. */
		return;
	}

	switch (ret) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		/* ERESTARTNOHAND means that the syscall should only be
		 * restarted if there was no handler for the signal, and since
		 * we only get here if there is a handler, we dont restart.
		 */
		restart = !has_handler;
		break;
	case ERESTARTSYS:
		/* ERESTARTSYS means to restart the syscall if there is no
		 * handler or the handler was registered with SA_RESTART
		 */
		restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
		break;
	case ERESTARTNOINTR:
		/* ERESTARTNOINTR means that the syscall should be
		 * called again after the signal handler returns.
		 */
		break;
	default:
		/* Not a restart errno — leave the return value alone. */
		return;
	}
	if (restart) {
		if (ret == ERESTART_RESTARTBLOCK)
			regs->gpr[0] = __NR_restart_syscall;
		else
			regs->gpr[3] = regs->orig_gpr3;
		/* Back NIP up one instruction so the syscall re-executes. */
		regs->nip -= 4;
		regs->result = 0;
	} else {
		if (trap_is_scv(regs)) {
			/* scv convention: negative errno in r3. */
			regs->result = -EINTR;
			regs->gpr[3] = -EINTR;
		} else {
			/* sc convention: positive errno plus CR0[SO] set. */
			regs->result = -EINTR;
			regs->gpr[3] = EINTR;
			regs->ccr |= 0x10000000;
		}
	}
}
258
/*
 * Deliver one pending signal to the current task: fetch the signal,
 * sort out syscall restarting, re-arm hardware breakpoints, and hand
 * off to the 32- or 64-bit frame-setup code.
 */
static void do_signal(struct task_struct *tsk)
{
	sigset_t *oldset = sigmask_to_save();
	struct ksignal ksig = { .sig = 0 };
	int ret;

	BUG_ON(tsk != current);

	get_signal(&ksig);

	/* Is there any syscall restart business here ? */
	check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);

	if (ksig.sig <= 0) {
		/* No signal to deliver -- put the saved sigmask back */
		restore_saved_sigmask();
		/* Prevent a later pass from restarting the syscall again. */
		set_trap_norestart(tsk->thread.regs);
		return;               /* no signals delivered */
	}

	/*
	 * Reenable the DABR before delivering the signal to
	 * user space. The DABR will have been cleared if it
	 * triggered inside the kernel.
	 */
	if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
		int i;

		/* Re-arm every watchpoint slot that has a breakpoint set. */
		for (i = 0; i < nr_wp_slots(); i++) {
			if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type)
				__set_breakpoint(i, &tsk->thread.hw_brk[i]);
		}
	}

	/* Re-enable the breakpoints for the signal stack */
	thread_change_pc(tsk, tsk->thread.regs);

	/* Notify rseq before the register state is rewritten for the frame. */
	rseq_signal_deliver(&ksig, tsk->thread.regs);

	if (is_32bit_task()) {
		/* SA_SIGINFO selects the rt frame for 32-bit tasks. */
		if (ksig.ka.sa.sa_flags & SA_SIGINFO)
			ret = handle_rt_signal32(&ksig, oldset, tsk);
		else
			ret = handle_signal32(&ksig, oldset, tsk);
	} else {
		/* 64-bit tasks always use the rt frame. */
		ret = handle_rt_signal64(&ksig, oldset, tsk);
	}

	set_trap_norestart(tsk->thread.regs);
	signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
}
310
/*
 * Handle pending work flagged in the thread-info flags on the way back
 * to userspace: uprobes, live-patching, signal delivery and
 * TIF_NOTIFY_RESUME callbacks.  Called with interrupts enabled.
 */
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
	/* Tell context tracking we are about to run user-bound work. */
	user_exit();

	/* Check valid addr_limit, TIF check is done there */
	addr_limit_user_check();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	if (thread_info_flags & _TIF_PATCH_PENDING)
		klp_update_patch_state(current);

	if (thread_info_flags & _TIF_SIGPENDING) {
		BUG_ON(regs != current->thread.regs);
		do_signal(current);
	}

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
		rseq_handle_notify_resume(NULL, regs);
	}

	user_enter();
}
337
unsigned long get_tm_stackpointer(struct task_struct *tsk)
{
	/* When in an active transaction that takes a signal, we need to be
	 * careful with the stack.  It's possible that the stack has moved back
	 * up after the tbegin.  The obvious case here is when the tbegin is
	 * called inside a function that returns before a tend.  In this case,
	 * the stack is part of the checkpointed transactional memory state.
	 * If we write over this non transactionally or in suspend, we are in
	 * trouble because if we get a tm abort, the program counter and stack
	 * pointer will be back at the tbegin but our in memory stack won't be
	 * valid anymore.
	 *
	 * To avoid this, when taking a signal in an active transaction, we
	 * need to use the stack pointer from the checkpointed state, rather
	 * than the speculated state.  This ensures that the signal context
	 * (written tm suspended) will be written below the stack required for
	 * the rollback.  The transaction is aborted because of the treclaim,
	 * so any memory written between the tbegin and the signal will be
	 * rolled back anyway.
	 *
	 * For signals taken in non-TM or suspended mode, we use the
	 * normal/non-checkpointed stack pointer.
	 */

	/* Default: the live (speculative) user stack pointer, r1. */
	unsigned long ret = tsk->thread.regs->gpr[1];

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	BUG_ON(tsk != current);

	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
		/* Keep preemption off across treclaim and the MSR update. */
		preempt_disable();
		tm_reclaim_current(TM_CAUSE_SIGNAL);
		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
			ret = tsk->thread.ckpt_regs.gpr[1];

		/*
		 * If we treclaim, we must clear the current thread's TM bits
		 * before re-enabling preemption. Otherwise we might be
		 * preempted and have the live MSR[TS] changed behind our back
		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
		 * enter the signal handler in non-transactional state.
		 */
		tsk->thread.regs->msr &= ~MSR_TS_MASK;
		preempt_enable();
	}
#endif
	return ret;
}
v4.17
  1/*
  2 * Common signal handling code for both 32 and 64 bits
  3 *
  4 *    Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
  5 *    Extracted from signal_32.c and signal_64.c
  6 *
  7 * This file is subject to the terms and conditions of the GNU General
  8 * Public License.  See the file README.legal in the main directory of
  9 * this archive for more details.
 10 */
 11
 12#include <linux/tracehook.h>
 13#include <linux/signal.h>
 14#include <linux/uprobes.h>
 15#include <linux/key.h>
 16#include <linux/context_tracking.h>
 17#include <linux/livepatch.h>
 
 18#include <asm/hw_breakpoint.h>
 19#include <linux/uaccess.h>
 
 20#include <asm/unistd.h>
 21#include <asm/debug.h>
 22#include <asm/tm.h>
 23
 24#include "signal.h"
 25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Log an error when sending an unhandled signal to a process. Controlled
 * through debug.exception-trace sysctl.
 */

/* Non-zero enables the unhandled-signal log message; on by default. */
int show_unhandled_signals = 1;
 31
 32/*
 33 * Allocate space for the signal frame
 34 */
 35void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
 36			   size_t frame_size, int is_32)
 37{
 38        unsigned long oldsp, newsp;
 39
 40        /* Default to using normal stack */
 41        oldsp = get_clean_sp(sp, is_32);
 42	oldsp = sigsp(oldsp, ksig);
 43	newsp = (oldsp - frame_size) & ~0xFUL;
 44
 45	/* Check access */
 46	if (!access_ok(VERIFY_WRITE, (void __user *)newsp, oldsp - newsp))
 47		return NULL;
 48
 49        return (void __user *)newsp;
 50}
 51
/*
 * Decide whether an interrupted system call should be restarted after
 * signal handling, and patch the register state accordingly.  Mutates
 * regs (gpr[0], gpr[3], nip, result, ccr) in place.
 */
static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
				  int has_handler)
{
	unsigned long ret = regs->gpr[3];
	int restart = 1;

	/* syscall ? */
	if (TRAP(regs) != 0x0C00)
		return;

	/* error signalled ? */
	/* sc flags failure via CR0[SO]; clear means success. */
	if (!(regs->ccr & 0x10000000))
		return;

	switch (ret) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
		/* ERESTARTNOHAND means that the syscall should only be
		 * restarted if there was no handler for the signal, and since
		 * we only get here if there is a handler, we dont restart.
		 */
		restart = !has_handler;
		break;
	case ERESTARTSYS:
		/* ERESTARTSYS means to restart the syscall if there is no
		 * handler or the handler was registered with SA_RESTART
		 */
		restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
		break;
	case ERESTARTNOINTR:
		/* ERESTARTNOINTR means that the syscall should be
		 * called again after the signal handler returns.
		 */
		break;
	default:
		/* Not a restart errno — leave the return value alone. */
		return;
	}
	if (restart) {
		if (ret == ERESTART_RESTARTBLOCK)
			regs->gpr[0] = __NR_restart_syscall;
		else
			regs->gpr[3] = regs->orig_gpr3;
		/* Back NIP up one instruction so the syscall re-executes. */
		regs->nip -= 4;
		regs->result = 0;
	} else {
		/* Report EINTR: positive errno in r3 plus CR0[SO] set. */
		regs->result = -EINTR;
		regs->gpr[3] = EINTR;
		regs->ccr |= 0x10000000;
	}
}
102
/*
 * Deliver one pending signal to the current task: fetch the signal,
 * sort out syscall restarting, re-arm the hardware breakpoint, and
 * hand off to the 32- or 64-bit frame-setup code.
 */
static void do_signal(struct task_struct *tsk)
{
	sigset_t *oldset = sigmask_to_save();
	struct ksignal ksig = { .sig = 0 };
	int ret;
	int is32 = is_32bit_task();

	BUG_ON(tsk != current);

	get_signal(&ksig);

	/* Is there any syscall restart business here ? */
	check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);

	if (ksig.sig <= 0) {
		/* No signal to deliver -- put the saved sigmask back */
		restore_saved_sigmask();
		/* Clear the trap so a later pass won't restart again. */
		tsk->thread.regs->trap = 0;
		return;               /* no signals delivered */
	}

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Reenable the DABR before delivering the signal to
	 * user space. The DABR will have been cleared if it
	 * triggered inside the kernel.
	 */
	if (tsk->thread.hw_brk.address && tsk->thread.hw_brk.type)
		__set_breakpoint(&tsk->thread.hw_brk);
#endif
	/* Re-enable the breakpoints for the signal stack */
	thread_change_pc(tsk, tsk->thread.regs);

	if (is32) {
		/* SA_SIGINFO selects the rt frame for 32-bit tasks. */
        	if (ksig.ka.sa.sa_flags & SA_SIGINFO)
			ret = handle_rt_signal32(&ksig, oldset, tsk);
		else
			ret = handle_signal32(&ksig, oldset, tsk);
	} else {
		/* 64-bit tasks always use the rt frame. */
		ret = handle_rt_signal64(&ksig, oldset, tsk);
	}

	tsk->thread.regs->trap = 0;
	signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
}
148
/*
 * Handle pending work flagged in the thread-info flags on the way back
 * to userspace: uprobes, live-patching, signal delivery and
 * TIF_NOTIFY_RESUME callbacks.
 */
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
	/* Tell context tracking we are about to run user-bound work. */
	user_exit();

	if (thread_info_flags & _TIF_UPROBE)
		uprobe_notify_resume(regs);

	if (thread_info_flags & _TIF_PATCH_PENDING)
		klp_update_patch_state(current);

	if (thread_info_flags & _TIF_SIGPENDING) {
		BUG_ON(regs != current->thread.regs);
		do_signal(current);
	}

	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(regs);
	}

	user_enter();
}
171
unsigned long get_tm_stackpointer(struct task_struct *tsk)
{
	/* When in an active transaction that takes a signal, we need to be
	 * careful with the stack.  It's possible that the stack has moved back
	 * up after the tbegin.  The obvious case here is when the tbegin is
	 * called inside a function that returns before a tend.  In this case,
	 * the stack is part of the checkpointed transactional memory state.
	 * If we write over this non transactionally or in suspend, we are in
	 * trouble because if we get a tm abort, the program counter and stack
	 * pointer will be back at the tbegin but our in memory stack won't be
	 * valid anymore.
	 *
	 * To avoid this, when taking a signal in an active transaction, we
	 * need to use the stack pointer from the checkpointed state, rather
	 * than the speculated state.  This ensures that the signal context
	 * (written tm suspended) will be written below the stack required for
	 * the rollback.  The transaction is aborted because of the treclaim,
	 * so any memory written between the tbegin and the signal will be
	 * rolled back anyway.
	 *
	 * For signals taken in non-TM or suspended mode, we use the
	 * normal/non-checkpointed stack pointer.
	 */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	BUG_ON(tsk != current);

	if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
		/* Abort the transaction so the signal context is safe. */
		tm_reclaim_current(TM_CAUSE_SIGNAL);
		if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
			/* Active transaction: use the checkpointed r1. */
			return tsk->thread.ckpt_regs.gpr[1];
	}
#endif
	/* Default: the live (speculative) user stack pointer, r1. */
	return tsk->thread.regs->gpr[1];
}