Linux Audio

Check our new training course

Loading...
v6.2
  1/*
  2 * Common signal handling code for both 32 and 64 bits
  3 *
  4 *    Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
  5 *    Extracted from signal_32.c and signal_64.c
  6 *
  7 * This file is subject to the terms and conditions of the GNU General
  8 * Public License.  See the file README.legal in the main directory of
  9 * this archive for more details.
 10 */
 11
 12#include <linux/resume_user_mode.h>
 13#include <linux/signal.h>
 14#include <linux/uprobes.h>
 15#include <linux/key.h>
 16#include <linux/context_tracking.h>
 17#include <linux/livepatch.h>
 18#include <linux/syscalls.h>
 19#include <asm/hw_breakpoint.h>
 20#include <linux/uaccess.h>
 21#include <asm/switch_to.h>
 22#include <asm/unistd.h>
 23#include <asm/debug.h>
 24#include <asm/tm.h>
 25
 26#include "signal.h"
 27
 28#ifdef CONFIG_VSX
 29unsigned long copy_fpr_to_user(void __user *to,
 30			       struct task_struct *task)
 31{
 32	u64 buf[ELF_NFPREG];
 33	int i;
 34
 35	/* save FPR copy to local buffer then write to the thread_struct */
 36	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 37		buf[i] = task->thread.TS_FPR(i);
 38	buf[i] = task->thread.fp_state.fpscr;
 39	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 40}
 41
 42unsigned long copy_fpr_from_user(struct task_struct *task,
 43				 void __user *from)
 44{
 45	u64 buf[ELF_NFPREG];
 46	int i;
 47
 48	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
 49		return 1;
 50	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 51		task->thread.TS_FPR(i) = buf[i];
 52	task->thread.fp_state.fpscr = buf[i];
 53
 54	return 0;
 55}
 56
 57unsigned long copy_vsx_to_user(void __user *to,
 58			       struct task_struct *task)
 59{
 60	u64 buf[ELF_NVSRHALFREG];
 61	int i;
 62
 63	/* save FPR copy to local buffer then write to the thread_struct */
 64	for (i = 0; i < ELF_NVSRHALFREG; i++)
 65		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
 66	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
 67}
 68
 69unsigned long copy_vsx_from_user(struct task_struct *task,
 70				 void __user *from)
 71{
 72	u64 buf[ELF_NVSRHALFREG];
 73	int i;
 74
 75	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
 76		return 1;
 77	for (i = 0; i < ELF_NVSRHALFREG ; i++)
 78		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 79	return 0;
 80}
 81
 82#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 83unsigned long copy_ckfpr_to_user(void __user *to,
 84				  struct task_struct *task)
 85{
 86	u64 buf[ELF_NFPREG];
 87	int i;
 88
 89	/* save FPR copy to local buffer then write to the thread_struct */
 90	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 91		buf[i] = task->thread.TS_CKFPR(i);
 92	buf[i] = task->thread.ckfp_state.fpscr;
 93	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 94}
 95
 96unsigned long copy_ckfpr_from_user(struct task_struct *task,
 97					  void __user *from)
 98{
 99	u64 buf[ELF_NFPREG];
100	int i;
101
102	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
103		return 1;
104	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
105		task->thread.TS_CKFPR(i) = buf[i];
106	task->thread.ckfp_state.fpscr = buf[i];
107
108	return 0;
109}
110
111unsigned long copy_ckvsx_to_user(void __user *to,
112				  struct task_struct *task)
113{
114	u64 buf[ELF_NVSRHALFREG];
115	int i;
116
117	/* save FPR copy to local buffer then write to the thread_struct */
118	for (i = 0; i < ELF_NVSRHALFREG; i++)
119		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
120	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
121}
122
123unsigned long copy_ckvsx_from_user(struct task_struct *task,
124					  void __user *from)
125{
126	u64 buf[ELF_NVSRHALFREG];
127	int i;
128
129	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
130		return 1;
131	for (i = 0; i < ELF_NVSRHALFREG ; i++)
132		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
133	return 0;
134}
135#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
136#endif
137
/*
 * Log an error when sending an unhandled signal to a process. Controlled
 * through the debug.exception-trace sysctl.
 */
int show_unhandled_signals = 1;
143
144unsigned long get_min_sigframe_size(void)
145{
146	if (IS_ENABLED(CONFIG_PPC64))
147		return get_min_sigframe_size_64();
148	else
149		return get_min_sigframe_size_32();
150}
151
#ifdef CONFIG_COMPAT
/* Compat (32-bit task on 64-bit kernel) variant: always the 32-bit frame */
unsigned long get_min_sigframe_size_compat(void)
{
	return get_min_sigframe_size_32();
}
#endif
158
159/*
160 * Allocate space for the signal frame
161 */
162static unsigned long get_tm_stackpointer(struct task_struct *tsk);
163
164void __user *get_sigframe(struct ksignal *ksig, struct task_struct *tsk,
165			  size_t frame_size, int is_32)
166{
167        unsigned long oldsp, newsp;
168	unsigned long sp = get_tm_stackpointer(tsk);
169
170        /* Default to using normal stack */
171	if (is_32)
172		oldsp = sp & 0x0ffffffffUL;
173	else
174		oldsp = sp;
175	oldsp = sigsp(oldsp, ksig);
176	newsp = (oldsp - frame_size) & ~0xFUL;
177
178        return (void __user *)newsp;
179}
180
181static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
182				  int has_handler)
183{
184	unsigned long ret = regs->gpr[3];
185	int restart = 1;
186
187	/* syscall ? */
188	if (!trap_is_syscall(regs))
189		return;
190
191	if (trap_norestart(regs))
192		return;
193
194	/* error signalled ? */
195	if (trap_is_scv(regs)) {
196		/* 32-bit compat mode sign extend? */
197		if (!IS_ERR_VALUE(ret))
198			return;
199		ret = -ret;
200	} else if (!(regs->ccr & 0x10000000)) {
201		return;
202	}
203
204	switch (ret) {
205	case ERESTART_RESTARTBLOCK:
206	case ERESTARTNOHAND:
207		/* ERESTARTNOHAND means that the syscall should only be
208		 * restarted if there was no handler for the signal, and since
209		 * we only get here if there is a handler, we dont restart.
210		 */
211		restart = !has_handler;
212		break;
213	case ERESTARTSYS:
214		/* ERESTARTSYS means to restart the syscall if there is no
215		 * handler or the handler was registered with SA_RESTART
216		 */
217		restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
218		break;
219	case ERESTARTNOINTR:
220		/* ERESTARTNOINTR means that the syscall should be
221		 * called again after the signal handler returns.
222		 */
223		break;
224	default:
225		return;
226	}
227	if (restart) {
228		if (ret == ERESTART_RESTARTBLOCK)
229			regs->gpr[0] = __NR_restart_syscall;
230		else
231			regs->gpr[3] = regs->orig_gpr3;
232		regs_add_return_ip(regs, -4);
233		regs->result = 0;
234	} else {
235		if (trap_is_scv(regs)) {
236			regs->result = -EINTR;
237			regs->gpr[3] = -EINTR;
238		} else {
239			regs->result = -EINTR;
240			regs->gpr[3] = EINTR;
241			regs->ccr |= 0x10000000;
242		}
243	}
244}
245
246static void do_signal(struct task_struct *tsk)
247{
248	sigset_t *oldset = sigmask_to_save();
249	struct ksignal ksig = { .sig = 0 };
250	int ret;
251
252	BUG_ON(tsk != current);
253
254	get_signal(&ksig);
255
256	/* Is there any syscall restart business here ? */
257	check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);
258
259	if (ksig.sig <= 0) {
260		/* No signal to deliver -- put the saved sigmask back */
261		restore_saved_sigmask();
262		set_trap_norestart(tsk->thread.regs);
263		return;               /* no signals delivered */
264	}
265
266        /*
267	 * Reenable the DABR before delivering the signal to
268	 * user space. The DABR will have been cleared if it
269	 * triggered inside the kernel.
270	 */
271	if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
272		int i;
273
274		for (i = 0; i < nr_wp_slots(); i++) {
275			if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type)
276				__set_breakpoint(i, &tsk->thread.hw_brk[i]);
277		}
278	}
279
280	/* Re-enable the breakpoints for the signal stack */
281	thread_change_pc(tsk, tsk->thread.regs);
282
283	rseq_signal_deliver(&ksig, tsk->thread.regs);
284
285	if (is_32bit_task()) {
286        	if (ksig.ka.sa.sa_flags & SA_SIGINFO)
287			ret = handle_rt_signal32(&ksig, oldset, tsk);
288		else
289			ret = handle_signal32(&ksig, oldset, tsk);
290	} else {
291		ret = handle_rt_signal64(&ksig, oldset, tsk);
292	}
293
294	set_trap_norestart(tsk->thread.regs);
295	signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
296}
297
298void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
299{
300	if (thread_info_flags & _TIF_UPROBE)
301		uprobe_notify_resume(regs);
302
303	if (thread_info_flags & _TIF_PATCH_PENDING)
304		klp_update_patch_state(current);
305
306	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
307		BUG_ON(regs != current->thread.regs);
308		do_signal(current);
309	}
310
311	if (thread_info_flags & _TIF_NOTIFY_RESUME)
312		resume_user_mode_work(regs);
 
 
313}
314
315static unsigned long get_tm_stackpointer(struct task_struct *tsk)
316{
317	/* When in an active transaction that takes a signal, we need to be
318	 * careful with the stack.  It's possible that the stack has moved back
319	 * up after the tbegin.  The obvious case here is when the tbegin is
320	 * called inside a function that returns before a tend.  In this case,
321	 * the stack is part of the checkpointed transactional memory state.
322	 * If we write over this non transactionally or in suspend, we are in
323	 * trouble because if we get a tm abort, the program counter and stack
324	 * pointer will be back at the tbegin but our in memory stack won't be
325	 * valid anymore.
326	 *
327	 * To avoid this, when taking a signal in an active transaction, we
328	 * need to use the stack pointer from the checkpointed state, rather
329	 * than the speculated state.  This ensures that the signal context
330	 * (written tm suspended) will be written below the stack required for
331	 * the rollback.  The transaction is aborted because of the treclaim,
332	 * so any memory written between the tbegin and the signal will be
333	 * rolled back anyway.
334	 *
335	 * For signals taken in non-TM or suspended mode, we use the
336	 * normal/non-checkpointed stack pointer.
337	 */
338	struct pt_regs *regs = tsk->thread.regs;
339	unsigned long ret = regs->gpr[1];
340
341#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
342	BUG_ON(tsk != current);
343
344	if (MSR_TM_ACTIVE(regs->msr)) {
345		preempt_disable();
346		tm_reclaim_current(TM_CAUSE_SIGNAL);
347		if (MSR_TM_TRANSACTIONAL(regs->msr))
348			ret = tsk->thread.ckpt_regs.gpr[1];
349
350		/*
351		 * If we treclaim, we must clear the current thread's TM bits
352		 * before re-enabling preemption. Otherwise we might be
353		 * preempted and have the live MSR[TS] changed behind our back
354		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
355		 * enter the signal handler in non-transactional state.
356		 */
357		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
358		preempt_enable();
359	}
360#endif
361	return ret;
362}
363
364static const char fm32[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %08lx lr %08lx\n";
365static const char fm64[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %016lx lr %016lx\n";
366
367void signal_fault(struct task_struct *tsk, struct pt_regs *regs,
368		  const char *where, void __user *ptr)
369{
370	if (show_unhandled_signals)
371		printk_ratelimited(regs->msr & MSR_64BIT ? fm64 : fm32, tsk->comm,
372				   task_pid_nr(tsk), where, ptr, regs->nip, regs->link);
373}
v5.14.15
  1/*
  2 * Common signal handling code for both 32 and 64 bits
  3 *
  4 *    Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
  5 *    Extracted from signal_32.c and signal_64.c
  6 *
  7 * This file is subject to the terms and conditions of the GNU General
  8 * Public License.  See the file README.legal in the main directory of
  9 * this archive for more details.
 10 */
 11
 12#include <linux/tracehook.h>
 13#include <linux/signal.h>
 14#include <linux/uprobes.h>
 15#include <linux/key.h>
 16#include <linux/context_tracking.h>
 17#include <linux/livepatch.h>
 18#include <linux/syscalls.h>
 19#include <asm/hw_breakpoint.h>
 20#include <linux/uaccess.h>
 21#include <asm/switch_to.h>
 22#include <asm/unistd.h>
 23#include <asm/debug.h>
 24#include <asm/tm.h>
 25
 26#include "signal.h"
 27
 28#ifdef CONFIG_VSX
 29unsigned long copy_fpr_to_user(void __user *to,
 30			       struct task_struct *task)
 31{
 32	u64 buf[ELF_NFPREG];
 33	int i;
 34
 35	/* save FPR copy to local buffer then write to the thread_struct */
 36	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 37		buf[i] = task->thread.TS_FPR(i);
 38	buf[i] = task->thread.fp_state.fpscr;
 39	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 40}
 41
 42unsigned long copy_fpr_from_user(struct task_struct *task,
 43				 void __user *from)
 44{
 45	u64 buf[ELF_NFPREG];
 46	int i;
 47
 48	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
 49		return 1;
 50	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 51		task->thread.TS_FPR(i) = buf[i];
 52	task->thread.fp_state.fpscr = buf[i];
 53
 54	return 0;
 55}
 56
 57unsigned long copy_vsx_to_user(void __user *to,
 58			       struct task_struct *task)
 59{
 60	u64 buf[ELF_NVSRHALFREG];
 61	int i;
 62
 63	/* save FPR copy to local buffer then write to the thread_struct */
 64	for (i = 0; i < ELF_NVSRHALFREG; i++)
 65		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
 66	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
 67}
 68
 69unsigned long copy_vsx_from_user(struct task_struct *task,
 70				 void __user *from)
 71{
 72	u64 buf[ELF_NVSRHALFREG];
 73	int i;
 74
 75	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
 76		return 1;
 77	for (i = 0; i < ELF_NVSRHALFREG ; i++)
 78		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 79	return 0;
 80}
 81
 82#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 83unsigned long copy_ckfpr_to_user(void __user *to,
 84				  struct task_struct *task)
 85{
 86	u64 buf[ELF_NFPREG];
 87	int i;
 88
 89	/* save FPR copy to local buffer then write to the thread_struct */
 90	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
 91		buf[i] = task->thread.TS_CKFPR(i);
 92	buf[i] = task->thread.ckfp_state.fpscr;
 93	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
 94}
 95
 96unsigned long copy_ckfpr_from_user(struct task_struct *task,
 97					  void __user *from)
 98{
 99	u64 buf[ELF_NFPREG];
100	int i;
101
102	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
103		return 1;
104	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
105		task->thread.TS_CKFPR(i) = buf[i];
106	task->thread.ckfp_state.fpscr = buf[i];
107
108	return 0;
109}
110
111unsigned long copy_ckvsx_to_user(void __user *to,
112				  struct task_struct *task)
113{
114	u64 buf[ELF_NVSRHALFREG];
115	int i;
116
117	/* save FPR copy to local buffer then write to the thread_struct */
118	for (i = 0; i < ELF_NVSRHALFREG; i++)
119		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
120	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
121}
122
123unsigned long copy_ckvsx_from_user(struct task_struct *task,
124					  void __user *from)
125{
126	u64 buf[ELF_NVSRHALFREG];
127	int i;
128
129	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
130		return 1;
131	for (i = 0; i < ELF_NVSRHALFREG ; i++)
132		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
133	return 0;
134}
135#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
136#endif
137
/*
 * Log an error when sending an unhandled signal to a process. Controlled
 * through the debug.exception-trace sysctl.
 */
int show_unhandled_signals = 1;
143
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144/*
145 * Allocate space for the signal frame
146 */
147static unsigned long get_tm_stackpointer(struct task_struct *tsk);
148
149void __user *get_sigframe(struct ksignal *ksig, struct task_struct *tsk,
150			  size_t frame_size, int is_32)
151{
152        unsigned long oldsp, newsp;
153	unsigned long sp = get_tm_stackpointer(tsk);
154
155        /* Default to using normal stack */
156	if (is_32)
157		oldsp = sp & 0x0ffffffffUL;
158	else
159		oldsp = sp;
160	oldsp = sigsp(oldsp, ksig);
161	newsp = (oldsp - frame_size) & ~0xFUL;
162
163        return (void __user *)newsp;
164}
165
166static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
167				  int has_handler)
168{
169	unsigned long ret = regs->gpr[3];
170	int restart = 1;
171
172	/* syscall ? */
173	if (!trap_is_syscall(regs))
174		return;
175
176	if (trap_norestart(regs))
177		return;
178
179	/* error signalled ? */
180	if (trap_is_scv(regs)) {
181		/* 32-bit compat mode sign extend? */
182		if (!IS_ERR_VALUE(ret))
183			return;
184		ret = -ret;
185	} else if (!(regs->ccr & 0x10000000)) {
186		return;
187	}
188
189	switch (ret) {
190	case ERESTART_RESTARTBLOCK:
191	case ERESTARTNOHAND:
192		/* ERESTARTNOHAND means that the syscall should only be
193		 * restarted if there was no handler for the signal, and since
194		 * we only get here if there is a handler, we dont restart.
195		 */
196		restart = !has_handler;
197		break;
198	case ERESTARTSYS:
199		/* ERESTARTSYS means to restart the syscall if there is no
200		 * handler or the handler was registered with SA_RESTART
201		 */
202		restart = !has_handler || (ka->sa.sa_flags & SA_RESTART) != 0;
203		break;
204	case ERESTARTNOINTR:
205		/* ERESTARTNOINTR means that the syscall should be
206		 * called again after the signal handler returns.
207		 */
208		break;
209	default:
210		return;
211	}
212	if (restart) {
213		if (ret == ERESTART_RESTARTBLOCK)
214			regs->gpr[0] = __NR_restart_syscall;
215		else
216			regs->gpr[3] = regs->orig_gpr3;
217		regs_add_return_ip(regs, -4);
218		regs->result = 0;
219	} else {
220		if (trap_is_scv(regs)) {
221			regs->result = -EINTR;
222			regs->gpr[3] = -EINTR;
223		} else {
224			regs->result = -EINTR;
225			regs->gpr[3] = EINTR;
226			regs->ccr |= 0x10000000;
227		}
228	}
229}
230
231static void do_signal(struct task_struct *tsk)
232{
233	sigset_t *oldset = sigmask_to_save();
234	struct ksignal ksig = { .sig = 0 };
235	int ret;
236
237	BUG_ON(tsk != current);
238
239	get_signal(&ksig);
240
241	/* Is there any syscall restart business here ? */
242	check_syscall_restart(tsk->thread.regs, &ksig.ka, ksig.sig > 0);
243
244	if (ksig.sig <= 0) {
245		/* No signal to deliver -- put the saved sigmask back */
246		restore_saved_sigmask();
247		set_trap_norestart(tsk->thread.regs);
248		return;               /* no signals delivered */
249	}
250
251        /*
252	 * Reenable the DABR before delivering the signal to
253	 * user space. The DABR will have been cleared if it
254	 * triggered inside the kernel.
255	 */
256	if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
257		int i;
258
259		for (i = 0; i < nr_wp_slots(); i++) {
260			if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type)
261				__set_breakpoint(i, &tsk->thread.hw_brk[i]);
262		}
263	}
264
265	/* Re-enable the breakpoints for the signal stack */
266	thread_change_pc(tsk, tsk->thread.regs);
267
268	rseq_signal_deliver(&ksig, tsk->thread.regs);
269
270	if (is_32bit_task()) {
271        	if (ksig.ka.sa.sa_flags & SA_SIGINFO)
272			ret = handle_rt_signal32(&ksig, oldset, tsk);
273		else
274			ret = handle_signal32(&ksig, oldset, tsk);
275	} else {
276		ret = handle_rt_signal64(&ksig, oldset, tsk);
277	}
278
279	set_trap_norestart(tsk->thread.regs);
280	signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
281}
282
283void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
284{
285	if (thread_info_flags & _TIF_UPROBE)
286		uprobe_notify_resume(regs);
287
288	if (thread_info_flags & _TIF_PATCH_PENDING)
289		klp_update_patch_state(current);
290
291	if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
292		BUG_ON(regs != current->thread.regs);
293		do_signal(current);
294	}
295
296	if (thread_info_flags & _TIF_NOTIFY_RESUME) {
297		tracehook_notify_resume(regs);
298		rseq_handle_notify_resume(NULL, regs);
299	}
300}
301
302static unsigned long get_tm_stackpointer(struct task_struct *tsk)
303{
304	/* When in an active transaction that takes a signal, we need to be
305	 * careful with the stack.  It's possible that the stack has moved back
306	 * up after the tbegin.  The obvious case here is when the tbegin is
307	 * called inside a function that returns before a tend.  In this case,
308	 * the stack is part of the checkpointed transactional memory state.
309	 * If we write over this non transactionally or in suspend, we are in
310	 * trouble because if we get a tm abort, the program counter and stack
311	 * pointer will be back at the tbegin but our in memory stack won't be
312	 * valid anymore.
313	 *
314	 * To avoid this, when taking a signal in an active transaction, we
315	 * need to use the stack pointer from the checkpointed state, rather
316	 * than the speculated state.  This ensures that the signal context
317	 * (written tm suspended) will be written below the stack required for
318	 * the rollback.  The transaction is aborted because of the treclaim,
319	 * so any memory written between the tbegin and the signal will be
320	 * rolled back anyway.
321	 *
322	 * For signals taken in non-TM or suspended mode, we use the
323	 * normal/non-checkpointed stack pointer.
324	 */
325	struct pt_regs *regs = tsk->thread.regs;
326	unsigned long ret = regs->gpr[1];
327
328#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
329	BUG_ON(tsk != current);
330
331	if (MSR_TM_ACTIVE(regs->msr)) {
332		preempt_disable();
333		tm_reclaim_current(TM_CAUSE_SIGNAL);
334		if (MSR_TM_TRANSACTIONAL(regs->msr))
335			ret = tsk->thread.ckpt_regs.gpr[1];
336
337		/*
338		 * If we treclaim, we must clear the current thread's TM bits
339		 * before re-enabling preemption. Otherwise we might be
340		 * preempted and have the live MSR[TS] changed behind our back
341		 * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
342		 * enter the signal handler in non-transactional state.
343		 */
344		regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
345		preempt_enable();
346	}
347#endif
348	return ret;
349}
350
351static const char fm32[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %08lx lr %08lx\n";
352static const char fm64[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %016lx lr %016lx\n";
353
354void signal_fault(struct task_struct *tsk, struct pt_regs *regs,
355		  const char *where, void __user *ptr)
356{
357	if (show_unhandled_signals)
358		printk_ratelimited(regs->msr & MSR_64BIT ? fm64 : fm32, tsk->comm,
359				   task_pid_nr(tsk), where, ptr, regs->nip, regs->link);
360}