// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>

#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/stacktrace.h>
#include <asm/hw_breakpoint.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif
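
/*
 * TM_DEBUG() only produces output when TM_DEBUG_SW is defined at build
 * time, e.g. via -DTM_DEBUG_SW in the compiler flags (the "-D" example is
 * illustrative, not a documented switch).
 */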

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Are we running in "Suspend disabled" mode? If so we have to block any
 * sigreturn that would get us into suspended state, and we also warn in some
 * other paths that we should never reach with suspend disabled.
 */
bool tm_suspend_disabled __ro_after_init = false;

static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		regs_set_return_msr(&tsk->thread.ckpt_regs,
				    tsk->thread.regs->msr);
		set_thread_flag(TIF_RESTORE_TM);
	}
}

#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);

/* notrace because it's called by restore_math */
unsigned long notrace msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;

	if (oldmsr != newmsr)
		newmsr = mtmsr_isync_irqsafe(newmsr);

	return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);

/* notrace because it's called by restore_math */
void notrace __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync_irqsafe(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
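
/*
 * Sketch of the intended calling pattern for the two helpers above,
 * mirrored by giveup_fpu() below (any of the facility MSR bits can be
 * used the same way):
 *
 *	msr_check_and_set(MSR_FP);
 *	... touch the facility's registers ...
 *	msr_check_and_clear(MSR_FP);
 */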

#ifdef CONFIG_PPC_FPU
static void __giveup_fpu(struct task_struct *tsk)
{
	unsigned long msr;

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	regs_set_return_msr(tsk->thread.regs, msr);
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct. That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process. Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);
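
/*
 * A minimal usage sketch for enable_kernel_fp(): preemption must already
 * be disabled (see the WARN_ON above), and disable_kernel_fp() is the
 * matching helper declared alongside it in asm/switch_to.h:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use FP registers in kernel code ...
 *	disable_kernel_fp();
 *	preempt_enable();
 */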
#else
static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
static void __giveup_altivec(struct task_struct *tsk)
{
	unsigned long msr;

	save_altivec(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	regs_set_return_msr(tsk->thread.regs, msr);
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	unsigned long msr = tsk->thread.regs->msr;

	/*
	 * We should never be setting MSR_VSX without also setting
	 * MSR_FP and MSR_VEC
	 */
	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

	/* __giveup_fpu will clear MSR_VSX */
	if (msr & MSR_FP)
		__giveup_fpu(tsk);
	if (msr & MSR_VEC)
		__giveup_altivec(tsk);
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

void enable_kernel_vsx(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs &&
	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr_all_available |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;

	return 0;
}
early_initcall(init_msr_all_available);

void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	check_if_tm_restore_required(tsk);

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_FPU
static bool should_restore_fp(void)
{
	if (current->thread.load_fp) {
		current->thread.load_fp++;
		return true;
	}
	return false;
}

static void do_restore_fp(void)
{
	load_fp_state(&current->thread.fp_state);
}
#else
static bool should_restore_fp(void) { return false; }
static void do_restore_fp(void) { }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
static bool should_restore_altivec(void)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
		current->thread.load_vec++;
		return true;
	}
	return false;
}

static void do_restore_altivec(void)
{
	load_vr_state(&current->thread.vr_state);
	current->thread.used_vr = 1;
}
#else
static bool should_restore_altivec(void) { return false; }
static void do_restore_altivec(void) { }
#endif /* CONFIG_ALTIVEC */

static bool should_restore_vsx(void)
{
	if (cpu_has_feature(CPU_FTR_VSX))
		return true;
	return false;
}
#ifdef CONFIG_VSX
static void do_restore_vsx(void)
{
	current->thread.used_vsr = 1;
}
#else
static void do_restore_vsx(void) { }
#endif /* CONFIG_VSX */

/*
 * The exception exit path calls restore_math() with interrupts hard disabled
 * but the soft irq state not "reconciled". ftrace code that calls
 * local_irq_save/restore causes warnings.
 *
 * Rather than complicate the exit path, just don't trace restore_math. This
 * could be done by having ftrace entry code check for this un-reconciled
 * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
 * temporarily fix it up for the duration of the ftrace call.
 */
void notrace restore_math(struct pt_regs *regs)
{
	unsigned long msr;
	unsigned long new_msr = 0;

	msr = regs->msr;

	/*
	 * new_msr tracks the facilities that are to be restored. Only reload
	 * if the bit is not set in the user MSR (if it is set, the registers
	 * are live for the user thread).
	 */
	if ((!(msr & MSR_FP)) && should_restore_fp())
		new_msr |= MSR_FP;

	if ((!(msr & MSR_VEC)) && should_restore_altivec())
		new_msr |= MSR_VEC;

	if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
		if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
			new_msr |= MSR_VSX;
	}

	if (new_msr) {
		unsigned long fpexc_mode = 0;

		msr_check_and_set(new_msr);

		if (new_msr & MSR_FP) {
			do_restore_fp();

			// This also covers VSX, because VSX implies FP
			fpexc_mode = current->thread.fpexc_mode;
		}

		if (new_msr & MSR_VEC)
			do_restore_altivec();

		if (new_msr & MSR_VSX)
			do_restore_vsx();

		msr_check_and_clear(new_msr);

		regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
	}
}
#endif /* CONFIG_PPC_BOOK3S_64 */

static void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		save_fpu(tsk);

	if (usermsr & MSR_VEC)
		save_altivec(tsk);

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}

void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
		save_all(tsk);

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int breakpt)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
				    (void __user *)address);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */

static void do_break_handler(struct pt_regs *regs)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct arch_hw_breakpoint *info;
	ppc_inst_t instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea;
	int i;

	/*
	 * If the underlying hardware supports only one watchpoint, we know
	 * it caused the exception. 8xx also falls into this category.
	 */
	if (nr_wp_slots() == 1) {
		__set_breakpoint(0, &null_brk);
		current->thread.hw_brk[0] = null_brk;
		current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
		return;
	}

	/* Otherwise find out which DAWR caused exception and disable it. */
	wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		info = &current->thread.hw_brk[i];
		if (!info->address)
			continue;

		if (wp_check_constraints(regs, instr, ea, type, size, info)) {
			__set_breakpoint(i, &null_brk);
			current->thread.hw_brk[i] = null_brk;
			current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
		}
	}
}

DEFINE_INTERRUPT_HANDLER(do_break)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/*
	 * We reach here only when watchpoint exception is generated by ptrace
	 * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set,
	 * watchpoint is already handled by hw_breakpoint_handler() so we don't
	 * have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set,
	 * we need to manually handle the watchpoint here.
	 */
	if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
		do_break_handler(regs);

	/* Deliver the signal to userspace */
	force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry. Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * If either the old or the new thread is making use of the debug
 * registers, set the debug registers from the values stored in the
 * new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(i, brk);
	preempt_enable();
}

static void set_debug_reg_defaults(struct thread_struct *thread)
{
	int i;
	struct arch_hw_breakpoint null_brk = {0};

	for (i = 0; i < nr_wp_slots(); i++) {
		thread->hw_brk[i] = null_brk;
		if (ppc_breakpoint_available())
			set_breakpoint(i, &thread->hw_brk[i]);
	}
}

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	/* no need to check hw_len. it's calculated from address and len */
	return true;
}

static void switch_hw_breakpoint(struct task_struct *new)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
					&new->thread.hw_brk[i])))
			continue;

		__set_breakpoint(i, &new->thread.hw_brk[i]);
	}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
		mtspr(SPRN_DAC1, dabr);
		if (IS_ENABLED(CONFIG_PPC_47x))
			isync();
		return 0;
	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		mtspr(SPRN_DABR, dabr);
		if (cpu_has_feature(CPU_FTR_DABRX))
			mtspr(SPRN_DABRX, dabrx);
		return 0;
	} else {
		return -EINVAL;
	}
}

static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
{
	unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
			       LCTRL1_CRWF_RW;
	unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
	unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
	unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);

	if (start_addr == 0)
		lctrl2 |= LCTRL2_LW0LA_F;
	else if (end_addr == 0)
		lctrl2 |= LCTRL2_LW0LA_E;
	else
		lctrl2 |= LCTRL2_LW0LA_EandF;

	mtspr(SPRN_LCTRL2, 0);

	if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
		return 0;

	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
		lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
		lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;

	mtspr(SPRN_CMPE, start_addr - 1);
	mtspr(SPRN_CMPF, end_addr);
	mtspr(SPRN_LCTRL1, lctrl1);
	mtspr(SPRN_LCTRL2, lctrl2);

	return 0;
}

static void set_hw_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
	if (dawr_enabled())
		// Power8 or later
		set_dawr(nr, brk);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		set_breakpoint_8xx(brk);
	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		// Power7 or earlier
		set_dabr(brk);
	else
		// Shouldn't happen due to higher level checks
		WARN_ON_ONCE(1);
}

void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
	set_hw_breakpoint(nr, brk);
}

/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
	if (dawr_enabled())
		return true; /* POWER8 DAWR or POWER9 forced DAWR */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return false; /* POWER9 with DAWR disabled */
	/* DABR: Everything but POWER8 and POWER9 */
	return true;
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
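
/*
 * Callers are expected to gate breakpoint setup on this check, as
 * set_debug_reg_defaults() above does; a sketch (the -ENODEV choice is
 * hypothetical):
 *
 *	if (!ppc_breakpoint_available())
 *		return -ENODEV;
 */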

/* Disable the breakpoint in hardware without touching current_brk[] */
void suspend_breakpoints(void)
{
	struct arch_hw_breakpoint brk = {0};
	int i;

	if (!ppc_breakpoint_available())
		return;

	for (i = 0; i < nr_wp_slots(); i++)
		set_hw_breakpoint(i, &brk);
}

/*
 * Re-enable breakpoints suspended by suspend_breakpoints() in hardware
 * from current_brk[]
 */
void restore_breakpoints(void)
{
	int i;

	if (!ppc_breakpoint_available())
		return;

	for (i = 0; i < nr_wp_slots(); i++)
		set_hw_breakpoint(i, this_cpu_ptr(&current_brk[i]));
}
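
/*
 * The two helpers above form a matched pair; the intended pattern is
 * roughly:
 *
 *	suspend_breakpoints();
 *	... run code that must not trigger watchpoints ...
 *	restore_breakpoints();
 */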

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

static inline bool tm_enabled(struct task_struct *tsk)
{
	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}

static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see: get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, cause);

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state. We use
	 * ckpt_regs.msr here as that's what tm_reclaim will use to
	 * determine if it's going to write the checkpointed state or
	 * not. So either this will write the checkpointed registers,
	 * or reclaim will. Similarly for VMX.
	 */
	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
		memcpy(&thr->ckfp_state, &thr->fp_state,
		       sizeof(struct thread_fp_state));
	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
		memcpy(&thr->ckvr_state, &thr->vr_state,
		       sizeof(struct thread_vr_state));
}

void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
	 * ckvr_state
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	WARN_ON(tm_suspend_disabled);

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs. We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread);

void tm_recheckpoint(struct thread_struct *thread)
{
	unsigned long flags;

	if (!(thread->regs->msr & MSR_TM))
		return;

	/* We really can't be interrupted here: the TEXASR registers must
	 * not change and, later in the trecheckpoint code, we have a
	 * userspace R1. So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread);

	local_irq_restore(flags);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states. This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!tm_enabled(new))
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
		 new->pid, new->thread.regs->msr);

	tm_recheckpoint(&new->thread);

	/*
	 * The checkpointed state has been restored but the live state has
	 * not, ensure all the math functionality is turned off to trigger
	 * restore_math() to reload.
	 */
	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev,
		struct task_struct *new)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		if (tm_enabled(prev) || tm_enabled(new))
			tm_enable();

		if (tm_enabled(prev)) {
			prev->thread.load_tm++;
			tm_reclaim_task(prev);
			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
				prev->thread.regs->msr &= ~MSR_TM;
		}

		tm_recheckpoint_new_task(new);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set. It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded for as long as the
 * transaction continues. The reason is that if we didn't, and
 * subsequently got a FP/VMX/VSX unavailable interrupt inside a
 * transaction, we don't know whether it's the same transaction,
 * and thus we don't know which of the checkpointed state and the
 * transactional state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	/*
	 * This is the only moment we should clear TIF_RESTORE_TM as
	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
	 * again, anything else could lead to an incorrect ckpt_msr being
	 * saved and therefore incorrect signal contexts.
	 */
	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	/* Ensure that restore_math() will restore */
	if (msr_diff & MSR_FP)
		current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
		current->thread.load_vec = 1;
#endif
	restore_math(regs);

	regs_set_return_msr(regs, regs->msr | msr_diff);
}

#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
void tm_reclaim_current(uint8_t cause) {}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		t->spefscr = mfspr(SPRN_SPEFSCR);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs. FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void kvmppc_save_user_regs(void)
{
	unsigned long usermsr;

	if (!current->thread.regs)
		return;

	usermsr = current->thread.regs->msr;

	if (usermsr & MSR_FP)
		save_fpu(current);

	if (usermsr & MSR_VEC)
		save_altivec(current);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (usermsr & MSR_TM) {
		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
		current->thread.regs->msr &= ~MSR_TM;
	}
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);

void kvmppc_save_current_sprs(void)
{
	save_sprs(&current->thread);
}
EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE) &&
	    old_thread->spefscr != new_thread->spefscr)
		mtspr(SPRN_SPEFSCR, new_thread->spefscr);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		if (new_thread->dscr_inherit)
			dscr = new_thread->dscr;

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->fscr != new_thread->fscr)
			mtspr(SPRN_FSCR, new_thread->fscr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    old_thread->tidr != new_thread->tidr)
		mtspr(SPRN_TIDR, new_thread->tidr);
#endif
}

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_64S_HASH_MMU
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC_64S_HASH_MMU
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}

	/*
	 * On POWER9 the copy-paste buffer can only paste into
	 * foreign real addresses, so unprivileged processes can not
	 * see the data or use it in any way unless they have
	 * foreign real mappings. If the new process has the foreign
	 * real address mappings, we must issue a cp_abort to clear
	 * any state and prevent snooping, corruption or a covert
	 * channel. ISA v3.1 supports paste into local memory.
	 */
	if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
			atomic_read(&new->mm->context.vas_windows)))
		asm volatile(PPC_CP_ABORT);
#endif /* CONFIG_PPC_64S_HASH_MMU */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	switch_hw_breakpoint(new);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	/* Save FPU, Altivec, VSX and SPE state */
	giveup_all(prev);

	__switch_to_tm(prev, new);

	if (!radix_enabled()) {
		/*
		 * We can't take a PMU exception inside _switch() since there
		 * is a window where the kernel stack SLB and the kernel stack
		 * are out of sync. Hard disable here.
		 */
		hard_irq_disable();
	}

	/*
	 * Call restore_sprs() and set_return_regs_changed() before calling
	 * _switch(). If we move it after _switch() then we miss out on calling
	 * it for new tasks. The reason for this is we manually create a stack
	 * frame for new tasks that directly returns through ret_from_fork() or
	 * ret_from_kernel_thread(). See copy_thread() for details.
	 */
	restore_sprs(old_thread, new_thread);

	set_return_regs_changed(); /* _switch changes stack (and regs) */

	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		kuap_assert_locked();

	last = _switch(old_thread, new_thread);

	/*
	 * Nothing after _switch will be run for newly created tasks,
	 * because they switch directly to ret_from_fork/ret_from_kernel_thread
	 * etc. Code added here should have a comment explaining why that is
	 * okay.
	 */

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
	/*
	 * This applies to a process that was context switched while inside
	 * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
	 * deactivated above, before _switch(). This will never be the case
	 * for new tasks.
	 */
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}
#endif

	/*
	 * Math facilities are masked out of the child MSR in copy_thread.
	 * A new task does not need to restore_math because it will
	 * demand fault them.
	 */
	if (current->thread.regs)
		restore_math(current->thread.regs);
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}

#define NR_INSN_TO_PRINT	16

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long nip = regs->nip;
	unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	printk("Code: ");

	/*
	 * If we were executing with the MMU off for instructions, adjust pc
	 * rather than printing XXXXXXXX.
	 */
	if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
		pc = (unsigned long)phys_to_virt(pc);
		nip = (unsigned long)phys_to_virt(regs->nip);
	}

	for (i = 0; i < NR_INSN_TO_PRINT; i++) {
		int instr;

		if (!__kernel_text_address(pc) ||
		    get_kernel_nofault(instr, (const void *)pc)) {
			pr_cont("XXXXXXXX ");
		} else {
			if (nip == pc)
				pr_cont("<%08x> ", instr);
			else
				pr_cont("%08x ", instr);
		}

		pc += sizeof(int);
	}

	pr_cont("\n");
}

void show_user_instructions(struct pt_regs *regs)
{
	unsigned long pc;
	int n = NR_INSN_TO_PRINT;
	struct seq_buf s;
	char buf[96];	/* enough for 8 times 9 + 2 chars */

	pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	seq_buf_init(&s, buf, sizeof(buf));

	while (n) {
		int i;

		seq_buf_clear(&s);

		for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
			int instr;

			if (copy_from_user_nofault(&instr, (void __user *)pc,
						   sizeof(instr))) {
				seq_buf_printf(&s, "XXXXXXXX ");
				continue;
			}
			seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
		}

		if (!seq_buf_has_overflowed(&s))
			pr_info("%s[%d]: code: %s\n", current->comm,
				current->pid, s.buffer);
	}
}

struct regbit {
	unsigned long bit;
	const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			pr_cont("%s%s", s, bits->name);
			s = sep;
		}
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
	{MSR_TS_T,	"T"},
	{MSR_TS_S,	"S"},
	{MSR_TM,	"E"},
	{0,		NULL}
};

static void print_tm_bits(unsigned long val)
{
/*
 * This only prints something if at least one of the TM bits is set.
 * Inside the TM[], the output means:
 *   E: Enabled		(bit 32)
 *   S: Suspended	(bit 33)
 *   T: Transactional	(bit 34)
 */
	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
		pr_cont(",TM[");
		print_bits(val, msr_tm_bits, "");
		pr_cont("]");
	}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif

static void print_msr_bits(unsigned long val)
{
	pr_cont("<");
	print_bits(val, msr_bits, ",");
	print_tm_bits(val);
	pr_cont(">");
}
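
/*
 * As an example, an MSR value of 0x8000000000001033 (SF, ME, IR, DR,
 * RI and LE set) would be printed by print_msr_bits() as:
 *
 *	<SF,ME,IR,DR,RI,LE>
 */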

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#endif

static void __show_regs(struct pt_regs *regs)
{
	int i, trap;

	printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR:  "REG" ", regs->msr);
	print_msr_bits(regs->msr);
	pr_cont("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == INTERRUPT_MACHINE_CHECK ||
	    trap == INTERRUPT_DATA_STORAGE ||
	    trap == INTERRUPT_ALIGNMENT) {
		if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
			pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr);
		else
			pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
	}

#ifdef CONFIG_PPC64
	pr_cont("IRQMASK: %lx ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			pr_cont("\nGPR%02d: ", i);
		pr_cont(REG " ", regs->gpr[i]);
	}
	pr_cont("\n");
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	if (IS_ENABLED(CONFIG_KALLSYMS)) {
		printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
		printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
	}
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs);
	show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
	if (!user_mode(regs))
		show_instructions(regs);
}

void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void arch_setup_new_exec(void)
{

#ifdef CONFIG_PPC_BOOK3S_64
	if (!radix_enabled())
		hash__setup_new_exec();
#endif
	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set. Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

#ifdef CONFIG_PPC_MEM_KEYS
	current->thread.regs->amr = default_amr;
	current->thread.regs->iamr = default_iamr;
#endif
}

#ifdef CONFIG_PPC64
/**
 * Assign a TIDR (thread ID) for task @t and set it in the thread
 * structure. For now, we only support setting TIDR for 'current' task.
 *
 * Since the TID value is a truncated form of its PID, it is possible
 * (but unlikely) for 2 threads to have the same TID. In the unlikely event
 * that 2 threads share the same TID and are waiting, one of the following
 * cases will happen:
 *
 * 1. The correct thread is running, the wrong thread is not
 * In this situation, the correct thread is woken and proceeds to pass its
 * condition check.
 *
 * 2. Neither thread is running
 * In this situation, neither thread will be woken. When scheduled, the waiting
 * threads will execute either a wait, which will return immediately, followed
 * by a condition check, which will pass for the correct thread and fail
 * for the wrong thread, or they will execute the condition check immediately.
 *
 * 3. The wrong thread is running, the correct thread is not
 * The wrong thread will be woken, but will fail its condition check and
 * re-execute wait. The correct thread, when scheduled, will execute either
 * its condition check (which will pass), or wait, which returns immediately
 * when called the first time after the thread is scheduled, followed by its
 * condition check (which will pass).
 *
 * 4. Both threads are running
 * Both threads will be woken. The wrong thread will fail its condition check
 * and execute another wait, while the correct thread will pass its condition
 * check.
 *
 * @t: the task to set the thread ID for
 */
int set_thread_tidr(struct task_struct *t)
{
	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
		return -EINVAL;

	if (t != current)
		return -EINVAL;

	if (t->thread.tidr)
		return 0;

	t->thread.tidr = (u16)task_pid_nr(t);
	mtspr(SPRN_TIDR, t->thread.tidr);

	return 0;
}
EXPORT_SYMBOL_GPL(set_thread_tidr);
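
/*
 * A sketch of the expected call site (only 'current' is accepted, see
 * the checks above):
 *
 *	rc = set_thread_tidr(current);
 *	if (rc)
 *		return rc;
 *	(current->thread.tidr now holds the assigned thread ID)
 */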

#endif /* CONFIG_PPC64 */

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_all_to_thread(src);
	/*
	 * Flush TM state out so we can copy it. __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode. Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 *
	 * Can't pass dst because it isn't ready. Doesn't matter, passing
	 * dst is only important for __switch_to()
	 */
	__switch_to_tm(src, src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (radix_enabled())
		return;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy a thread..
 */

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_fork_scv(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	struct thread_info *ti = task_thread_info(p);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int i;
#endif

	klp_init_thread_info(p);

	/* Create initial stack frame. */
	sp -= STACK_USER_INT_FRAME_SIZE;
	*(unsigned long *)(sp + STACK_INT_FRAME_MARKER) = STACK_FRAME_REGS_MARKER;

	/* Copy registers */
	childregs = (struct pt_regs *)(sp + STACK_INT_FRAME_REGS);
	if (unlikely(args->fn)) {
		/* kernel thread */
		((unsigned long *)sp)[0] = 0;
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + STACK_USER_INT_FRAME_SIZE;
		/* function */
		if (args->fn)
			childregs->gpr[14] = ppc_function_entry((void *)args->fn);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = IRQS_ENABLED;
#endif
		childregs->gpr[15] = (unsigned long)args->fn_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		((unsigned long *)sp)[0] = childregs->gpr[1];
		p->thread.regs = childregs;
		/* 64s sets this in ret_from_fork */
		if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
			childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
			if (!is_32bit_task())
				childregs->gpr[13] = tls;
			else
				childregs->gpr[2] = tls;
		}

		if (trap_is_scv(regs))
			f = ret_from_fork_scv;
		else
			f = ret_from_fork;
	}
	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork. The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[STACK_FRAME_LR_SAVE] = (unsigned long)f;
	sp -= STACK_SWITCH_FRAME_SIZE;
	((unsigned long *)sp)[0] = sp + STACK_SWITCH_FRAME_SIZE;
	kregs = (struct pt_regs *)(sp + STACK_SWITCH_FRAME_REGS);
	p->thread.ksp = sp;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	for (i = 0; i < nr_wp_slots(); i++)
		p->thread.ptrace_bps[i] = NULL;
#endif

#ifdef CONFIG_PPC_FPU_REGS
	p->thread.fp_save_area = NULL;
#endif
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
	p->thread.kuap = KUAP_NONE;
#endif
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
	p->thread.pid = MMU_NO_CONTEXT;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = mfspr(SPRN_DSCR);
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		childregs->ppr = DEFAULT_PPR;

	p->thread.tidr = 0;
#endif
	/*
	 * Run with the current AMR value of the kernel
	 */
#ifdef CONFIG_PPC_PKEY
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		kregs->amr = AMR_KUAP_BLOCKED;

	if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
		kregs->iamr = AMR_KUEP_BLOCKED;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}

void preload_new_slb_context(unsigned long start, unsigned long sp);

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
		preload_new_slb_context(start, sp);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Clear any transactional state, we're exec()ing. The cause is
	 * not important as there will never be a recheckpoint so it's not
	 * user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);
#endif

	memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * Ulrich says:
			 *   The latest iteration of the ABI requires that when
			 *   calling a function (at its global entry point),
			 *   the caller must ensure r12 holds the entry point
			 *   address (so that the function can quickly
			 *   establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine. The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs_set_return_ip(regs, entry);
		regs_set_return_msr(regs, MSR_USER64);
	} else {
		regs->gpr[2] = 0;
		regs_set_return_ip(regs, start);
		regs_set_return_msr(regs, MSR_USER32);
	}

#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	current->thread.load_slb = 0;
	current->thread.load_fp = 0;
#ifdef CONFIG_PPC_FPU_REGS
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#endif
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
	current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
	current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);
1978
1979#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1980 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1981
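/*
 * Back ends for the PR_SET_FPEXC and PR_GET_FPEXC prctls: set or report
 * the floating-point exception mode for this task.
 */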
1982int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1983{
1984 struct pt_regs *regs = tsk->thread.regs;
1985
1986	/* This is a bit hairy. If we are an SPE-enabled processor
1987	 * (have embedded fp) we store the IEEE exception enable flags in
1988	 * fpexc_mode. fpexc_mode is also used for setting FP exception
1989	 * mode (async, precise, disabled) for 'Classic' FP. */
1990 if (val & PR_FP_EXC_SW_ENABLE) {
1991 if (cpu_has_feature(CPU_FTR_SPE)) {
1992 /*
1993 * When the sticky exception bits are set
1994 * directly by userspace, it must call prctl
1995 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1996 * in the existing prctl settings) or
1997 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1998 * the bits being set). <fenv.h> functions
1999 * saving and restoring the whole
2000 * floating-point environment need to do so
2001 * anyway to restore the prctl settings from
2002 * the saved environment.
2003 */
2004#ifdef CONFIG_SPE
2005 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
2006 tsk->thread.fpexc_mode = val &
2007 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
2008#endif
2009 return 0;
2010 } else {
2011 return -EINVAL;
2012 }
2013 }
2014
2015	/* On a CONFIG_SPE implementation this does not hurt us. The bits
2016	 * that __pack_fe01 uses do not overlap with the bits used for
2017	 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits on
2018	 * CONFIG_SPE implementations are reserved, so writing to them
2019	 * does not change anything. */
2020 if (val > PR_FP_EXC_PRECISE)
2021 return -EINVAL;
2022 tsk->thread.fpexc_mode = __pack_fe01(val);
2023 if (regs != NULL && (regs->msr & MSR_FP) != 0) {
2024 regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
2025 | tsk->thread.fpexc_mode);
2026 }
2027 return 0;
2028}
2029
2030int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
2031{
2032 unsigned int val = 0;
2033
2034 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
2035 if (cpu_has_feature(CPU_FTR_SPE)) {
2036 /*
2037 * When the sticky exception bits are set
2038 * directly by userspace, it must call prctl
2039 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
2040 * in the existing prctl settings) or
2041 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
2042 * the bits being set). <fenv.h> functions
2043 * saving and restoring the whole
2044 * floating-point environment need to do so
2045 * anyway to restore the prctl settings from
2046 * the saved environment.
2047 */
2048#ifdef CONFIG_SPE
2049 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
2050 val = tsk->thread.fpexc_mode;
2051#endif
2052 } else
2053 return -EINVAL;
2054 } else {
2055 val = __unpack_fe01(tsk->thread.fpexc_mode);
2056 }
2057 return put_user(val, (unsigned int __user *) adr);
2058}
2059
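/*
 * Back end for the PR_SET_ENDIAN prctl: flip the user MSR_LE bit, but
 * only if the CPU actually supports the requested endian mode.
 */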
2060int set_endian(struct task_struct *tsk, unsigned int val)
2061{
2062 struct pt_regs *regs = tsk->thread.regs;
2063
2064 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
2065 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
2066 return -EINVAL;
2067
2068 if (regs == NULL)
2069 return -EINVAL;
2070
2071 if (val == PR_ENDIAN_BIG)
2072 regs_set_return_msr(regs, regs->msr & ~MSR_LE);
2073 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
2074 regs_set_return_msr(regs, regs->msr | MSR_LE);
2075 else
2076 return -EINVAL;
2077
2078 return 0;
2079}
2080
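/*
 * Back end for the PR_GET_ENDIAN prctl: report which endian mode the
 * task is currently executing in.
 */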
2081int get_endian(struct task_struct *tsk, unsigned long adr)
2082{
2083 struct pt_regs *regs = tsk->thread.regs;
2084 unsigned int val;
2085
2086 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
2087 !cpu_has_feature(CPU_FTR_REAL_LE))
2088 return -EINVAL;
2089
2090 if (regs == NULL)
2091 return -EINVAL;
2092
2093 if (regs->msr & MSR_LE) {
2094 if (cpu_has_feature(CPU_FTR_REAL_LE))
2095 val = PR_ENDIAN_LITTLE;
2096 else
2097 val = PR_ENDIAN_PPC_LITTLE;
2098 } else
2099 val = PR_ENDIAN_BIG;
2100
2101 return put_user(val, (unsigned int __user *)adr);
2102}
2103
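/*
 * Back ends for the PR_SET_UNALIGN and PR_GET_UNALIGN prctls: align_ctl
 * tells the alignment fault handler whether to fix up an unaligned
 * access quietly or to send SIGBUS.
 */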
2104int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
2105{
2106 tsk->thread.align_ctl = val;
2107 return 0;
2108}
2109
2110int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
2111{
2112 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
2113}
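/*
 * Illustrative userspace usage of the prctl() back ends above (a minimal
 * sketch, not kernel code; assumes <sys/prctl.h> and <linux/prctl.h>):
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);	  // precise FP exceptions
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG);	  // run big-endian
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS); // SIGBUS on unaligned access
 */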
2114
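/*
 * Return 1 if sp lies within this CPU's hard or soft IRQ stack, with at
 * least nbytes of valid stack above it.
 */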
2115static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
2116 unsigned long nbytes)
2117{
2118 unsigned long stack_page;
2119 unsigned long cpu = task_cpu(p);
2120
2121 stack_page = (unsigned long)hardirq_ctx[cpu];
2122 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2123 return 1;
2124
2125 stack_page = (unsigned long)softirq_ctx[cpu];
2126 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2127 return 1;
2128
2129 return 0;
2130}
2131
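/*
 * As above, but check the per-CPU emergency stack, and on Book3S-64 also
 * the NMI and machine check emergency stacks.
 */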
2132static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
2133 unsigned long nbytes)
2134{
2135#ifdef CONFIG_PPC64
2136 unsigned long stack_page;
2137 unsigned long cpu = task_cpu(p);
2138
2139 if (!paca_ptrs)
2140 return 0;
2141
2142 stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
2143 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2144 return 1;
2145
2146# ifdef CONFIG_PPC_BOOK3S_64
2147 stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
2148 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2149 return 1;
2150
2151 stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
2152 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2153 return 1;
2154# endif
2155#endif
2156
2157 return 0;
2158}
2159
2160/*
2161 * Validate that a stack frame is at least a particular minimum size; used
2162 * when we are looking at an object on the stack beyond the minimum frame.
2163 */
2164int validate_sp_size(unsigned long sp, struct task_struct *p,
2165 unsigned long nbytes)
2166{
2167 unsigned long stack_page = (unsigned long)task_stack_page(p);
2168
2169 if (sp < THREAD_SIZE)
2170 return 0;
2171
2172 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2173 return 1;
2174
2175 if (valid_irq_stack(sp, p, nbytes))
2176 return 1;
2177
2178 return valid_emergency_stack(sp, p, nbytes);
2179}
2180
2181int validate_sp(unsigned long sp, struct task_struct *p)
2182{
2183 return validate_sp_size(sp, p, STACK_FRAME_MIN_SIZE);
2184}
2185
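/*
 * Walk the task's kernel stack looking for the first saved return address
 * that is not in a scheduler function; that is where the task is waiting.
 */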
2186static unsigned long ___get_wchan(struct task_struct *p)
2187{
2188 unsigned long ip, sp;
2189 int count = 0;
2190
2191 sp = p->thread.ksp;
2192 if (!validate_sp(sp, p))
2193 return 0;
2194
2195 do {
2196 sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
2197 if (!validate_sp(sp, p) || task_is_running(p))
2198 return 0;
2199 if (count > 0) {
2200 ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
2201 if (!in_sched_functions(ip))
2202 return ip;
2203 }
2204 } while (count++ < 16);
2205 return 0;
2206}
2207
2208unsigned long __get_wchan(struct task_struct *p)
2209{
2210 unsigned long ret;
2211
2212 if (!try_get_task_stack(p))
2213 return 0;
2214
2215 ret = ___get_wchan(p);
2216
2217 put_task_stack(p);
2218
2219 return ret;
2220}
2221
2222static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2223
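/*
 * Print a backtrace for tsk (or current if tsk is NULL), following the
 * stack back-chain and dumping register state whenever an exception frame
 * is recognized by its regs marker.
 */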
2224void __no_sanitize_address show_stack(struct task_struct *tsk,
2225 unsigned long *stack,
2226 const char *loglvl)
2227{
2228 unsigned long sp, ip, lr, newsp;
2229 int count = 0;
2230 int firstframe = 1;
2231 unsigned long ret_addr;
2232 int ftrace_idx = 0;
2233
2234 if (tsk == NULL)
2235 tsk = current;
2236
2237 if (!try_get_task_stack(tsk))
2238 return;
2239
2240 sp = (unsigned long) stack;
2241 if (sp == 0) {
2242 if (tsk == current)
2243 sp = current_stack_frame();
2244 else
2245 sp = tsk->thread.ksp;
2246 }
2247
2248 lr = 0;
2249 printk("%sCall Trace:\n", loglvl);
2250 do {
2251 if (!validate_sp(sp, tsk))
2252 break;
2253
2254 stack = (unsigned long *) sp;
2255 newsp = stack[0];
2256 ip = stack[STACK_FRAME_LR_SAVE];
2257 if (!firstframe || ip != lr) {
2258 printk("%s["REG"] ["REG"] %pS",
2259 loglvl, sp, ip, (void *)ip);
2260 ret_addr = ftrace_graph_ret_addr(current,
2261 &ftrace_idx, ip, stack);
2262 if (ret_addr != ip)
2263 pr_cont(" (%pS)", (void *)ret_addr);
2264 if (firstframe)
2265 pr_cont(" (unreliable)");
2266 pr_cont("\n");
2267 }
2268 firstframe = 0;
2269
2270 /*
2271 * See if this is an exception frame.
2272 * We look for the "regs" marker in the current frame.
2273 *
2274		 * STACK_SWITCH_FRAME_SIZE is the smallest frame that
2275		 * could hold a pt_regs; if the frame is smaller than
2276		 * that, it cannot contain regs.
2277 */
2278 if (validate_sp_size(sp, tsk, STACK_SWITCH_FRAME_SIZE)
2279 && stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
2280 struct pt_regs *regs = (struct pt_regs *)
2281 (sp + STACK_INT_FRAME_REGS);
2282
2283 lr = regs->link;
2284 printk("%s--- interrupt: %lx at %pS\n",
2285 loglvl, regs->trap, (void *)regs->nip);
2286 __show_regs(regs);
2287 printk("%s--- interrupt: %lx\n",
2288 loglvl, regs->trap);
2289
2290 firstframe = 1;
2291 }
2292
2293 sp = newsp;
2294 } while (count++ < kstack_depth_to_print);
2295
2296 put_task_stack(tsk);
2297}
2298
2299#ifdef CONFIG_PPC64
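/*
 * The runlatch (CTRL[RUN]) is a hint that this hardware thread is doing
 * useful work rather than idling; the PMU can be configured to freeze its
 * counters while the runlatch is clear.
 */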
2300/* Called with hard IRQs off */
2301void notrace __ppc64_runlatch_on(void)
2302{
2303 struct thread_info *ti = current_thread_info();
2304
2305 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2306 /*
2307 * Least significant bit (RUN) is the only writable bit of
2308 * the CTRL register, so we can avoid mfspr. 2.06 is not the
2309 * earliest ISA where this is the case, but it's convenient.
2310 */
2311 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2312 } else {
2313 unsigned long ctrl;
2314
2315 /*
2316 * Some architectures (e.g., Cell) have writable fields other
2317 * than RUN, so do the read-modify-write.
2318 */
2319 ctrl = mfspr(SPRN_CTRLF);
2320 ctrl |= CTRL_RUNLATCH;
2321 mtspr(SPRN_CTRLT, ctrl);
2322 }
2323
2324 ti->local_flags |= _TLF_RUNLATCH;
2325}
2326
2327/* Called with hard IRQs off */
2328void notrace __ppc64_runlatch_off(void)
2329{
2330 struct thread_info *ti = current_thread_info();
2331
2332 ti->local_flags &= ~_TLF_RUNLATCH;
2333
2334 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2335 mtspr(SPRN_CTRLT, 0);
2336 } else {
2337 unsigned long ctrl;
2338
2339 ctrl = mfspr(SPRN_CTRLF);
2340 ctrl &= ~CTRL_RUNLATCH;
2341 mtspr(SPRN_CTRLT, ctrl);
2342 }
2343}
2344#endif /* CONFIG_PPC64 */
2345
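/*
 * Randomize the initial user stack pointer by up to one page, keeping the
 * result 16-byte aligned.
 */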
2346unsigned long arch_align_stack(unsigned long sp)
2347{
2348 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2349 sp -= get_random_u32_below(PAGE_SIZE);
2350 return sp & ~0xf;
2351}