1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Derived from "arch/i386/kernel/process.c"
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
7 * Paul Mackerras (paulus@cs.anu.edu.au)
8 *
9 * PowerPC version
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/sched/debug.h>
16#include <linux/sched/task.h>
17#include <linux/sched/task_stack.h>
18#include <linux/kernel.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/slab.h>
25#include <linux/user.h>
26#include <linux/elf.h>
27#include <linux/prctl.h>
28#include <linux/init_task.h>
29#include <linux/export.h>
30#include <linux/kallsyms.h>
31#include <linux/mqueue.h>
32#include <linux/hardirq.h>
33#include <linux/utsname.h>
34#include <linux/ftrace.h>
35#include <linux/kernel_stat.h>
36#include <linux/personality.h>
37#include <linux/random.h>
38#include <linux/hw_breakpoint.h>
39#include <linux/uaccess.h>
40#include <linux/elf-randomize.h>
41#include <linux/pkeys.h>
42#include <linux/seq_buf.h>
43
44#include <asm/io.h>
45#include <asm/processor.h>
46#include <asm/mmu.h>
47#include <asm/prom.h>
48#include <asm/machdep.h>
49#include <asm/time.h>
50#include <asm/runlatch.h>
51#include <asm/syscalls.h>
52#include <asm/switch_to.h>
53#include <asm/tm.h>
54#include <asm/debug.h>
55#ifdef CONFIG_PPC64
56#include <asm/firmware.h>
57#include <asm/hw_irq.h>
58#endif
59#include <asm/code-patching.h>
60#include <asm/exec.h>
61#include <asm/livepatch.h>
62#include <asm/cpu_has_feature.h>
63#include <asm/asm-prototypes.h>
64#include <asm/stacktrace.h>
65#include <asm/hw_breakpoint.h>
66
67#include <linux/kprobes.h>
68#include <linux/kdebug.h>
69
70/* Transactional Memory debug */
71#ifdef TM_DEBUG_SW
72#define TM_DEBUG(x...) printk(KERN_INFO x)
73#else
74#define TM_DEBUG(x...) do { } while(0)
75#endif
76
77extern unsigned long _get_SP(void);
78
79#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
80/*
81 * Are we running in "Suspend disabled" mode? If so we have to block any
82 * sigreturn that would get us into suspended state, and we also warn in some
83 * other paths that we should never reach with suspend disabled.
84 */
85bool tm_suspend_disabled __ro_after_init = false;
86
87static void check_if_tm_restore_required(struct task_struct *tsk)
88{
89 /*
90 * If we are saving the current thread's registers, and the
91 * thread is in a transactional state, set the TIF_RESTORE_TM
92 * bit so that we know to restore the registers before
93 * returning to userspace.
94 */
95 if (tsk == current && tsk->thread.regs &&
96 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
97 !test_thread_flag(TIF_RESTORE_TM)) {
98 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
99 set_thread_flag(TIF_RESTORE_TM);
100 }
101}
102
103#else
104static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
105#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
106
107bool strict_msr_control;
108EXPORT_SYMBOL(strict_msr_control);
109
110static int __init enable_strict_msr_control(char *str)
111{
112 strict_msr_control = true;
113 pr_info("Enabling strict facility control\n");
114
115 return 0;
116}
117early_param("ppc_strict_facility_enable", enable_strict_msr_control);
118
119/* notrace because it's called by restore_math */
120unsigned long notrace msr_check_and_set(unsigned long bits)
121{
122 unsigned long oldmsr = mfmsr();
123 unsigned long newmsr;
124
125 newmsr = oldmsr | bits;
126
127#ifdef CONFIG_VSX
128 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
129 newmsr |= MSR_VSX;
130#endif
131
132 if (oldmsr != newmsr)
133 mtmsr_isync(newmsr);
134
135 return newmsr;
136}
137EXPORT_SYMBOL_GPL(msr_check_and_set);
138
139/* notrace because it's called by restore_math */
140void notrace __msr_check_and_clear(unsigned long bits)
141{
142 unsigned long oldmsr = mfmsr();
143 unsigned long newmsr;
144
145 newmsr = oldmsr & ~bits;
146
147#ifdef CONFIG_VSX
148 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
149 newmsr &= ~MSR_VSX;
150#endif
151
152 if (oldmsr != newmsr)
153 mtmsr_isync(newmsr);
154}
155EXPORT_SYMBOL(__msr_check_and_clear);
156
157#ifdef CONFIG_PPC_FPU
158static void __giveup_fpu(struct task_struct *tsk)
159{
160 unsigned long msr;
161
162 save_fpu(tsk);
163 msr = tsk->thread.regs->msr;
164 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
165#ifdef CONFIG_VSX
166 if (cpu_has_feature(CPU_FTR_VSX))
167 msr &= ~MSR_VSX;
168#endif
169 tsk->thread.regs->msr = msr;
170}
171
172void giveup_fpu(struct task_struct *tsk)
173{
174 check_if_tm_restore_required(tsk);
175
176 msr_check_and_set(MSR_FP);
177 __giveup_fpu(tsk);
178 msr_check_and_clear(MSR_FP);
179}
180EXPORT_SYMBOL(giveup_fpu);
181
182/*
183 * Make sure the floating-point register state in the
184 * thread_struct is up to date for task tsk.
185 */
186void flush_fp_to_thread(struct task_struct *tsk)
187{
188 if (tsk->thread.regs) {
189 /*
190 * We need to disable preemption here because if we didn't,
191 * another process could get scheduled after the regs->msr
192 * test but before we have finished saving the FP registers
193 * to the thread_struct. That process could take over the
194 * FPU, and then when we get scheduled again we would store
195 * bogus values for the remaining FP registers.
196 */
197 preempt_disable();
198 if (tsk->thread.regs->msr & MSR_FP) {
199 /*
200 * This should only ever be called for current or
201 * for a stopped child process. Since we save away
202 * the FP register state on context switch,
203 * there is something wrong if a stopped child appears
204 * to still have its FP state in the CPU registers.
205 */
206 BUG_ON(tsk != current);
207 giveup_fpu(tsk);
208 }
209 preempt_enable();
210 }
211}
212EXPORT_SYMBOL_GPL(flush_fp_to_thread);
213
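/*
 * Allow the kernel to use the FPU on this CPU.  Must be called with
 * preemption disabled.  If current has live FP state it is saved to the
 * thread_struct first, unless a TM reclaim has already saved the
 * checkpointed state.
 */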
214void enable_kernel_fp(void)
215{
216 unsigned long cpumsr;
217
218 WARN_ON(preemptible());
219
220 cpumsr = msr_check_and_set(MSR_FP);
221
222 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
223 check_if_tm_restore_required(current);
224 /*
225 * If a thread has already been reclaimed then the
226 * checkpointed registers are on the CPU but have definitely
227 * been saved by the reclaim code. Don't need to and *cannot*
228 * giveup as this would save to the 'live' structure not the
229 * checkpointed structure.
230 */
231 if (!MSR_TM_ACTIVE(cpumsr) &&
232 MSR_TM_ACTIVE(current->thread.regs->msr))
233 return;
234 __giveup_fpu(current);
235 }
236}
237EXPORT_SYMBOL(enable_kernel_fp);
238#endif /* CONFIG_PPC_FPU */
239
240#ifdef CONFIG_ALTIVEC
241static void __giveup_altivec(struct task_struct *tsk)
242{
243 unsigned long msr;
244
245 save_altivec(tsk);
246 msr = tsk->thread.regs->msr;
247 msr &= ~MSR_VEC;
248#ifdef CONFIG_VSX
249 if (cpu_has_feature(CPU_FTR_VSX))
250 msr &= ~MSR_VSX;
251#endif
252 tsk->thread.regs->msr = msr;
253}
254
255void giveup_altivec(struct task_struct *tsk)
256{
257 check_if_tm_restore_required(tsk);
258
259 msr_check_and_set(MSR_VEC);
260 __giveup_altivec(tsk);
261 msr_check_and_clear(MSR_VEC);
262}
263EXPORT_SYMBOL(giveup_altivec);
264
265void enable_kernel_altivec(void)
266{
267 unsigned long cpumsr;
268
269 WARN_ON(preemptible());
270
271 cpumsr = msr_check_and_set(MSR_VEC);
272
273 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
274 check_if_tm_restore_required(current);
275 /*
276 * If a thread has already been reclaimed then the
277 * checkpointed registers are on the CPU but have definitely
278 * been saved by the reclaim code. Don't need to and *cannot*
279 * giveup as this would save to the 'live' structure not the
280 * checkpointed structure.
281 */
282 if (!MSR_TM_ACTIVE(cpumsr) &&
283 MSR_TM_ACTIVE(current->thread.regs->msr))
284 return;
285 __giveup_altivec(current);
286 }
287}
288EXPORT_SYMBOL(enable_kernel_altivec);
289
290/*
291 * Make sure the VMX/Altivec register state in the
292 * thread_struct is up to date for task tsk.
293 */
294void flush_altivec_to_thread(struct task_struct *tsk)
295{
296 if (tsk->thread.regs) {
297 preempt_disable();
298 if (tsk->thread.regs->msr & MSR_VEC) {
299 BUG_ON(tsk != current);
300 giveup_altivec(tsk);
301 }
302 preempt_enable();
303 }
304}
305EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
306#endif /* CONFIG_ALTIVEC */
307
308#ifdef CONFIG_VSX
309static void __giveup_vsx(struct task_struct *tsk)
310{
311 unsigned long msr = tsk->thread.regs->msr;
312
313 /*
314 * We should never be setting MSR_VSX without also setting
315 * MSR_FP and MSR_VEC
316 */
317 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
318
319 /* __giveup_fpu will clear MSR_VSX */
320 if (msr & MSR_FP)
321 __giveup_fpu(tsk);
322 if (msr & MSR_VEC)
323 __giveup_altivec(tsk);
324}
325
326static void giveup_vsx(struct task_struct *tsk)
327{
328 check_if_tm_restore_required(tsk);
329
330 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
331 __giveup_vsx(tsk);
332 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
333}
334
335void enable_kernel_vsx(void)
336{
337 unsigned long cpumsr;
338
339 WARN_ON(preemptible());
340
341 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
342
343 if (current->thread.regs &&
344 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
345 check_if_tm_restore_required(current);
346 /*
347 * If a thread has already been reclaimed then the
348 * checkpointed registers are on the CPU but have definitely
349 * been saved by the reclaim code. Don't need to and *cannot*
350 * giveup as this would save to the 'live' structure not the
351 * checkpointed structure.
352 */
353 if (!MSR_TM_ACTIVE(cpumsr) &&
354 MSR_TM_ACTIVE(current->thread.regs->msr))
355 return;
356 __giveup_vsx(current);
357 }
358}
359EXPORT_SYMBOL(enable_kernel_vsx);
360
361void flush_vsx_to_thread(struct task_struct *tsk)
362{
363 if (tsk->thread.regs) {
364 preempt_disable();
365 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
366 BUG_ON(tsk != current);
367 giveup_vsx(tsk);
368 }
369 preempt_enable();
370 }
371}
372EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
373#endif /* CONFIG_VSX */
374
375#ifdef CONFIG_SPE
376void giveup_spe(struct task_struct *tsk)
377{
378 check_if_tm_restore_required(tsk);
379
380 msr_check_and_set(MSR_SPE);
381 __giveup_spe(tsk);
382 msr_check_and_clear(MSR_SPE);
383}
384EXPORT_SYMBOL(giveup_spe);
385
386void enable_kernel_spe(void)
387{
388 WARN_ON(preemptible());
389
390 msr_check_and_set(MSR_SPE);
391
392 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
393 check_if_tm_restore_required(current);
394 __giveup_spe(current);
395 }
396}
397EXPORT_SYMBOL(enable_kernel_spe);
398
399void flush_spe_to_thread(struct task_struct *tsk)
400{
401 if (tsk->thread.regs) {
402 preempt_disable();
403 if (tsk->thread.regs->msr & MSR_SPE) {
404 BUG_ON(tsk != current);
405 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
406 giveup_spe(tsk);
407 }
408 preempt_enable();
409 }
410}
411#endif /* CONFIG_SPE */
412
413static unsigned long msr_all_available;
414
415static int __init init_msr_all_available(void)
416{
417#ifdef CONFIG_PPC_FPU
418 msr_all_available |= MSR_FP;
419#endif
420#ifdef CONFIG_ALTIVEC
421 if (cpu_has_feature(CPU_FTR_ALTIVEC))
422 msr_all_available |= MSR_VEC;
423#endif
424#ifdef CONFIG_VSX
425 if (cpu_has_feature(CPU_FTR_VSX))
426 msr_all_available |= MSR_VSX;
427#endif
428#ifdef CONFIG_SPE
429 if (cpu_has_feature(CPU_FTR_SPE))
430 msr_all_available |= MSR_SPE;
431#endif
432
433 return 0;
434}
435early_initcall(init_msr_all_available);
436
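/*
 * Save all of tsk's facility (FP/VMX/SPE) register state into its
 * thread_struct and give the facilities up: the corresponding bits are
 * cleared in the task's user MSR, so the state has to be restored
 * before the task can use those facilities again.
 */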
437void giveup_all(struct task_struct *tsk)
438{
439 unsigned long usermsr;
440
441 if (!tsk->thread.regs)
442 return;
443
444 check_if_tm_restore_required(tsk);
445
446 usermsr = tsk->thread.regs->msr;
447
448 if ((usermsr & msr_all_available) == 0)
449 return;
450
451 msr_check_and_set(msr_all_available);
452
453 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
454
455#ifdef CONFIG_PPC_FPU
456 if (usermsr & MSR_FP)
457 __giveup_fpu(tsk);
458#endif
459#ifdef CONFIG_ALTIVEC
460 if (usermsr & MSR_VEC)
461 __giveup_altivec(tsk);
462#endif
463#ifdef CONFIG_SPE
464 if (usermsr & MSR_SPE)
465 __giveup_spe(tsk);
466#endif
467
468 msr_check_and_clear(msr_all_available);
469}
470EXPORT_SYMBOL(giveup_all);
471
472#ifdef CONFIG_PPC_BOOK3S_64
473#ifdef CONFIG_PPC_FPU
474static bool should_restore_fp(void)
475{
476 if (current->thread.load_fp) {
477 current->thread.load_fp++;
478 return true;
479 }
480 return false;
481}
482
483static void do_restore_fp(void)
484{
485 load_fp_state(&current->thread.fp_state);
486}
487#else
488static bool should_restore_fp(void) { return false; }
489static void do_restore_fp(void) { }
490#endif /* CONFIG_PPC_FPU */
491
492#ifdef CONFIG_ALTIVEC
493static bool should_restore_altivec(void)
494{
495 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
496 current->thread.load_vec++;
497 return true;
498 }
499 return false;
500}
501
502static void do_restore_altivec(void)
503{
504 load_vr_state(&current->thread.vr_state);
505 current->thread.used_vr = 1;
506}
507#else
508static bool should_restore_altivec(void) { return false; }
509static void do_restore_altivec(void) { }
510#endif /* CONFIG_ALTIVEC */
511
512#ifdef CONFIG_VSX
513static bool should_restore_vsx(void)
514{
515 if (cpu_has_feature(CPU_FTR_VSX))
516 return true;
517 return false;
518}
519static void do_restore_vsx(void)
520{
521 current->thread.used_vsr = 1;
522}
523#else
524static bool should_restore_vsx(void) { return false; }
525static void do_restore_vsx(void) { }
526#endif /* CONFIG_VSX */
527
528/*
529 * The exception exit path calls restore_math() with interrupts hard disabled
530 * but the soft irq state not "reconciled". ftrace code that calls
531 * local_irq_save/restore causes warnings.
532 *
533 * Rather than complicate the exit path, just don't trace restore_math. This
534 * could be done by having ftrace entry code check for this un-reconciled
535 * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
536 * temporarily fix it up for the duration of the ftrace call.
537 */
538void notrace restore_math(struct pt_regs *regs)
539{
540 unsigned long msr;
541 unsigned long new_msr = 0;
542
543 msr = regs->msr;
544
545 /*
546 * new_msr tracks the facilities that are to be restored. Only reload
547 * if the bit is not set in the user MSR (if it is set, the registers
548 * are live for the user thread).
549 */
550 if ((!(msr & MSR_FP)) && should_restore_fp())
551 new_msr |= MSR_FP;
552
553 if ((!(msr & MSR_VEC)) && should_restore_altivec())
554 new_msr |= MSR_VEC;
555
556 if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
557 if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
558 new_msr |= MSR_VSX;
559 }
560
561 if (new_msr) {
562 unsigned long fpexc_mode = 0;
563
564 msr_check_and_set(new_msr);
565
566 if (new_msr & MSR_FP) {
567 do_restore_fp();
568
569 // This also covers VSX, because VSX implies FP
570 fpexc_mode = current->thread.fpexc_mode;
571 }
572
573 if (new_msr & MSR_VEC)
574 do_restore_altivec();
575
576 if (new_msr & MSR_VSX)
577 do_restore_vsx();
578
579 msr_check_and_clear(new_msr);
580
581 regs->msr |= new_msr | fpexc_mode;
582 }
583}
584#endif
585
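/*
 * Save tsk's live FP/VMX state into its thread_struct without giving
 * the facilities up: unlike giveup_all(), the FP/VMX bits stay set in
 * the task's user MSR.  SPE state, if any, is given up as usual.
 */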
586static void save_all(struct task_struct *tsk)
587{
588 unsigned long usermsr;
589
590 if (!tsk->thread.regs)
591 return;
592
593 usermsr = tsk->thread.regs->msr;
594
595 if ((usermsr & msr_all_available) == 0)
596 return;
597
598 msr_check_and_set(msr_all_available);
599
600 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
601
602 if (usermsr & MSR_FP)
603 save_fpu(tsk);
604
605 if (usermsr & MSR_VEC)
606 save_altivec(tsk);
607
608 if (usermsr & MSR_SPE)
609 __giveup_spe(tsk);
610
611 msr_check_and_clear(msr_all_available);
612 thread_pkey_regs_save(&tsk->thread);
613}
614
615void flush_all_to_thread(struct task_struct *tsk)
616{
617 if (tsk->thread.regs) {
618 preempt_disable();
619 BUG_ON(tsk != current);
620#ifdef CONFIG_SPE
621 if (tsk->thread.regs->msr & MSR_SPE)
622 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
623#endif
624 save_all(tsk);
625
626 preempt_enable();
627 }
628}
629EXPORT_SYMBOL(flush_all_to_thread);
630
631#ifdef CONFIG_PPC_ADV_DEBUG_REGS
632void do_send_trap(struct pt_regs *regs, unsigned long address,
633 unsigned long error_code, int breakpt)
634{
635 current->thread.trap_nr = TRAP_HWBKPT;
636 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
637 11, SIGSEGV) == NOTIFY_STOP)
638 return;
639
640 /* Deliver the signal to userspace */
641 force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
642 (void __user *)address);
643}
644#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
645void do_break (struct pt_regs *regs, unsigned long address,
646 unsigned long error_code)
647{
648 current->thread.trap_nr = TRAP_HWBKPT;
649 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
650 11, SIGSEGV) == NOTIFY_STOP)
651 return;
652
653 if (debugger_break_match(regs))
654 return;
655
656 /* Deliver the signal to userspace */
657 force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
658}
659#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
660
661static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
662
663#ifdef CONFIG_PPC_ADV_DEBUG_REGS
664/*
665 * Set the debug registers back to their default "safe" values.
666 */
667static void set_debug_reg_defaults(struct thread_struct *thread)
668{
669 thread->debug.iac1 = thread->debug.iac2 = 0;
670#if CONFIG_PPC_ADV_DEBUG_IACS > 2
671 thread->debug.iac3 = thread->debug.iac4 = 0;
672#endif
673 thread->debug.dac1 = thread->debug.dac2 = 0;
674#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
675 thread->debug.dvc1 = thread->debug.dvc2 = 0;
676#endif
677 thread->debug.dbcr0 = 0;
678#ifdef CONFIG_BOOKE
679 /*
680 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
681 */
682 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
683 DBCR1_IAC3US | DBCR1_IAC4US;
684 /*
685 * Force Data Address Compare User/Supervisor bits to be User-only
686 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
687 */
688 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
689#else
690 thread->debug.dbcr1 = 0;
691#endif
692}
693
694static void prime_debug_regs(struct debug_reg *debug)
695{
696 /*
697 * We could have inherited MSR_DE from userspace, since
698 * it doesn't get cleared on exception entry. Make sure
699 * MSR_DE is clear before we enable any debug events.
700 */
701 mtmsr(mfmsr() & ~MSR_DE);
702
703 mtspr(SPRN_IAC1, debug->iac1);
704 mtspr(SPRN_IAC2, debug->iac2);
705#if CONFIG_PPC_ADV_DEBUG_IACS > 2
706 mtspr(SPRN_IAC3, debug->iac3);
707 mtspr(SPRN_IAC4, debug->iac4);
708#endif
709 mtspr(SPRN_DAC1, debug->dac1);
710 mtspr(SPRN_DAC2, debug->dac2);
711#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
712 mtspr(SPRN_DVC1, debug->dvc1);
713 mtspr(SPRN_DVC2, debug->dvc2);
714#endif
715 mtspr(SPRN_DBCR0, debug->dbcr0);
716 mtspr(SPRN_DBCR1, debug->dbcr1);
717#ifdef CONFIG_BOOKE
718 mtspr(SPRN_DBCR2, debug->dbcr2);
719#endif
720}
721/*
722 * If either the old or the new thread is making use of the
723 * debug registers, set the debug registers from the values
724 * stored in the new thread.
725 */
726void switch_booke_debug_regs(struct debug_reg *new_debug)
727{
728 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
729 || (new_debug->dbcr0 & DBCR0_IDM))
730 prime_debug_regs(new_debug);
731}
732EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
733#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
734#ifndef CONFIG_HAVE_HW_BREAKPOINT
735static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
736{
737 preempt_disable();
738 __set_breakpoint(i, brk);
739 preempt_enable();
740}
741
742static void set_debug_reg_defaults(struct thread_struct *thread)
743{
744 int i;
745 struct arch_hw_breakpoint null_brk = {0};
746
747 for (i = 0; i < nr_wp_slots(); i++) {
748 thread->hw_brk[i] = null_brk;
749 if (ppc_breakpoint_available())
750 set_breakpoint(i, &thread->hw_brk[i]);
751 }
752}
753
754static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
755 struct arch_hw_breakpoint *b)
756{
757 if (a->address != b->address)
758 return false;
759 if (a->type != b->type)
760 return false;
761 if (a->len != b->len)
762 return false;
763 /* no need to check hw_len. it's calculated from address and len */
764 return true;
765}
766
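/*
 * Load the incoming task's hardware breakpoints into the debug
 * registers, skipping any slot whose contents already match what is
 * installed on this CPU.
 */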
767static void switch_hw_breakpoint(struct task_struct *new)
768{
769 int i;
770
771 for (i = 0; i < nr_wp_slots(); i++) {
772 if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
773 &new->thread.hw_brk[i])))
774 continue;
775
776 __set_breakpoint(i, &new->thread.hw_brk[i]);
777 }
778}
779#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
780#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
781
782#ifdef CONFIG_PPC_ADV_DEBUG_REGS
783static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
784{
785 mtspr(SPRN_DAC1, dabr);
786#ifdef CONFIG_PPC_47x
787 isync();
788#endif
789 return 0;
790}
791#elif defined(CONFIG_PPC_BOOK3S)
792static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
793{
794 mtspr(SPRN_DABR, dabr);
795 if (cpu_has_feature(CPU_FTR_DABRX))
796 mtspr(SPRN_DABRX, dabrx);
797 return 0;
798}
799#else
800static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
801{
802 return -EINVAL;
803}
804#endif
805
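/* Program the DABR/DABRX pair, going through the platform hook if one is set. */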
806static inline int set_dabr(struct arch_hw_breakpoint *brk)
807{
808 unsigned long dabr, dabrx;
809
810 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
811 dabrx = ((brk->type >> 3) & 0x7);
812
813 if (ppc_md.set_dabr)
814 return ppc_md.set_dabr(dabr, dabrx);
815
816 return __set_dabr(dabr, dabrx);
817}
818
819static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
820{
821 unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
822 LCTRL1_CRWF_RW;
823 unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
824 unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
825 unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
826
827 if (start_addr == 0)
828 lctrl2 |= LCTRL2_LW0LA_F;
829 else if (end_addr == 0)
830 lctrl2 |= LCTRL2_LW0LA_E;
831 else
832 lctrl2 |= LCTRL2_LW0LA_EandF;
833
834 mtspr(SPRN_LCTRL2, 0);
835
836 if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
837 return 0;
838
839 if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
840 lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
841 if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
842 lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
843
844 mtspr(SPRN_CMPE, start_addr - 1);
845 mtspr(SPRN_CMPF, end_addr);
846 mtspr(SPRN_LCTRL1, lctrl1);
847 mtspr(SPRN_LCTRL2, lctrl2);
848
849 return 0;
850}
851
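/*
 * Install a breakpoint in slot nr using whichever facility the CPU
 * provides: DAWR on POWER8 and later, the 8xx load/store watchpoints,
 * or the classic DABR/DABRX pair.
 */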
852void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
853{
854 memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
855
856 if (dawr_enabled())
857 // Power8 or later
858 set_dawr(nr, brk);
859 else if (IS_ENABLED(CONFIG_PPC_8xx))
860 set_breakpoint_8xx(brk);
861 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
862 // Power7 or earlier
863 set_dabr(brk);
864 else
865 // Shouldn't happen due to higher level checks
866 WARN_ON_ONCE(1);
867}
868
869/* Check if we have DAWR or DABR hardware */
870bool ppc_breakpoint_available(void)
871{
872 if (dawr_enabled())
873 return true; /* POWER8 DAWR or POWER9 forced DAWR */
874 if (cpu_has_feature(CPU_FTR_ARCH_207S))
875 return false; /* POWER9 with DAWR disabled */
876 /* DABR: Everything but POWER8 and POWER9 */
877 return true;
878}
879EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
880
881#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
882
883static inline bool tm_enabled(struct task_struct *tsk)
884{
885 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
886}
887
888static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
889{
890 /*
891 * Use the current MSR TM suspended bit to track if we have
892 * checkpointed state outstanding.
893 * On signal delivery, we'd normally reclaim the checkpointed
894 * state to obtain the stack pointer (see get_tm_stackpointer()).
895 * This will then directly return to userspace without going
896 * through __switch_to(). However, if the stack frame is bad,
897 * we need to exit this thread which calls __switch_to() which
898 * will again attempt to reclaim the already saved tm state.
899 * Hence we need to check that we've not already reclaimed
900 * this state.
901 * We do this using the current MSR, rather than tracking it in
902 * some specific thread_struct bit, as it has the additional
903 * benefit of checking for a potential TM bad thing exception.
904 */
905 if (!MSR_TM_SUSPENDED(mfmsr()))
906 return;
907
908 giveup_all(container_of(thr, struct task_struct, thread));
909
910 tm_reclaim(thr, cause);
911
912 /*
913 * If we are in a transaction and FP is off then we can't have
914 * used FP inside that transaction. Hence the checkpointed
915 * state is the same as the live state. We need to copy the
916 * live state to the checkpointed state so that when the
917 * transaction is restored, the checkpointed state is correct
918 * and the aborted transaction sees the correct state. We use
919 * ckpt_regs.msr here as that's what tm_reclaim will use to
920 * determine if it's going to write the checkpointed state or
921 * not. So either this will write the checkpointed registers,
922 * or reclaim will. Similarly for VMX.
923 */
924 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
925 memcpy(&thr->ckfp_state, &thr->fp_state,
926 sizeof(struct thread_fp_state));
927 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
928 memcpy(&thr->ckvr_state, &thr->vr_state,
929 sizeof(struct thread_vr_state));
930}
931
932void tm_reclaim_current(uint8_t cause)
933{
934 tm_enable();
935 tm_reclaim_thread(&current->thread, cause);
936}
937
938static inline void tm_reclaim_task(struct task_struct *tsk)
939{
940 /* We have to work out if we're switching from/to a task that's in the
941 * middle of a transaction.
942 *
943 * In switching we need to maintain a 2nd register state as
944 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
945 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
946 * ckvr_state
947 *
948 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
949 */
950 struct thread_struct *thr = &tsk->thread;
951
952 if (!thr->regs)
953 return;
954
955 if (!MSR_TM_ACTIVE(thr->regs->msr))
956 goto out_and_saveregs;
957
958 WARN_ON(tm_suspend_disabled);
959
960 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
961 "ccr=%lx, msr=%lx, trap=%lx)\n",
962 tsk->pid, thr->regs->nip,
963 thr->regs->ccr, thr->regs->msr,
964 thr->regs->trap);
965
966 tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
967
968 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
969 tsk->pid);
970
971out_and_saveregs:
972 /* Always save the regs here, even if a transaction's not active.
973 * This context-switches a thread's TM info SPRs. We do it here to
974 * be consistent with the restore path (in recheckpoint) which
975 * cannot happen later in _switch().
976 */
977 tm_save_sprs(thr);
978}
979
980extern void __tm_recheckpoint(struct thread_struct *thread);
981
982void tm_recheckpoint(struct thread_struct *thread)
983{
984 unsigned long flags;
985
986 if (!(thread->regs->msr & MSR_TM))
987 return;
988
989 /* We really can't be interrupted here as the TEXASR registers can't
990 * change and later in the trecheckpoint code, we have a userspace R1.
991 * So let's hard disable over this region.
992 */
993 local_irq_save(flags);
994 hard_irq_disable();
995
996 /* The TM SPRs are restored here, so that TEXASR.FS can be set
997 * before the trecheckpoint and no explosion occurs.
998 */
999 tm_restore_sprs(thread);
1000
1001 __tm_recheckpoint(thread);
1002
1003 local_irq_restore(flags);
1004}
1005
1006static inline void tm_recheckpoint_new_task(struct task_struct *new)
1007{
1008 if (!cpu_has_feature(CPU_FTR_TM))
1009 return;
1010
1011 /* Recheckpoint the registers of the thread we're about to switch to.
1012 *
1013 * If the task was using FP, we non-lazily reload both the original and
1014 * the speculative FP register states. This is because the kernel
1015 * doesn't see if/when a TM rollback occurs, so if we take an FP
1016 * unavailable later, we are unable to determine which set of FP regs
1017 * need to be restored.
1018 */
1019 if (!tm_enabled(new))
1020 return;
1021
1022 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
1023 tm_restore_sprs(&new->thread);
1024 return;
1025 }
1026 /* Recheckpoint to restore original checkpointed register state. */
1027 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
1028 new->pid, new->thread.regs->msr);
1029
1030 tm_recheckpoint(&new->thread);
1031
1032 /*
1033 * The checkpointed state has been restored but the live state has
1034 * not, ensure all the math functionality is turned off to trigger
1035 * restore_math() to reload.
1036 */
1037 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
1038
1039 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
1040 "(kernel msr 0x%lx)\n",
1041 new->pid, mfmsr());
1042}
1043
1044static inline void __switch_to_tm(struct task_struct *prev,
1045 struct task_struct *new)
1046{
1047 if (cpu_has_feature(CPU_FTR_TM)) {
1048 if (tm_enabled(prev) || tm_enabled(new))
1049 tm_enable();
1050
1051 if (tm_enabled(prev)) {
1052 prev->thread.load_tm++;
1053 tm_reclaim_task(prev);
1054 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
1055 prev->thread.regs->msr &= ~MSR_TM;
1056 }
1057
1058 tm_recheckpoint_new_task(new);
1059 }
1060}
1061
1062/*
1063 * This is called if we are on the way out to userspace and the
1064 * TIF_RESTORE_TM flag is set. It checks if we need to reload
1065 * FP and/or vector state and does so if necessary.
1066 * If userspace is inside a transaction (whether active or
1067 * suspended) and FP/VMX/VSX instructions have ever been enabled
1068 * inside that transaction, then we have to keep them enabled
1069 * and keep the FP/VMX/VSX state loaded while ever the transaction
1070 * continues. The reason is that if we didn't, and subsequently
1071 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
1072 * we don't know whether it's the same transaction, and thus we
1073 * don't know which of the checkpointed state and the transactional
1074 * state to use.
1075 */
1076void restore_tm_state(struct pt_regs *regs)
1077{
1078 unsigned long msr_diff;
1079
1080 /*
1081 * This is the only moment we should clear TIF_RESTORE_TM as
1082 * it is here that ckpt_regs.msr and pt_regs.msr become the same
1083 * again; anything else could lead to an incorrect ckpt_msr being
1084 * saved and therefore incorrect signal contexts.
1085 */
1086 clear_thread_flag(TIF_RESTORE_TM);
1087 if (!MSR_TM_ACTIVE(regs->msr))
1088 return;
1089
1090 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1091 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1092
1093 /* Ensure that restore_math() will restore */
1094 if (msr_diff & MSR_FP)
1095 current->thread.load_fp = 1;
1096#ifdef CONFIG_ALTIVEC
1097 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1098 current->thread.load_vec = 1;
1099#endif
1100 restore_math(regs);
1101
1102 regs->msr |= msr_diff;
1103}
1104
1105#else
1106#define tm_recheckpoint_new_task(new)
1107#define __switch_to_tm(prev, new)
1108#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1109
1110static inline void save_sprs(struct thread_struct *t)
1111{
1112#ifdef CONFIG_ALTIVEC
1113 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1114 t->vrsave = mfspr(SPRN_VRSAVE);
1115#endif
1116#ifdef CONFIG_PPC_BOOK3S_64
1117 if (cpu_has_feature(CPU_FTR_DSCR))
1118 t->dscr = mfspr(SPRN_DSCR);
1119
1120 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1121 t->bescr = mfspr(SPRN_BESCR);
1122 t->ebbhr = mfspr(SPRN_EBBHR);
1123 t->ebbrr = mfspr(SPRN_EBBRR);
1124
1125 t->fscr = mfspr(SPRN_FSCR);
1126
1127 /*
1128 * Note that the TAR is not available for use in the kernel.
1129 * (To provide this, the TAR should be backed up/restored on
1130 * exception entry/exit instead, and be in pt_regs. FIXME,
1131 * this should be in pt_regs anyway (for debug).)
1132 */
1133 t->tar = mfspr(SPRN_TAR);
1134 }
1135#endif
1136
1137 thread_pkey_regs_save(t);
1138}
1139
1140static inline void restore_sprs(struct thread_struct *old_thread,
1141 struct thread_struct *new_thread)
1142{
1143#ifdef CONFIG_ALTIVEC
1144 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1145 old_thread->vrsave != new_thread->vrsave)
1146 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1147#endif
1148#ifdef CONFIG_PPC_BOOK3S_64
1149 if (cpu_has_feature(CPU_FTR_DSCR)) {
1150 u64 dscr = get_paca()->dscr_default;
1151 if (new_thread->dscr_inherit)
1152 dscr = new_thread->dscr;
1153
1154 if (old_thread->dscr != dscr)
1155 mtspr(SPRN_DSCR, dscr);
1156 }
1157
1158 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1159 if (old_thread->bescr != new_thread->bescr)
1160 mtspr(SPRN_BESCR, new_thread->bescr);
1161 if (old_thread->ebbhr != new_thread->ebbhr)
1162 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1163 if (old_thread->ebbrr != new_thread->ebbrr)
1164 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1165
1166 if (old_thread->fscr != new_thread->fscr)
1167 mtspr(SPRN_FSCR, new_thread->fscr);
1168
1169 if (old_thread->tar != new_thread->tar)
1170 mtspr(SPRN_TAR, new_thread->tar);
1171 }
1172
1173 if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1174 old_thread->tidr != new_thread->tidr)
1175 mtspr(SPRN_TIDR, new_thread->tidr);
1176#endif
1177
1178 thread_pkey_regs_restore(new_thread, old_thread);
1179}
1180
1181struct task_struct *__switch_to(struct task_struct *prev,
1182 struct task_struct *new)
1183{
1184 struct thread_struct *new_thread, *old_thread;
1185 struct task_struct *last;
1186#ifdef CONFIG_PPC_BOOK3S_64
1187 struct ppc64_tlb_batch *batch;
1188#endif
1189
1190 new_thread = &new->thread;
1191 old_thread = &current->thread;
1192
1193 WARN_ON(!irqs_disabled());
1194
1195#ifdef CONFIG_PPC_BOOK3S_64
1196 batch = this_cpu_ptr(&ppc64_tlb_batch);
1197 if (batch->active) {
1198 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1199 if (batch->index)
1200 __flush_tlb_pending(batch);
1201 batch->active = 0;
1202 }
1203#endif /* CONFIG_PPC_BOOK3S_64 */
1204
1205#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1206 switch_booke_debug_regs(&new->thread.debug);
1207#else
1208/*
1209 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1210 * schedule DABR
1211 */
1212#ifndef CONFIG_HAVE_HW_BREAKPOINT
1213 switch_hw_breakpoint(new);
1214#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1215#endif
1216
1217 /*
1218 * We need to save SPRs before treclaim/trecheckpoint as these will
1219 * change a number of them.
1220 */
1221 save_sprs(&prev->thread);
1222
1223 /* Save FPU, Altivec, VSX and SPE state */
1224 giveup_all(prev);
1225
1226 __switch_to_tm(prev, new);
1227
1228 if (!radix_enabled()) {
1229 /*
1230 * We can't take a PMU exception inside _switch() since there
1231 * is a window where the kernel stack SLB and the kernel stack
1232 * are out of sync. Hard disable here.
1233 */
1234 hard_irq_disable();
1235 }
1236
1237 /*
1238 * Call restore_sprs() before calling _switch(). If we move it after
1239 * _switch() then we miss out on calling it for new tasks. The reason
1240 * for this is we manually create a stack frame for new tasks that
1241 * directly returns through ret_from_fork() or
1242 * ret_from_kernel_thread(). See copy_thread() for details.
1243 */
1244 restore_sprs(old_thread, new_thread);
1245
1246 last = _switch(old_thread, new_thread);
1247
1248#ifdef CONFIG_PPC_BOOK3S_64
1249 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1250 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1251 batch = this_cpu_ptr(&ppc64_tlb_batch);
1252 batch->active = 1;
1253 }
1254
1255 if (current->thread.regs) {
1256 restore_math(current->thread.regs);
1257
1258 /*
1259 * The copy-paste buffer can only store into foreign real
1260 * addresses, so unprivileged processes can not see the
1261 * data or use it in any way unless they have foreign real
1262 * mappings. If the new process has the foreign real address
1263 * mappings, we must issue a cp_abort to clear any state and
1264 * prevent snooping, corruption or a covert channel.
1265 */
1266 if (current->mm &&
1267 atomic_read(&current->mm->context.vas_windows))
1268 asm volatile(PPC_CP_ABORT);
1269 }
1270#endif /* CONFIG_PPC_BOOK3S_64 */
1271
1272 return last;
1273}
1274
1275#define NR_INSN_TO_PRINT 16
1276
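/*
 * Dump the instructions surrounding regs->nip, with the faulting
 * instruction marked by angle brackets.
 */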
1277static void show_instructions(struct pt_regs *regs)
1278{
1279 int i;
1280 unsigned long nip = regs->nip;
1281 unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1282
1283 printk("Instruction dump:");
1284
1285 /*
1286 * If we were executing with the MMU off for instructions, adjust pc
1287 * rather than printing XXXXXXXX.
1288 */
1289 if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
1290 pc = (unsigned long)phys_to_virt(pc);
1291 nip = (unsigned long)phys_to_virt(regs->nip);
1292 }
1293
1294 for (i = 0; i < NR_INSN_TO_PRINT; i++) {
1295 int instr;
1296
1297 if (!(i % 8))
1298 pr_cont("\n");
1299
1300 if (!__kernel_text_address(pc) ||
1301 get_kernel_nofault(instr, (const void *)pc)) {
1302 pr_cont("XXXXXXXX ");
1303 } else {
1304 if (nip == pc)
1305 pr_cont("<%08x> ", instr);
1306 else
1307 pr_cont("%08x ", instr);
1308 }
1309
1310 pc += sizeof(int);
1311 }
1312
1313 pr_cont("\n");
1314}
1315
1316void show_user_instructions(struct pt_regs *regs)
1317{
1318 unsigned long pc;
1319 int n = NR_INSN_TO_PRINT;
1320 struct seq_buf s;
1321 char buf[96]; /* enough for 8 times 9 + 2 chars */
1322
1323 pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1324
1325 seq_buf_init(&s, buf, sizeof(buf));
1326
1327 while (n) {
1328 int i;
1329
1330 seq_buf_clear(&s);
1331
1332 for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
1333 int instr;
1334
1335 if (copy_from_user_nofault(&instr, (void __user *)pc,
1336 sizeof(instr))) {
1337 seq_buf_printf(&s, "XXXXXXXX ");
1338 continue;
1339 }
1340 seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
1341 }
1342
1343 if (!seq_buf_has_overflowed(&s))
1344 pr_info("%s[%d]: code: %s\n", current->comm,
1345 current->pid, s.buffer);
1346 }
1347}
1348
1349struct regbit {
1350 unsigned long bit;
1351 const char *name;
1352};
1353
1354static struct regbit msr_bits[] = {
1355#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1356 {MSR_SF, "SF"},
1357 {MSR_HV, "HV"},
1358#endif
1359 {MSR_VEC, "VEC"},
1360 {MSR_VSX, "VSX"},
1361#ifdef CONFIG_BOOKE
1362 {MSR_CE, "CE"},
1363#endif
1364 {MSR_EE, "EE"},
1365 {MSR_PR, "PR"},
1366 {MSR_FP, "FP"},
1367 {MSR_ME, "ME"},
1368#ifdef CONFIG_BOOKE
1369 {MSR_DE, "DE"},
1370#else
1371 {MSR_SE, "SE"},
1372 {MSR_BE, "BE"},
1373#endif
1374 {MSR_IR, "IR"},
1375 {MSR_DR, "DR"},
1376 {MSR_PMM, "PMM"},
1377#ifndef CONFIG_BOOKE
1378 {MSR_RI, "RI"},
1379 {MSR_LE, "LE"},
1380#endif
1381 {0, NULL}
1382};
1383
1384static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1385{
1386 const char *s = "";
1387
1388 for (; bits->bit; ++bits)
1389 if (val & bits->bit) {
1390 pr_cont("%s%s", s, bits->name);
1391 s = sep;
1392 }
1393}
1394
1395#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1396static struct regbit msr_tm_bits[] = {
1397 {MSR_TS_T, "T"},
1398 {MSR_TS_S, "S"},
1399 {MSR_TM, "E"},
1400 {0, NULL}
1401};
1402
1403static void print_tm_bits(unsigned long val)
1404{
1405/*
1406 * This only prints something if at least one of the TM bits is set.
1407 * Inside the TM[], the output means:
1408 * E: Enabled (bit 32)
1409 * S: Suspended (bit 33)
1410 * T: Transactional (bit 34)
1411 */
1412 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1413 pr_cont(",TM[");
1414 print_bits(val, msr_tm_bits, "");
1415 pr_cont("]");
1416 }
1417}
1418#else
1419static void print_tm_bits(unsigned long val) {}
1420#endif
1421
1422static void print_msr_bits(unsigned long val)
1423{
1424 pr_cont("<");
1425 print_bits(val, msr_bits, ",");
1426 print_tm_bits(val);
1427 pr_cont(">");
1428}
1429
1430#ifdef CONFIG_PPC64
1431#define REG "%016lx"
1432#define REGS_PER_LINE 4
1433#define LAST_VOLATILE 13
1434#else
1435#define REG "%08lx"
1436#define REGS_PER_LINE 8
1437#define LAST_VOLATILE 12
1438#endif
1439
1440void show_regs(struct pt_regs * regs)
1441{
1442 int i, trap;
1443
1444 show_regs_print_info(KERN_DEFAULT);
1445
1446 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1447 regs->nip, regs->link, regs->ctr);
1448 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1449 regs, regs->trap, print_tainted(), init_utsname()->release);
1450 printk("MSR: "REG" ", regs->msr);
1451 print_msr_bits(regs->msr);
1452 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1453 trap = TRAP(regs);
1454 if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
1455 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1456 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1457#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1458 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1459#else
1460 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1461#endif
1462#ifdef CONFIG_PPC64
1463 pr_cont("IRQMASK: %lx ", regs->softe);
1464#endif
1465#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1466 if (MSR_TM_ACTIVE(regs->msr))
1467 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1468#endif
1469
1470 for (i = 0; i < 32; i++) {
1471 if ((i % REGS_PER_LINE) == 0)
1472 pr_cont("\nGPR%02d: ", i);
1473 pr_cont(REG " ", regs->gpr[i]);
1474 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1475 break;
1476 }
1477 pr_cont("\n");
1478#ifdef CONFIG_KALLSYMS
1479 /*
1480 * Look up NIP late so we have the best chance of getting the
1481 * above info out without failing
1482 */
1483 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1484 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1485#endif
1486 show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
1487 if (!user_mode(regs))
1488 show_instructions(regs);
1489}
1490
1491void flush_thread(void)
1492{
1493#ifdef CONFIG_HAVE_HW_BREAKPOINT
1494 flush_ptrace_hw_breakpoint(current);
1495#else /* CONFIG_HAVE_HW_BREAKPOINT */
1496 set_debug_reg_defaults(&current->thread);
1497#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1498}
1499
1500#ifdef CONFIG_PPC_BOOK3S_64
1501void arch_setup_new_exec(void)
1502{
1503 if (radix_enabled())
1504 return;
1505 hash__setup_new_exec();
1506}
1507#endif
1508
1509#ifdef CONFIG_PPC64
1510/**
1511 * Assign a TIDR (thread ID) for task @t and set it in the thread
1512 * structure. For now, we only support setting TIDR for 'current' task.
1513 *
1514 * Since the TID value is a truncated form of its PID, it is possible
1515 * (but unlikely) for 2 threads to have the same TID. In the unlikely event
1516 * that 2 threads share the same TID and are waiting, one of the following
1517 * cases will happen:
1518 *
1519 * 1. The correct thread is running, the wrong thread is not
1520 * In this situation, the correct thread is woken and proceeds to pass its
1521 * condition check.
1522 *
1523 * 2. Neither threads are running
1524 * In this situation, neither thread will be woken. When scheduled, the waiting
1525 * threads will execute either a wait, which will return immediately, followed
1526 * by a condition check, which will pass for the correct thread and fail
1527 * for the wrong thread, or they will execute the condition check immediately.
1528 *
1529 * 3. The wrong thread is running, the correct thread is not
1530 * The wrong thread will be woken, but will fail its condition check and
1531 * re-execute wait. The correct thread, when scheduled, will execute either
1532 * its condition check (which will pass), or wait, which returns immediately
1533 * when called the first time after the thread is scheduled, followed by its
1534 * condition check (which will pass).
1535 *
1536 * 4. Both threads are running
1537 * Both threads will be woken. The wrong thread will fail its condition check
1538 * and execute another wait, while the correct thread will pass its condition
1539 * check.
1540 *
1541 * @t: the task to set the thread ID for
1542 */
1543int set_thread_tidr(struct task_struct *t)
1544{
1545 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1546 return -EINVAL;
1547
1548 if (t != current)
1549 return -EINVAL;
1550
1551 if (t->thread.tidr)
1552 return 0;
1553
1554 t->thread.tidr = (u16)task_pid_nr(t);
1555 mtspr(SPRN_TIDR, t->thread.tidr);
1556
1557 return 0;
1558}
1559EXPORT_SYMBOL_GPL(set_thread_tidr);
1560
1561#endif /* CONFIG_PPC64 */
1562
1563void
1564release_thread(struct task_struct *t)
1565{
1566}
1567
1568/*
1569 * this gets called so that we can store coprocessor state into memory and
1570 * copy the current task into the new thread.
1571 */
1572int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1573{
1574 flush_all_to_thread(src);
1575 /*
1576 * Flush TM state out so we can copy it. __switch_to_tm() does this
1577 * flush but it removes the checkpointed state from the current CPU and
1578 * transitions the CPU out of TM mode. Hence we need to call
1579 * tm_recheckpoint_new_task() (on the same task) to restore the
1580 * checkpointed state back and the TM mode.
1581 *
1582 * Can't pass dst because it isn't ready. Doesn't matter, passing
1583 * dst is only important for __switch_to()
1584 */
1585 __switch_to_tm(src, src);
1586
1587 *dst = *src;
1588
1589 clear_task_ebb(dst);
1590
1591 return 0;
1592}
1593
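/*
 * Precompute the SLB VSID for the new task's kernel stack (hash MMU
 * only; radix does not use segments).
 */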
1594static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1595{
1596#ifdef CONFIG_PPC_BOOK3S_64
1597 unsigned long sp_vsid;
1598 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1599
1600 if (radix_enabled())
1601 return;
1602
1603 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1604 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1605 << SLB_VSID_SHIFT_1T;
1606 else
1607 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1608 << SLB_VSID_SHIFT;
1609 sp_vsid |= SLB_VSID_KERNEL | llp;
1610 p->thread.ksp_vsid = sp_vsid;
1611#endif
1612}
1613
1614/*
1615 * Copy a thread..
1616 */
1617
1618/*
1619 * Copy architecture-specific thread state
1620 */
1621int copy_thread(unsigned long clone_flags, unsigned long usp,
1622 unsigned long kthread_arg, struct task_struct *p,
1623 unsigned long tls)
1624{
1625 struct pt_regs *childregs, *kregs;
1626 extern void ret_from_fork(void);
1627 extern void ret_from_fork_scv(void);
1628 extern void ret_from_kernel_thread(void);
1629 void (*f)(void);
1630 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1631 struct thread_info *ti = task_thread_info(p);
1632#ifdef CONFIG_HAVE_HW_BREAKPOINT
1633 int i;
1634#endif
1635
1636 klp_init_thread_info(p);
1637
1638 /* Copy registers */
1639 sp -= sizeof(struct pt_regs);
1640 childregs = (struct pt_regs *) sp;
1641 if (unlikely(p->flags & PF_KTHREAD)) {
1642 /* kernel thread */
1643 memset(childregs, 0, sizeof(struct pt_regs));
1644 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1645 /* function */
1646 if (usp)
1647 childregs->gpr[14] = ppc_function_entry((void *)usp);
1648#ifdef CONFIG_PPC64
1649 clear_tsk_thread_flag(p, TIF_32BIT);
1650 childregs->softe = IRQS_ENABLED;
1651#endif
1652 childregs->gpr[15] = kthread_arg;
1653 p->thread.regs = NULL; /* no user register state */
1654 ti->flags |= _TIF_RESTOREALL;
1655 f = ret_from_kernel_thread;
1656 } else {
1657 /* user thread */
1658 struct pt_regs *regs = current_pt_regs();
1659 CHECK_FULL_REGS(regs);
1660 *childregs = *regs;
1661 if (usp)
1662 childregs->gpr[1] = usp;
1663 p->thread.regs = childregs;
1664 /* 64s sets this in ret_from_fork */
1665 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
1666 childregs->gpr[3] = 0; /* Result from fork() */
1667 if (clone_flags & CLONE_SETTLS) {
1668 if (!is_32bit_task())
1669 childregs->gpr[13] = tls;
1670 else
1671 childregs->gpr[2] = tls;
1672 }
1673
1674 if (trap_is_scv(regs))
1675 f = ret_from_fork_scv;
1676 else
1677 f = ret_from_fork;
1678 }
1679 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1680 sp -= STACK_FRAME_OVERHEAD;
1681
1682 /*
1683 * The way this works is that at some point in the future
1684 * some task will call _switch to switch to the new task.
1685 * That will pop off the stack frame created below and start
1686 * the new task running at ret_from_fork. The new task will
1687 * do some housekeeping and then return from the fork or clone
1688 * system call, using the stack frame created above.
1689 */
1690 ((unsigned long *)sp)[0] = 0;
1691 sp -= sizeof(struct pt_regs);
1692 kregs = (struct pt_regs *) sp;
1693 sp -= STACK_FRAME_OVERHEAD;
1694 p->thread.ksp = sp;
1695#ifdef CONFIG_PPC32
1696 p->thread.ksp_limit = (unsigned long)end_of_stack(p);
1697#endif
1698#ifdef CONFIG_HAVE_HW_BREAKPOINT
1699 for (i = 0; i < nr_wp_slots(); i++)
1700 p->thread.ptrace_bps[i] = NULL;
1701#endif
1702
1703 p->thread.fp_save_area = NULL;
1704#ifdef CONFIG_ALTIVEC
1705 p->thread.vr_save_area = NULL;
1706#endif
1707
1708 setup_ksp_vsid(p, sp);
1709
1710#ifdef CONFIG_PPC64
1711 if (cpu_has_feature(CPU_FTR_DSCR)) {
1712 p->thread.dscr_inherit = current->thread.dscr_inherit;
1713 p->thread.dscr = mfspr(SPRN_DSCR);
1714 }
1715 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1716 childregs->ppr = DEFAULT_PPR;
1717
1718 p->thread.tidr = 0;
1719#endif
1720 kregs->nip = ppc_function_entry(f);
1721 return 0;
1722}
1723
1724void preload_new_slb_context(unsigned long start, unsigned long sp);
1725
1726/*
1727 * Set up a thread for executing a new program
1728 */
1729void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1730{
1731#ifdef CONFIG_PPC64
1732 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1733
1734#ifdef CONFIG_PPC_BOOK3S_64
1735 if (!radix_enabled())
1736 preload_new_slb_context(start, sp);
1737#endif
1738#endif
1739
1740 /*
1741 * If we exec out of a kernel thread then thread.regs will not be
1742 * set. Do it now.
1743 */
1744 if (!current->thread.regs) {
1745 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1746 current->thread.regs = regs - 1;
1747 }
1748
1749#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1750 /*
1751 * Clear any transactional state, we're exec()ing. The cause is
1752 * not important as there will never be a recheckpoint so it's not
1753 * user visible.
1754 */
1755 if (MSR_TM_SUSPENDED(mfmsr()))
1756 tm_reclaim_current(0);
1757#endif
1758
1759 memset(regs->gpr, 0, sizeof(regs->gpr));
1760 regs->ctr = 0;
1761 regs->link = 0;
1762 regs->xer = 0;
1763 regs->ccr = 0;
1764 regs->gpr[1] = sp;
1765
1766 /*
1767 * We have just cleared all the nonvolatile GPRs, so make
1768 * FULL_REGS(regs) return true. This is necessary to allow
1769 * ptrace to examine the thread immediately after exec.
1770 */
1771 SET_FULL_REGS(regs);
1772
1773#ifdef CONFIG_PPC32
1774 regs->mq = 0;
1775 regs->nip = start;
1776 regs->msr = MSR_USER;
1777#else
1778 if (!is_32bit_task()) {
1779 unsigned long entry;
1780
1781 if (is_elf2_task()) {
1782 /* Look ma, no function descriptors! */
1783 entry = start;
1784
1785 /*
1786 * Ulrich says:
1787 * The latest iteration of the ABI requires that when
1788 * calling a function (at its global entry point),
1789 * the caller must ensure r12 holds the entry point
1790 * address (so that the function can quickly
1791 * establish addressability).
1792 */
1793 regs->gpr[12] = start;
1794 /* Make sure that's restored on entry to userspace. */
1795 set_thread_flag(TIF_RESTOREALL);
1796 } else {
1797 unsigned long toc;
1798
1799 /* start is a relocated pointer to the function
1800 * descriptor for the elf _start routine. The first
1801 * entry in the function descriptor is the entry
1802 * address of _start and the second entry is the TOC
1803 * value we need to use.
1804 */
1805 __get_user(entry, (unsigned long __user *)start);
1806 __get_user(toc, (unsigned long __user *)start+1);
1807
1808 /* Check whether the e_entry function descriptor entries
1809 * need to be relocated before we can use them.
1810 */
1811 if (load_addr != 0) {
1812 entry += load_addr;
1813 toc += load_addr;
1814 }
1815 regs->gpr[2] = toc;
1816 }
1817 regs->nip = entry;
1818 regs->msr = MSR_USER64;
1819 } else {
1820 regs->nip = start;
1821 regs->gpr[2] = 0;
1822 regs->msr = MSR_USER32;
1823 }
1824#endif
1825#ifdef CONFIG_VSX
1826 current->thread.used_vsr = 0;
1827#endif
1828 current->thread.load_slb = 0;
1829 current->thread.load_fp = 0;
1830 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1831 current->thread.fp_save_area = NULL;
1832#ifdef CONFIG_ALTIVEC
1833 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1834 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1835 current->thread.vr_save_area = NULL;
1836 current->thread.vrsave = 0;
1837 current->thread.used_vr = 0;
1838 current->thread.load_vec = 0;
1839#endif /* CONFIG_ALTIVEC */
1840#ifdef CONFIG_SPE
1841 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1842 current->thread.acc = 0;
1843 current->thread.spefscr = 0;
1844 current->thread.used_spe = 0;
1845#endif /* CONFIG_SPE */
1846#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1847 current->thread.tm_tfhar = 0;
1848 current->thread.tm_texasr = 0;
1849 current->thread.tm_tfiar = 0;
1850 current->thread.load_tm = 0;
1851#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1852
1853 thread_pkey_regs_init(&current->thread);
1854}
1855EXPORT_SYMBOL(start_thread);
1856
1857#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1858 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1859
1860int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1861{
1862 struct pt_regs *regs = tsk->thread.regs;
1863
1864 /* This is a bit hairy. If we are an SPE enabled processor
1865 * (have embedded fp) we store the IEEE exception enable flags in
1866 * fpexc_mode. fpexc_mode is also used for setting FP exception
1867 * mode (async, precise, disabled) for 'Classic' FP. */
1868 if (val & PR_FP_EXC_SW_ENABLE) {
1869#ifdef CONFIG_SPE
1870 if (cpu_has_feature(CPU_FTR_SPE)) {
1871 /*
1872 * When the sticky exception bits are set
1873 * directly by userspace, it must call prctl
1874 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1875 * in the existing prctl settings) or
1876 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1877 * the bits being set). <fenv.h> functions
1878 * saving and restoring the whole
1879 * floating-point environment need to do so
1880 * anyway to restore the prctl settings from
1881 * the saved environment.
1882 */
1883 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1884 tsk->thread.fpexc_mode = val &
1885 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1886 return 0;
1887 } else {
1888 return -EINVAL;
1889 }
1890#else
1891 return -EINVAL;
1892#endif
1893 }
1894
1895 /* on a CONFIG_SPE this does not hurt us. The bits that
1896 * __pack_fe01 use do not overlap with bits used for
1897 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
1898 * on CONFIG_SPE implementations are reserved so writing to
1899 * them does not change anything */
1900 if (val > PR_FP_EXC_PRECISE)
1901 return -EINVAL;
1902 tsk->thread.fpexc_mode = __pack_fe01(val);
1903 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1904 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1905 | tsk->thread.fpexc_mode;
1906 return 0;
1907}
1908
1909int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1910{
1911 unsigned int val;
1912
1913 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1914#ifdef CONFIG_SPE
1915 if (cpu_has_feature(CPU_FTR_SPE)) {
1916 /*
1917 * When the sticky exception bits are set
1918 * directly by userspace, it must call prctl
1919 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1920 * in the existing prctl settings) or
1921 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1922 * the bits being set). <fenv.h> functions
1923 * saving and restoring the whole
1924 * floating-point environment need to do so
1925 * anyway to restore the prctl settings from
1926 * the saved environment.
1927 */
1928 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1929 val = tsk->thread.fpexc_mode;
1930 } else
1931 return -EINVAL;
1932#else
1933 return -EINVAL;
1934#endif
1935 else
1936 val = __unpack_fe01(tsk->thread.fpexc_mode);
1937 return put_user(val, (unsigned int __user *) adr);
1938}
1939
1940int set_endian(struct task_struct *tsk, unsigned int val)
1941{
1942 struct pt_regs *regs = tsk->thread.regs;
1943
1944 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1945 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1946 return -EINVAL;
1947
1948 if (regs == NULL)
1949 return -EINVAL;
1950
1951 if (val == PR_ENDIAN_BIG)
1952 regs->msr &= ~MSR_LE;
1953 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1954 regs->msr |= MSR_LE;
1955 else
1956 return -EINVAL;
1957
1958 return 0;
1959}
1960
int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}

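/*
 * prctl(PR_SET_UNALIGN) / prctl(PR_GET_UNALIGN) handlers.  align_ctl
 * records how unaligned accesses should be handled for this task
 * (e.g. PR_UNALIGN_NOPRINT or PR_UNALIGN_SIGBUS) and is consulted by
 * the alignment exception handling code.
 */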
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

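/*
 * Stack-pointer validation helpers for the stack walkers below.  A stack
 * pointer is considered valid if at least 'nbytes' of a frame fit between
 * it and the top of the per-CPU hard/soft IRQ stacks (here) or of the
 * emergency stacks (below).
 */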
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        stack_page = (unsigned long)hardirq_ctx[cpu];
        if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        stack_page = (unsigned long)softirq_ctx[cpu];
        if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return 0;
}

static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
                                        unsigned long nbytes)
{
#ifdef CONFIG_PPC64
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
        if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

# ifdef CONFIG_PPC_BOOK3S_64
        stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
        if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
        if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;
# endif
#endif

        return 0;
}

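/*
 * Check that 'sp' points at least 'nbytes' below the top of one of the
 * stacks task 'p' may legitimately be running on: its normal kernel
 * stack, the per-CPU IRQ stacks, or (on 64-bit) the emergency stacks.
 * Returns 1 if the stack pointer looks sane, 0 otherwise.
 */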
int validate_sp(unsigned long sp, struct task_struct *p,
                unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp < THREAD_SIZE)
                return 0;

        if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        if (valid_irq_stack(sp, p, nbytes))
                return 1;

        return valid_emergency_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);

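/*
 * Walk the saved stack frames of a sleeping task to find the first return
 * address that is not part of the scheduler, i.e. the place where the task
 * is waiting.  This backs /proc/<pid>/wchan.  The walk is bounded to a
 * handful of frames and bails out if the task starts running or a frame
 * pointer stops validating.
 */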
static unsigned long __get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
                    p->state == TASK_RUNNING)
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ret;

        if (!try_get_task_stack(p))
                return 0;

        ret = __get_wchan(p);

        put_task_stack(p);

        return ret;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

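/*
 * Print a backtrace for 'tsk' starting at 'stack' (or at the current/saved
 * stack pointer when 'stack' is NULL).  Each frame's saved LR is printed;
 * when the "regshere" marker is found, the frame is an exception frame and
 * the interrupted NIP/LR are printed as well.
 */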
void show_stack(struct task_struct *tsk, unsigned long *stack,
                const char *loglvl)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        unsigned long ret_addr;
        int ftrace_idx = 0;
#endif

        if (tsk == NULL)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        sp = (unsigned long) stack;
        if (sp == 0) {
                if (tsk == current)
                        sp = current_stack_frame();
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("%sCall Trace:\n", loglvl);
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        break;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("%s["REG"] ["REG"] %pS",
                               loglvl, sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        ret_addr = ftrace_graph_ret_addr(current,
                                                &ftrace_idx, ip, stack);
                        if (ret_addr != ip)
                                pr_cont(" (%pS)", (void *)ret_addr);
#endif
                        if (firstframe)
                                pr_cont(" (unreliable)");
                        pr_cont("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
                        printk("%s--- interrupt: %lx at %pS\n LR = %pS\n",
                               loglvl, regs->trap,
                               (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);

        put_task_stack(tsk);
}

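/*
 * The two helpers below toggle the CTRL[RUN] "run latch" bit, which tells
 * the platform whether this hardware thread is doing useful work (the idle
 * loop clears it, busy paths set it), so that firmware/hardware can make
 * e.g. power-management decisions.
 */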
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
        struct thread_info *ti = current_thread_info();

        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                /*
                 * Least significant bit (RUN) is the only writable bit of
                 * the CTRL register, so we can avoid mfspr. 2.06 is not the
                 * earliest ISA where this is the case, but it's convenient.
                 */
                mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
        } else {
                unsigned long ctrl;

                /*
                 * Some architectures (e.g., Cell) have writable fields other
                 * than RUN, so do the read-modify-write.
                 */
                ctrl = mfspr(SPRN_CTRLF);
                ctrl |= CTRL_RUNLATCH;
                mtspr(SPRN_CTRLT, ctrl);
        }

        ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
        struct thread_info *ti = current_thread_info();

        ti->local_flags &= ~_TLF_RUNLATCH;

        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                mtspr(SPRN_CTRLT, 0);
        } else {
                unsigned long ctrl;

                ctrl = mfspr(SPRN_CTRLF);
                ctrl &= ~CTRL_RUNLATCH;
                mtspr(SPRN_CTRLT, ctrl);
        }
}
#endif /* CONFIG_PPC64 */

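/*
 * Randomize the initial user stack pointer at exec time: subtract a random
 * sub-page offset (unless randomization is disabled for this task) and then
 * round down to a 16-byte boundary as the ABI requires.
 */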
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = 0;

        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
                rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
        else
                rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

        return rnd << PAGE_SHIFT;
}

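/*
 * Pick a randomized start for the heap: take mm->brk (possibly pushed up
 * above the first 1TB segment, see below), add a page-aligned random offset
 * from brk_rnd(), and fall back to the original brk if the result would
 * wrap backwards.
 */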
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * If we are using 1TB segments and we are allowed to randomise
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment. Otherwise the heap will be in the bottom 1TB
         * which always uses 256MB segments and this may result in a
         * performance penalty. We don't need to worry about radix. For
         * radix, mmu_highuser_ssize remains unchanged from 256MB.
         */
        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Derived from "arch/i386/kernel/process.c"
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
7 * Paul Mackerras (paulus@cs.anu.edu.au)
8 *
9 * PowerPC version
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 */
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/sched/debug.h>
16#include <linux/sched/task.h>
17#include <linux/sched/task_stack.h>
18#include <linux/kernel.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/slab.h>
25#include <linux/user.h>
26#include <linux/elf.h>
27#include <linux/prctl.h>
28#include <linux/init_task.h>
29#include <linux/export.h>
30#include <linux/kallsyms.h>
31#include <linux/mqueue.h>
32#include <linux/hardirq.h>
33#include <linux/utsname.h>
34#include <linux/ftrace.h>
35#include <linux/kernel_stat.h>
36#include <linux/personality.h>
37#include <linux/random.h>
38#include <linux/hw_breakpoint.h>
39#include <linux/uaccess.h>
40#include <linux/elf-randomize.h>
41#include <linux/pkeys.h>
42#include <linux/seq_buf.h>
43
44#include <asm/pgtable.h>
45#include <asm/io.h>
46#include <asm/processor.h>
47#include <asm/mmu.h>
48#include <asm/prom.h>
49#include <asm/machdep.h>
50#include <asm/time.h>
51#include <asm/runlatch.h>
52#include <asm/syscalls.h>
53#include <asm/switch_to.h>
54#include <asm/tm.h>
55#include <asm/debug.h>
56#ifdef CONFIG_PPC64
57#include <asm/firmware.h>
58#include <asm/hw_irq.h>
59#endif
60#include <asm/code-patching.h>
61#include <asm/exec.h>
62#include <asm/livepatch.h>
63#include <asm/cpu_has_feature.h>
64#include <asm/asm-prototypes.h>
65#include <asm/stacktrace.h>
66#include <asm/hw_breakpoint.h>
67
68#include <linux/kprobes.h>
69#include <linux/kdebug.h>
70
71/* Transactional Memory debug */
72#ifdef TM_DEBUG_SW
73#define TM_DEBUG(x...) printk(KERN_INFO x)
74#else
75#define TM_DEBUG(x...) do { } while(0)
76#endif
77
78extern unsigned long _get_SP(void);
79
80#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
81/*
82 * Are we running in "Suspend disabled" mode? If so we have to block any
83 * sigreturn that would get us into suspended state, and we also warn in some
84 * other paths that we should never reach with suspend disabled.
85 */
86bool tm_suspend_disabled __ro_after_init = false;
87
88static void check_if_tm_restore_required(struct task_struct *tsk)
89{
90 /*
91 * If we are saving the current thread's registers, and the
92 * thread is in a transactional state, set the TIF_RESTORE_TM
93 * bit so that we know to restore the registers before
94 * returning to userspace.
95 */
96 if (tsk == current && tsk->thread.regs &&
97 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
98 !test_thread_flag(TIF_RESTORE_TM)) {
99 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
100 set_thread_flag(TIF_RESTORE_TM);
101 }
102}
103
104#else
105static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
106#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
107
108bool strict_msr_control;
109EXPORT_SYMBOL(strict_msr_control);
110
111static int __init enable_strict_msr_control(char *str)
112{
113 strict_msr_control = true;
114 pr_info("Enabling strict facility control\n");
115
116 return 0;
117}
118early_param("ppc_strict_facility_enable", enable_strict_msr_control);
119
120/* notrace because it's called by restore_math */
121unsigned long notrace msr_check_and_set(unsigned long bits)
122{
123 unsigned long oldmsr = mfmsr();
124 unsigned long newmsr;
125
126 newmsr = oldmsr | bits;
127
128#ifdef CONFIG_VSX
129 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
130 newmsr |= MSR_VSX;
131#endif
132
133 if (oldmsr != newmsr)
134 mtmsr_isync(newmsr);
135
136 return newmsr;
137}
138EXPORT_SYMBOL_GPL(msr_check_and_set);
139
140/* notrace because it's called by restore_math */
141void notrace __msr_check_and_clear(unsigned long bits)
142{
143 unsigned long oldmsr = mfmsr();
144 unsigned long newmsr;
145
146 newmsr = oldmsr & ~bits;
147
148#ifdef CONFIG_VSX
149 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
150 newmsr &= ~MSR_VSX;
151#endif
152
153 if (oldmsr != newmsr)
154 mtmsr_isync(newmsr);
155}
156EXPORT_SYMBOL(__msr_check_and_clear);
157
158#ifdef CONFIG_PPC_FPU
159static void __giveup_fpu(struct task_struct *tsk)
160{
161 unsigned long msr;
162
163 save_fpu(tsk);
164 msr = tsk->thread.regs->msr;
165 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
166#ifdef CONFIG_VSX
167 if (cpu_has_feature(CPU_FTR_VSX))
168 msr &= ~MSR_VSX;
169#endif
170 tsk->thread.regs->msr = msr;
171}
172
173void giveup_fpu(struct task_struct *tsk)
174{
175 check_if_tm_restore_required(tsk);
176
177 msr_check_and_set(MSR_FP);
178 __giveup_fpu(tsk);
179 msr_check_and_clear(MSR_FP);
180}
181EXPORT_SYMBOL(giveup_fpu);
182
183/*
184 * Make sure the floating-point register state in the
185 * the thread_struct is up to date for task tsk.
186 */
187void flush_fp_to_thread(struct task_struct *tsk)
188{
189 if (tsk->thread.regs) {
190 /*
191 * We need to disable preemption here because if we didn't,
192 * another process could get scheduled after the regs->msr
193 * test but before we have finished saving the FP registers
194 * to the thread_struct. That process could take over the
195 * FPU, and then when we get scheduled again we would store
196 * bogus values for the remaining FP registers.
197 */
198 preempt_disable();
199 if (tsk->thread.regs->msr & MSR_FP) {
200 /*
201 * This should only ever be called for current or
202 * for a stopped child process. Since we save away
203 * the FP register state on context switch,
204 * there is something wrong if a stopped child appears
205 * to still have its FP state in the CPU registers.
206 */
207 BUG_ON(tsk != current);
208 giveup_fpu(tsk);
209 }
210 preempt_enable();
211 }
212}
213EXPORT_SYMBOL_GPL(flush_fp_to_thread);
214
215void enable_kernel_fp(void)
216{
217 unsigned long cpumsr;
218
219 WARN_ON(preemptible());
220
221 cpumsr = msr_check_and_set(MSR_FP);
222
223 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
224 check_if_tm_restore_required(current);
225 /*
226 * If a thread has already been reclaimed then the
227 * checkpointed registers are on the CPU but have definitely
228 * been saved by the reclaim code. Don't need to and *cannot*
229 * giveup as this would save to the 'live' structure not the
230 * checkpointed structure.
231 */
232 if (!MSR_TM_ACTIVE(cpumsr) &&
233 MSR_TM_ACTIVE(current->thread.regs->msr))
234 return;
235 __giveup_fpu(current);
236 }
237}
238EXPORT_SYMBOL(enable_kernel_fp);
239
240static int restore_fp(struct task_struct *tsk)
241{
242 if (tsk->thread.load_fp) {
243 load_fp_state(¤t->thread.fp_state);
244 current->thread.load_fp++;
245 return 1;
246 }
247 return 0;
248}
249#else
250static int restore_fp(struct task_struct *tsk) { return 0; }
251#endif /* CONFIG_PPC_FPU */
252
253#ifdef CONFIG_ALTIVEC
254#define loadvec(thr) ((thr).load_vec)
255
256static void __giveup_altivec(struct task_struct *tsk)
257{
258 unsigned long msr;
259
260 save_altivec(tsk);
261 msr = tsk->thread.regs->msr;
262 msr &= ~MSR_VEC;
263#ifdef CONFIG_VSX
264 if (cpu_has_feature(CPU_FTR_VSX))
265 msr &= ~MSR_VSX;
266#endif
267 tsk->thread.regs->msr = msr;
268}
269
270void giveup_altivec(struct task_struct *tsk)
271{
272 check_if_tm_restore_required(tsk);
273
274 msr_check_and_set(MSR_VEC);
275 __giveup_altivec(tsk);
276 msr_check_and_clear(MSR_VEC);
277}
278EXPORT_SYMBOL(giveup_altivec);
279
280void enable_kernel_altivec(void)
281{
282 unsigned long cpumsr;
283
284 WARN_ON(preemptible());
285
286 cpumsr = msr_check_and_set(MSR_VEC);
287
288 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
289 check_if_tm_restore_required(current);
290 /*
291 * If a thread has already been reclaimed then the
292 * checkpointed registers are on the CPU but have definitely
293 * been saved by the reclaim code. Don't need to and *cannot*
294 * giveup as this would save to the 'live' structure not the
295 * checkpointed structure.
296 */
297 if (!MSR_TM_ACTIVE(cpumsr) &&
298 MSR_TM_ACTIVE(current->thread.regs->msr))
299 return;
300 __giveup_altivec(current);
301 }
302}
303EXPORT_SYMBOL(enable_kernel_altivec);
304
305/*
306 * Make sure the VMX/Altivec register state in the
307 * the thread_struct is up to date for task tsk.
308 */
309void flush_altivec_to_thread(struct task_struct *tsk)
310{
311 if (tsk->thread.regs) {
312 preempt_disable();
313 if (tsk->thread.regs->msr & MSR_VEC) {
314 BUG_ON(tsk != current);
315 giveup_altivec(tsk);
316 }
317 preempt_enable();
318 }
319}
320EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
321
322static int restore_altivec(struct task_struct *tsk)
323{
324 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
325 load_vr_state(&tsk->thread.vr_state);
326 tsk->thread.used_vr = 1;
327 tsk->thread.load_vec++;
328
329 return 1;
330 }
331 return 0;
332}
333#else
334#define loadvec(thr) 0
335static inline int restore_altivec(struct task_struct *tsk) { return 0; }
336#endif /* CONFIG_ALTIVEC */
337
338#ifdef CONFIG_VSX
339static void __giveup_vsx(struct task_struct *tsk)
340{
341 unsigned long msr = tsk->thread.regs->msr;
342
343 /*
344 * We should never be ssetting MSR_VSX without also setting
345 * MSR_FP and MSR_VEC
346 */
347 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
348
349 /* __giveup_fpu will clear MSR_VSX */
350 if (msr & MSR_FP)
351 __giveup_fpu(tsk);
352 if (msr & MSR_VEC)
353 __giveup_altivec(tsk);
354}
355
356static void giveup_vsx(struct task_struct *tsk)
357{
358 check_if_tm_restore_required(tsk);
359
360 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
361 __giveup_vsx(tsk);
362 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
363}
364
365void enable_kernel_vsx(void)
366{
367 unsigned long cpumsr;
368
369 WARN_ON(preemptible());
370
371 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
372
373 if (current->thread.regs &&
374 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
375 check_if_tm_restore_required(current);
376 /*
377 * If a thread has already been reclaimed then the
378 * checkpointed registers are on the CPU but have definitely
379 * been saved by the reclaim code. Don't need to and *cannot*
380 * giveup as this would save to the 'live' structure not the
381 * checkpointed structure.
382 */
383 if (!MSR_TM_ACTIVE(cpumsr) &&
384 MSR_TM_ACTIVE(current->thread.regs->msr))
385 return;
386 __giveup_vsx(current);
387 }
388}
389EXPORT_SYMBOL(enable_kernel_vsx);
390
391void flush_vsx_to_thread(struct task_struct *tsk)
392{
393 if (tsk->thread.regs) {
394 preempt_disable();
395 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
396 BUG_ON(tsk != current);
397 giveup_vsx(tsk);
398 }
399 preempt_enable();
400 }
401}
402EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
403
404static int restore_vsx(struct task_struct *tsk)
405{
406 if (cpu_has_feature(CPU_FTR_VSX)) {
407 tsk->thread.used_vsr = 1;
408 return 1;
409 }
410
411 return 0;
412}
413#else
414static inline int restore_vsx(struct task_struct *tsk) { return 0; }
415#endif /* CONFIG_VSX */
416
417#ifdef CONFIG_SPE
418void giveup_spe(struct task_struct *tsk)
419{
420 check_if_tm_restore_required(tsk);
421
422 msr_check_and_set(MSR_SPE);
423 __giveup_spe(tsk);
424 msr_check_and_clear(MSR_SPE);
425}
426EXPORT_SYMBOL(giveup_spe);
427
428void enable_kernel_spe(void)
429{
430 WARN_ON(preemptible());
431
432 msr_check_and_set(MSR_SPE);
433
434 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
435 check_if_tm_restore_required(current);
436 __giveup_spe(current);
437 }
438}
439EXPORT_SYMBOL(enable_kernel_spe);
440
441void flush_spe_to_thread(struct task_struct *tsk)
442{
443 if (tsk->thread.regs) {
444 preempt_disable();
445 if (tsk->thread.regs->msr & MSR_SPE) {
446 BUG_ON(tsk != current);
447 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
448 giveup_spe(tsk);
449 }
450 preempt_enable();
451 }
452}
453#endif /* CONFIG_SPE */
454
455static unsigned long msr_all_available;
456
457static int __init init_msr_all_available(void)
458{
459#ifdef CONFIG_PPC_FPU
460 msr_all_available |= MSR_FP;
461#endif
462#ifdef CONFIG_ALTIVEC
463 if (cpu_has_feature(CPU_FTR_ALTIVEC))
464 msr_all_available |= MSR_VEC;
465#endif
466#ifdef CONFIG_VSX
467 if (cpu_has_feature(CPU_FTR_VSX))
468 msr_all_available |= MSR_VSX;
469#endif
470#ifdef CONFIG_SPE
471 if (cpu_has_feature(CPU_FTR_SPE))
472 msr_all_available |= MSR_SPE;
473#endif
474
475 return 0;
476}
477early_initcall(init_msr_all_available);
478
479void giveup_all(struct task_struct *tsk)
480{
481 unsigned long usermsr;
482
483 if (!tsk->thread.regs)
484 return;
485
486 check_if_tm_restore_required(tsk);
487
488 usermsr = tsk->thread.regs->msr;
489
490 if ((usermsr & msr_all_available) == 0)
491 return;
492
493 msr_check_and_set(msr_all_available);
494
495 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
496
497#ifdef CONFIG_PPC_FPU
498 if (usermsr & MSR_FP)
499 __giveup_fpu(tsk);
500#endif
501#ifdef CONFIG_ALTIVEC
502 if (usermsr & MSR_VEC)
503 __giveup_altivec(tsk);
504#endif
505#ifdef CONFIG_SPE
506 if (usermsr & MSR_SPE)
507 __giveup_spe(tsk);
508#endif
509
510 msr_check_and_clear(msr_all_available);
511}
512EXPORT_SYMBOL(giveup_all);
513
514/*
515 * The exception exit path calls restore_math() with interrupts hard disabled
516 * but the soft irq state not "reconciled". ftrace code that calls
517 * local_irq_save/restore causes warnings.
518 *
519 * Rather than complicate the exit path, just don't trace restore_math. This
520 * could be done by having ftrace entry code check for this un-reconciled
521 * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
522 * temporarily fix it up for the duration of the ftrace call.
523 */
524void notrace restore_math(struct pt_regs *regs)
525{
526 unsigned long msr;
527
528 if (!MSR_TM_ACTIVE(regs->msr) &&
529 !current->thread.load_fp && !loadvec(current->thread))
530 return;
531
532 msr = regs->msr;
533 msr_check_and_set(msr_all_available);
534
535 /*
536 * Only reload if the bit is not set in the user MSR, the bit BEING set
537 * indicates that the registers are hot
538 */
539 if ((!(msr & MSR_FP)) && restore_fp(current))
540 msr |= MSR_FP | current->thread.fpexc_mode;
541
542 if ((!(msr & MSR_VEC)) && restore_altivec(current))
543 msr |= MSR_VEC;
544
545 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
546 restore_vsx(current)) {
547 msr |= MSR_VSX;
548 }
549
550 msr_check_and_clear(msr_all_available);
551
552 regs->msr = msr;
553}
554
555static void save_all(struct task_struct *tsk)
556{
557 unsigned long usermsr;
558
559 if (!tsk->thread.regs)
560 return;
561
562 usermsr = tsk->thread.regs->msr;
563
564 if ((usermsr & msr_all_available) == 0)
565 return;
566
567 msr_check_and_set(msr_all_available);
568
569 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
570
571 if (usermsr & MSR_FP)
572 save_fpu(tsk);
573
574 if (usermsr & MSR_VEC)
575 save_altivec(tsk);
576
577 if (usermsr & MSR_SPE)
578 __giveup_spe(tsk);
579
580 msr_check_and_clear(msr_all_available);
581 thread_pkey_regs_save(&tsk->thread);
582}
583
584void flush_all_to_thread(struct task_struct *tsk)
585{
586 if (tsk->thread.regs) {
587 preempt_disable();
588 BUG_ON(tsk != current);
589#ifdef CONFIG_SPE
590 if (tsk->thread.regs->msr & MSR_SPE)
591 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
592#endif
593 save_all(tsk);
594
595 preempt_enable();
596 }
597}
598EXPORT_SYMBOL(flush_all_to_thread);
599
600#ifdef CONFIG_PPC_ADV_DEBUG_REGS
601void do_send_trap(struct pt_regs *regs, unsigned long address,
602 unsigned long error_code, int breakpt)
603{
604 current->thread.trap_nr = TRAP_HWBKPT;
605 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
606 11, SIGSEGV) == NOTIFY_STOP)
607 return;
608
609 /* Deliver the signal to userspace */
610 force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
611 (void __user *)address);
612}
613#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
614void do_break (struct pt_regs *regs, unsigned long address,
615 unsigned long error_code)
616{
617 current->thread.trap_nr = TRAP_HWBKPT;
618 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
619 11, SIGSEGV) == NOTIFY_STOP)
620 return;
621
622 if (debugger_break_match(regs))
623 return;
624
625 /* Clear the breakpoint */
626 hw_breakpoint_disable();
627
628 /* Deliver the signal to userspace */
629 force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
630}
631#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
632
633static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
634
635#ifdef CONFIG_PPC_ADV_DEBUG_REGS
636/*
637 * Set the debug registers back to their default "safe" values.
638 */
639static void set_debug_reg_defaults(struct thread_struct *thread)
640{
641 thread->debug.iac1 = thread->debug.iac2 = 0;
642#if CONFIG_PPC_ADV_DEBUG_IACS > 2
643 thread->debug.iac3 = thread->debug.iac4 = 0;
644#endif
645 thread->debug.dac1 = thread->debug.dac2 = 0;
646#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
647 thread->debug.dvc1 = thread->debug.dvc2 = 0;
648#endif
649 thread->debug.dbcr0 = 0;
650#ifdef CONFIG_BOOKE
651 /*
652 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
653 */
654 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
655 DBCR1_IAC3US | DBCR1_IAC4US;
656 /*
657 * Force Data Address Compare User/Supervisor bits to be User-only
658 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
659 */
660 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
661#else
662 thread->debug.dbcr1 = 0;
663#endif
664}
665
666static void prime_debug_regs(struct debug_reg *debug)
667{
668 /*
669 * We could have inherited MSR_DE from userspace, since
670 * it doesn't get cleared on exception entry. Make sure
671 * MSR_DE is clear before we enable any debug events.
672 */
673 mtmsr(mfmsr() & ~MSR_DE);
674
675 mtspr(SPRN_IAC1, debug->iac1);
676 mtspr(SPRN_IAC2, debug->iac2);
677#if CONFIG_PPC_ADV_DEBUG_IACS > 2
678 mtspr(SPRN_IAC3, debug->iac3);
679 mtspr(SPRN_IAC4, debug->iac4);
680#endif
681 mtspr(SPRN_DAC1, debug->dac1);
682 mtspr(SPRN_DAC2, debug->dac2);
683#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
684 mtspr(SPRN_DVC1, debug->dvc1);
685 mtspr(SPRN_DVC2, debug->dvc2);
686#endif
687 mtspr(SPRN_DBCR0, debug->dbcr0);
688 mtspr(SPRN_DBCR1, debug->dbcr1);
689#ifdef CONFIG_BOOKE
690 mtspr(SPRN_DBCR2, debug->dbcr2);
691#endif
692}
693/*
694 * Unless neither the old or new thread are making use of the
695 * debug registers, set the debug registers from the values
696 * stored in the new thread.
697 */
698void switch_booke_debug_regs(struct debug_reg *new_debug)
699{
700 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
701 || (new_debug->dbcr0 & DBCR0_IDM))
702 prime_debug_regs(new_debug);
703}
704EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
705#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
706#ifndef CONFIG_HAVE_HW_BREAKPOINT
707static void set_breakpoint(struct arch_hw_breakpoint *brk)
708{
709 preempt_disable();
710 __set_breakpoint(brk);
711 preempt_enable();
712}
713
714static void set_debug_reg_defaults(struct thread_struct *thread)
715{
716 thread->hw_brk.address = 0;
717 thread->hw_brk.type = 0;
718 if (ppc_breakpoint_available())
719 set_breakpoint(&thread->hw_brk);
720}
721#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
722#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
723
724#ifdef CONFIG_PPC_ADV_DEBUG_REGS
725static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
726{
727 mtspr(SPRN_DAC1, dabr);
728#ifdef CONFIG_PPC_47x
729 isync();
730#endif
731 return 0;
732}
733#elif defined(CONFIG_PPC_BOOK3S)
734static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
735{
736 mtspr(SPRN_DABR, dabr);
737 if (cpu_has_feature(CPU_FTR_DABRX))
738 mtspr(SPRN_DABRX, dabrx);
739 return 0;
740}
741#elif defined(CONFIG_PPC_8xx)
742static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
743{
744 unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
745 unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
746 unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */
747
748 if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
749 lctrl1 |= 0xa0000;
750 else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
751 lctrl1 |= 0xf0000;
752 else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
753 lctrl2 = 0;
754
755 mtspr(SPRN_LCTRL2, 0);
756 mtspr(SPRN_CMPE, addr);
757 mtspr(SPRN_CMPF, addr + 4);
758 mtspr(SPRN_LCTRL1, lctrl1);
759 mtspr(SPRN_LCTRL2, lctrl2);
760
761 return 0;
762}
763#else
764static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
765{
766 return -EINVAL;
767}
768#endif
769
770static inline int set_dabr(struct arch_hw_breakpoint *brk)
771{
772 unsigned long dabr, dabrx;
773
774 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
775 dabrx = ((brk->type >> 3) & 0x7);
776
777 if (ppc_md.set_dabr)
778 return ppc_md.set_dabr(dabr, dabrx);
779
780 return __set_dabr(dabr, dabrx);
781}
782
783void __set_breakpoint(struct arch_hw_breakpoint *brk)
784{
785 memcpy(this_cpu_ptr(¤t_brk), brk, sizeof(*brk));
786
787 if (dawr_enabled())
788 // Power8 or later
789 set_dawr(brk);
790 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
791 // Power7 or earlier
792 set_dabr(brk);
793 else
794 // Shouldn't happen due to higher level checks
795 WARN_ON_ONCE(1);
796}
797
798/* Check if we have DAWR or DABR hardware */
799bool ppc_breakpoint_available(void)
800{
801 if (dawr_enabled())
802 return true; /* POWER8 DAWR or POWER9 forced DAWR */
803 if (cpu_has_feature(CPU_FTR_ARCH_207S))
804 return false; /* POWER9 with DAWR disabled */
805 /* DABR: Everything but POWER8 and POWER9 */
806 return true;
807}
808EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
809
810static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
811 struct arch_hw_breakpoint *b)
812{
813 if (a->address != b->address)
814 return false;
815 if (a->type != b->type)
816 return false;
817 if (a->len != b->len)
818 return false;
819 return true;
820}
821
822#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
823
824static inline bool tm_enabled(struct task_struct *tsk)
825{
826 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
827}
828
829static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
830{
831 /*
832 * Use the current MSR TM suspended bit to track if we have
833 * checkpointed state outstanding.
834 * On signal delivery, we'd normally reclaim the checkpointed
835 * state to obtain stack pointer (see:get_tm_stackpointer()).
836 * This will then directly return to userspace without going
837 * through __switch_to(). However, if the stack frame is bad,
838 * we need to exit this thread which calls __switch_to() which
839 * will again attempt to reclaim the already saved tm state.
840 * Hence we need to check that we've not already reclaimed
841 * this state.
842 * We do this using the current MSR, rather tracking it in
843 * some specific thread_struct bit, as it has the additional
844 * benefit of checking for a potential TM bad thing exception.
845 */
846 if (!MSR_TM_SUSPENDED(mfmsr()))
847 return;
848
849 giveup_all(container_of(thr, struct task_struct, thread));
850
851 tm_reclaim(thr, cause);
852
853 /*
854 * If we are in a transaction and FP is off then we can't have
855 * used FP inside that transaction. Hence the checkpointed
856 * state is the same as the live state. We need to copy the
857 * live state to the checkpointed state so that when the
858 * transaction is restored, the checkpointed state is correct
859 * and the aborted transaction sees the correct state. We use
860 * ckpt_regs.msr here as that's what tm_reclaim will use to
861 * determine if it's going to write the checkpointed state or
862 * not. So either this will write the checkpointed registers,
863 * or reclaim will. Similarly for VMX.
864 */
865 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
866 memcpy(&thr->ckfp_state, &thr->fp_state,
867 sizeof(struct thread_fp_state));
868 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
869 memcpy(&thr->ckvr_state, &thr->vr_state,
870 sizeof(struct thread_vr_state));
871}
872
873void tm_reclaim_current(uint8_t cause)
874{
875 tm_enable();
876 tm_reclaim_thread(¤t->thread, cause);
877}
878
879static inline void tm_reclaim_task(struct task_struct *tsk)
880{
881 /* We have to work out if we're switching from/to a task that's in the
882 * middle of a transaction.
883 *
884 * In switching we need to maintain a 2nd register state as
885 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
886 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
887 * ckvr_state
888 *
889 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
890 */
891 struct thread_struct *thr = &tsk->thread;
892
893 if (!thr->regs)
894 return;
895
896 if (!MSR_TM_ACTIVE(thr->regs->msr))
897 goto out_and_saveregs;
898
899 WARN_ON(tm_suspend_disabled);
900
901 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
902 "ccr=%lx, msr=%lx, trap=%lx)\n",
903 tsk->pid, thr->regs->nip,
904 thr->regs->ccr, thr->regs->msr,
905 thr->regs->trap);
906
907 tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
908
909 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
910 tsk->pid);
911
912out_and_saveregs:
913 /* Always save the regs here, even if a transaction's not active.
914 * This context-switches a thread's TM info SPRs. We do it here to
915 * be consistent with the restore path (in recheckpoint) which
916 * cannot happen later in _switch().
917 */
918 tm_save_sprs(thr);
919}
920
921extern void __tm_recheckpoint(struct thread_struct *thread);
922
923void tm_recheckpoint(struct thread_struct *thread)
924{
925 unsigned long flags;
926
927 if (!(thread->regs->msr & MSR_TM))
928 return;
929
930 /* We really can't be interrupted here as the TEXASR registers can't
931 * change and later in the trecheckpoint code, we have a userspace R1.
932 * So let's hard disable over this region.
933 */
934 local_irq_save(flags);
935 hard_irq_disable();
936
937 /* The TM SPRs are restored here, so that TEXASR.FS can be set
938 * before the trecheckpoint and no explosion occurs.
939 */
940 tm_restore_sprs(thread);
941
942 __tm_recheckpoint(thread);
943
944 local_irq_restore(flags);
945}
946
947static inline void tm_recheckpoint_new_task(struct task_struct *new)
948{
949 if (!cpu_has_feature(CPU_FTR_TM))
950 return;
951
952 /* Recheckpoint the registers of the thread we're about to switch to.
953 *
954 * If the task was using FP, we non-lazily reload both the original and
955 * the speculative FP register states. This is because the kernel
956 * doesn't see if/when a TM rollback occurs, so if we take an FP
957 * unavailable later, we are unable to determine which set of FP regs
958 * need to be restored.
959 */
960 if (!tm_enabled(new))
961 return;
962
963 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
964 tm_restore_sprs(&new->thread);
965 return;
966 }
967 /* Recheckpoint to restore original checkpointed register state. */
968 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
969 new->pid, new->thread.regs->msr);
970
971 tm_recheckpoint(&new->thread);
972
973 /*
974 * The checkpointed state has been restored but the live state has
975 * not, ensure all the math functionality is turned off to trigger
976 * restore_math() to reload.
977 */
978 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
979
980 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
981 "(kernel msr 0x%lx)\n",
982 new->pid, mfmsr());
983}
984
985static inline void __switch_to_tm(struct task_struct *prev,
986 struct task_struct *new)
987{
988 if (cpu_has_feature(CPU_FTR_TM)) {
989 if (tm_enabled(prev) || tm_enabled(new))
990 tm_enable();
991
992 if (tm_enabled(prev)) {
993 prev->thread.load_tm++;
994 tm_reclaim_task(prev);
995 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
996 prev->thread.regs->msr &= ~MSR_TM;
997 }
998
999 tm_recheckpoint_new_task(new);
1000 }
1001}
1002
1003/*
1004 * This is called if we are on the way out to userspace and the
1005 * TIF_RESTORE_TM flag is set. It checks if we need to reload
1006 * FP and/or vector state and does so if necessary.
1007 * If userspace is inside a transaction (whether active or
1008 * suspended) and FP/VMX/VSX instructions have ever been enabled
1009 * inside that transaction, then we have to keep them enabled
1010 * and keep the FP/VMX/VSX state loaded while ever the transaction
1011 * continues. The reason is that if we didn't, and subsequently
1012 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
1013 * we don't know whether it's the same transaction, and thus we
1014 * don't know which of the checkpointed state and the transactional
1015 * state to use.
1016 */
1017void restore_tm_state(struct pt_regs *regs)
1018{
1019 unsigned long msr_diff;
1020
1021 /*
1022 * This is the only moment we should clear TIF_RESTORE_TM as
1023 * it is here that ckpt_regs.msr and pt_regs.msr become the same
1024 * again, anything else could lead to an incorrect ckpt_msr being
1025 * saved and therefore incorrect signal contexts.
1026 */
1027 clear_thread_flag(TIF_RESTORE_TM);
1028 if (!MSR_TM_ACTIVE(regs->msr))
1029 return;
1030
1031 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1032 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1033
1034 /* Ensure that restore_math() will restore */
1035 if (msr_diff & MSR_FP)
1036 current->thread.load_fp = 1;
1037#ifdef CONFIG_ALTIVEC
1038 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1039 current->thread.load_vec = 1;
1040#endif
1041 restore_math(regs);
1042
1043 regs->msr |= msr_diff;
1044}
1045
1046#else
1047#define tm_recheckpoint_new_task(new)
1048#define __switch_to_tm(prev, new)
1049#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1050
1051static inline void save_sprs(struct thread_struct *t)
1052{
1053#ifdef CONFIG_ALTIVEC
1054 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1055 t->vrsave = mfspr(SPRN_VRSAVE);
1056#endif
1057#ifdef CONFIG_PPC_BOOK3S_64
1058 if (cpu_has_feature(CPU_FTR_DSCR))
1059 t->dscr = mfspr(SPRN_DSCR);
1060
1061 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1062 t->bescr = mfspr(SPRN_BESCR);
1063 t->ebbhr = mfspr(SPRN_EBBHR);
1064 t->ebbrr = mfspr(SPRN_EBBRR);
1065
1066 t->fscr = mfspr(SPRN_FSCR);
1067
1068 /*
1069 * Note that the TAR is not available for use in the kernel.
1070 * (To provide this, the TAR should be backed up/restored on
1071 * exception entry/exit instead, and be in pt_regs. FIXME,
1072 * this should be in pt_regs anyway (for debug).)
1073 */
1074 t->tar = mfspr(SPRN_TAR);
1075 }
1076#endif
1077
1078 thread_pkey_regs_save(t);
1079}
1080
1081static inline void restore_sprs(struct thread_struct *old_thread,
1082 struct thread_struct *new_thread)
1083{
1084#ifdef CONFIG_ALTIVEC
1085 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1086 old_thread->vrsave != new_thread->vrsave)
1087 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1088#endif
1089#ifdef CONFIG_PPC_BOOK3S_64
1090 if (cpu_has_feature(CPU_FTR_DSCR)) {
1091 u64 dscr = get_paca()->dscr_default;
1092 if (new_thread->dscr_inherit)
1093 dscr = new_thread->dscr;
1094
1095 if (old_thread->dscr != dscr)
1096 mtspr(SPRN_DSCR, dscr);
1097 }
1098
1099 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1100 if (old_thread->bescr != new_thread->bescr)
1101 mtspr(SPRN_BESCR, new_thread->bescr);
1102 if (old_thread->ebbhr != new_thread->ebbhr)
1103 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1104 if (old_thread->ebbrr != new_thread->ebbrr)
1105 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1106
1107 if (old_thread->fscr != new_thread->fscr)
1108 mtspr(SPRN_FSCR, new_thread->fscr);
1109
1110 if (old_thread->tar != new_thread->tar)
1111 mtspr(SPRN_TAR, new_thread->tar);
1112 }
1113
1114 if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1115 old_thread->tidr != new_thread->tidr)
1116 mtspr(SPRN_TIDR, new_thread->tidr);
1117#endif
1118
1119 thread_pkey_regs_restore(new_thread, old_thread);
1120}
1121
1122struct task_struct *__switch_to(struct task_struct *prev,
1123 struct task_struct *new)
1124{
1125 struct thread_struct *new_thread, *old_thread;
1126 struct task_struct *last;
1127#ifdef CONFIG_PPC_BOOK3S_64
1128 struct ppc64_tlb_batch *batch;
1129#endif
1130
1131 new_thread = &new->thread;
1132 old_thread = ¤t->thread;
1133
1134 WARN_ON(!irqs_disabled());
1135
1136#ifdef CONFIG_PPC_BOOK3S_64
1137 batch = this_cpu_ptr(&ppc64_tlb_batch);
1138 if (batch->active) {
1139 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1140 if (batch->index)
1141 __flush_tlb_pending(batch);
1142 batch->active = 0;
1143 }
1144#endif /* CONFIG_PPC_BOOK3S_64 */
1145
1146#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1147 switch_booke_debug_regs(&new->thread.debug);
1148#else
1149/*
1150 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1151 * schedule DABR
1152 */
1153#ifndef CONFIG_HAVE_HW_BREAKPOINT
1154 if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk)))
1155 __set_breakpoint(&new->thread.hw_brk);
1156#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1157#endif
1158
1159 /*
1160 * We need to save SPRs before treclaim/trecheckpoint as these will
1161 * change a number of them.
1162 */
1163 save_sprs(&prev->thread);
1164
1165 /* Save FPU, Altivec, VSX and SPE state */
1166 giveup_all(prev);
1167
1168 __switch_to_tm(prev, new);
1169
1170 if (!radix_enabled()) {
1171 /*
1172 * We can't take a PMU exception inside _switch() since there
1173 * is a window where the kernel stack SLB and the kernel stack
1174 * are out of sync. Hard disable here.
1175 */
1176 hard_irq_disable();
1177 }
1178
1179 /*
1180 * Call restore_sprs() before calling _switch(). If we move it after
1181 * _switch() then we miss out on calling it for new tasks. The reason
1182 * for this is we manually create a stack frame for new tasks that
1183 * directly returns through ret_from_fork() or
1184 * ret_from_kernel_thread(). See copy_thread() for details.
1185 */
1186 restore_sprs(old_thread, new_thread);
1187
1188 last = _switch(old_thread, new_thread);
1189
1190#ifdef CONFIG_PPC_BOOK3S_64
1191 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1192 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1193 batch = this_cpu_ptr(&ppc64_tlb_batch);
1194 batch->active = 1;
1195 }
1196
1197 if (current->thread.regs) {
1198 restore_math(current->thread.regs);
1199
1200 /*
1201 * The copy-paste buffer can only store into foreign real
1202 * addresses, so unprivileged processes can not see the
1203 * data or use it in any way unless they have foreign real
1204 * mappings. If the new process has the foreign real address
1205 * mappings, we must issue a cp_abort to clear any state and
1206 * prevent snooping, corruption or a covert channel.
1207 */
1208 if (current->thread.used_vas)
1209 asm volatile(PPC_CP_ABORT);
1210 }
1211#endif /* CONFIG_PPC_BOOK3S_64 */
1212
1213 return last;
1214}
1215
1216#define NR_INSN_TO_PRINT 16
1217
1218static void show_instructions(struct pt_regs *regs)
1219{
1220 int i;
1221 unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1222
1223 printk("Instruction dump:");
1224
1225 for (i = 0; i < NR_INSN_TO_PRINT; i++) {
1226 int instr;
1227
1228 if (!(i % 8))
1229 pr_cont("\n");
1230
1231#if !defined(CONFIG_BOOKE)
1232 /* If executing with the IMMU off, adjust pc rather
1233 * than print XXXXXXXX.
1234 */
1235 if (!(regs->msr & MSR_IR))
1236 pc = (unsigned long)phys_to_virt(pc);
1237#endif
1238
1239 if (!__kernel_text_address(pc) ||
1240 probe_kernel_address((const void *)pc, instr)) {
1241 pr_cont("XXXXXXXX ");
1242 } else {
1243 if (regs->nip == pc)
1244 pr_cont("<%08x> ", instr);
1245 else
1246 pr_cont("%08x ", instr);
1247 }
1248
1249 pc += sizeof(int);
1250 }
1251
1252 pr_cont("\n");
1253}
1254
1255void show_user_instructions(struct pt_regs *regs)
1256{
1257 unsigned long pc;
1258 int n = NR_INSN_TO_PRINT;
1259 struct seq_buf s;
1260 char buf[96]; /* enough for 8 times 9 + 2 chars */
1261
1262 pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1263
1264 /*
1265 * Make sure the NIP points at userspace, not kernel text/data or
1266 * elsewhere.
1267 */
1268 if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) {
1269 pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
1270 current->comm, current->pid);
1271 return;
1272 }
1273
1274 seq_buf_init(&s, buf, sizeof(buf));
1275
1276 while (n) {
1277 int i;
1278
1279 seq_buf_clear(&s);
1280
1281 for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
1282 int instr;
1283
1284 if (probe_kernel_address((const void *)pc, instr)) {
1285 seq_buf_printf(&s, "XXXXXXXX ");
1286 continue;
1287 }
1288 seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
1289 }
1290
1291 if (!seq_buf_has_overflowed(&s))
1292 pr_info("%s[%d]: code: %s\n", current->comm,
1293 current->pid, s.buffer);
1294 }
1295}
1296
1297struct regbit {
1298 unsigned long bit;
1299 const char *name;
1300};
1301
1302static struct regbit msr_bits[] = {
1303#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1304 {MSR_SF, "SF"},
1305 {MSR_HV, "HV"},
1306#endif
1307 {MSR_VEC, "VEC"},
1308 {MSR_VSX, "VSX"},
1309#ifdef CONFIG_BOOKE
1310 {MSR_CE, "CE"},
1311#endif
1312 {MSR_EE, "EE"},
1313 {MSR_PR, "PR"},
1314 {MSR_FP, "FP"},
1315 {MSR_ME, "ME"},
1316#ifdef CONFIG_BOOKE
1317 {MSR_DE, "DE"},
1318#else
1319 {MSR_SE, "SE"},
1320 {MSR_BE, "BE"},
1321#endif
1322 {MSR_IR, "IR"},
1323 {MSR_DR, "DR"},
1324 {MSR_PMM, "PMM"},
1325#ifndef CONFIG_BOOKE
1326 {MSR_RI, "RI"},
1327 {MSR_LE, "LE"},
1328#endif
1329 {0, NULL}
1330};
1331
1332static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1333{
1334 const char *s = "";
1335
1336 for (; bits->bit; ++bits)
1337 if (val & bits->bit) {
1338 pr_cont("%s%s", s, bits->name);
1339 s = sep;
1340 }
1341}
1342
1343#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1344static struct regbit msr_tm_bits[] = {
1345 {MSR_TS_T, "T"},
1346 {MSR_TS_S, "S"},
1347 {MSR_TM, "E"},
1348 {0, NULL}
1349};
1350
1351static void print_tm_bits(unsigned long val)
1352{
1353/*
1354 * This only prints something if at least one of the TM bit is set.
1355 * Inside the TM[], the output means:
1356 * E: Enabled (bit 32)
1357 * S: Suspended (bit 33)
1358 * T: Transactional (bit 34)
1359 */
1360 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1361 pr_cont(",TM[");
1362 print_bits(val, msr_tm_bits, "");
1363 pr_cont("]");
1364 }
1365}
1366#else
1367static void print_tm_bits(unsigned long val) {}
1368#endif
1369
1370static void print_msr_bits(unsigned long val)
1371{
1372 pr_cont("<");
1373 print_bits(val, msr_bits, ",");
1374 print_tm_bits(val);
1375 pr_cont(">");
1376}
1377
1378#ifdef CONFIG_PPC64
1379#define REG "%016lx"
1380#define REGS_PER_LINE 4
1381#define LAST_VOLATILE 13
1382#else
1383#define REG "%08lx"
1384#define REGS_PER_LINE 8
1385#define LAST_VOLATILE 12
1386#endif
1387
1388void show_regs(struct pt_regs * regs)
1389{
1390 int i, trap;
1391
1392 show_regs_print_info(KERN_DEFAULT);
1393
1394 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1395 regs->nip, regs->link, regs->ctr);
1396 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1397 regs, regs->trap, print_tainted(), init_utsname()->release);
1398 printk("MSR: "REG" ", regs->msr);
1399 print_msr_bits(regs->msr);
1400 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1401 trap = TRAP(regs);
1402 if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1403 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1404 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1405#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1406 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1407#else
1408 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1409#endif
1410#ifdef CONFIG_PPC64
1411 pr_cont("IRQMASK: %lx ", regs->softe);
1412#endif
1413#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1414 if (MSR_TM_ACTIVE(regs->msr))
1415 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1416#endif
1417
1418 for (i = 0; i < 32; i++) {
1419 if ((i % REGS_PER_LINE) == 0)
1420 pr_cont("\nGPR%02d: ", i);
1421 pr_cont(REG " ", regs->gpr[i]);
1422 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1423 break;
1424 }
1425 pr_cont("\n");
1426#ifdef CONFIG_KALLSYMS
1427 /*
1428 * Lookup NIP late so we have the best change of getting the
1429 * above info out without failing
1430 */
1431 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1432 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1433#endif
1434 show_stack(current, (unsigned long *) regs->gpr[1]);
1435 if (!user_mode(regs))
1436 show_instructions(regs);
1437}
1438
1439void flush_thread(void)
1440{
1441#ifdef CONFIG_HAVE_HW_BREAKPOINT
1442 flush_ptrace_hw_breakpoint(current);
1443#else /* CONFIG_HAVE_HW_BREAKPOINT */
1444 set_debug_reg_defaults(¤t->thread);
1445#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1446}
1447
1448#ifdef CONFIG_PPC_BOOK3S_64
1449void arch_setup_new_exec(void)
1450{
1451 if (radix_enabled())
1452 return;
1453 hash__setup_new_exec();
1454}
1455#endif
1456
1457int set_thread_uses_vas(void)
1458{
1459#ifdef CONFIG_PPC_BOOK3S_64
1460 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1461 return -EINVAL;
1462
1463 current->thread.used_vas = 1;
1464
1465 /*
1466 * Even a process that has no foreign real address mapping can use
1467 * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
1468 * to clear any pending COPY and prevent a covert channel.
1469 *
1470 * __switch_to() will issue CP_ABORT on future context switches.
1471 */
1472 asm volatile(PPC_CP_ABORT);
1473
1474#endif /* CONFIG_PPC_BOOK3S_64 */
1475 return 0;
1476}
1477
1478#ifdef CONFIG_PPC64
1479/**
1480 * Assign a TIDR (thread ID) for task @t and set it in the thread
1481 * structure. For now, we only support setting TIDR for 'current' task.
1482 *
1483 * Since the TID value is a truncated form of it PID, it is possible
1484 * (but unlikely) for 2 threads to have the same TID. In the unlikely event
1485 * that 2 threads share the same TID and are waiting, one of the following
1486 * cases will happen:
1487 *
1488 * 1. The correct thread is running, the wrong thread is not
1489 * In this situation, the correct thread is woken and proceeds to pass it's
1490 * condition check.
1491 *
1492 * 2. Neither threads are running
1493 * In this situation, neither thread will be woken. When scheduled, the waiting
1494 * threads will execute either a wait, which will return immediately, followed
1495 * by a condition check, which will pass for the correct thread and fail
1496 * for the wrong thread, or they will execute the condition check immediately.
1497 *
1498 * 3. The wrong thread is running, the correct thread is not
1499 * The wrong thread will be woken, but will fail it's condition check and
1500 * re-execute wait. The correct thread, when scheduled, will execute either
1501 * it's condition check (which will pass), or wait, which returns immediately
1502 * when called the first time after the thread is scheduled, followed by it's
1503 * condition check (which will pass).
1504 *
1505 * 4. Both threads are running
1506 * Both threads will be woken. The wrong thread will fail it's condition check
1507 * and execute another wait, while the correct thread will pass it's condition
1508 * check.
1509 *
1510 * @t: the task to set the thread ID for
1511 */
1512int set_thread_tidr(struct task_struct *t)
1513{
1514 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1515 return -EINVAL;
1516
1517 if (t != current)
1518 return -EINVAL;
1519
1520 if (t->thread.tidr)
1521 return 0;
1522
1523 t->thread.tidr = (u16)task_pid_nr(t);
1524 mtspr(SPRN_TIDR, t->thread.tidr);
1525
1526 return 0;
1527}
1528EXPORT_SYMBOL_GPL(set_thread_tidr);
1529
1530#endif /* CONFIG_PPC64 */
1531
1532void
1533release_thread(struct task_struct *t)
1534{
1535}
1536
1537/*
1538 * this gets called so that we can store coprocessor state into memory and
1539 * copy the current task into the new thread.
1540 */
1541int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1542{
1543 flush_all_to_thread(src);
1544 /*
1545 * Flush TM state out so we can copy it. __switch_to_tm() does this
1546 * flush but it removes the checkpointed state from the current CPU and
1547 * transitions the CPU out of TM mode. Hence we need to call
1548 * tm_recheckpoint_new_task() (on the same task) to restore the
1549 * checkpointed state back and the TM mode.
1550 *
1551 * Can't pass dst because it isn't ready. Doesn't matter, passing
1552 * dst is only important for __switch_to()
1553 */
1554 __switch_to_tm(src, src);
1555
1556 *dst = *src;
1557
1558 clear_task_ebb(dst);
1559
1560 return 0;
1561}
1562
1563static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1564{
1565#ifdef CONFIG_PPC_BOOK3S_64
1566 unsigned long sp_vsid;
1567 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1568
1569 if (radix_enabled())
1570 return;
1571
1572 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1573 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1574 << SLB_VSID_SHIFT_1T;
1575 else
1576 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1577 << SLB_VSID_SHIFT;
1578 sp_vsid |= SLB_VSID_KERNEL | llp;
1579 p->thread.ksp_vsid = sp_vsid;
1580#endif
1581}
1582
1583/*
1584 * Copy a thread..
1585 */
1586
1587/*
1588 * Copy architecture-specific thread state
1589 */
1590int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
1591 unsigned long kthread_arg, struct task_struct *p,
1592 unsigned long tls)
1593{
1594 struct pt_regs *childregs, *kregs;
1595 extern void ret_from_fork(void);
1596 extern void ret_from_kernel_thread(void);
1597 void (*f)(void);
1598 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1599 struct thread_info *ti = task_thread_info(p);
1600
1601 klp_init_thread_info(p);
1602
1603 /* Copy registers */
1604 sp -= sizeof(struct pt_regs);
1605 childregs = (struct pt_regs *) sp;
1606 if (unlikely(p->flags & PF_KTHREAD)) {
1607 /* kernel thread */
1608 memset(childregs, 0, sizeof(struct pt_regs));
1609 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1610 /* function */
1611 if (usp)
1612 childregs->gpr[14] = ppc_function_entry((void *)usp);
1613#ifdef CONFIG_PPC64
1614 clear_tsk_thread_flag(p, TIF_32BIT);
1615 childregs->softe = IRQS_ENABLED;
1616#endif
1617 childregs->gpr[15] = kthread_arg;
1618 p->thread.regs = NULL; /* no user register state */
1619 ti->flags |= _TIF_RESTOREALL;
1620 f = ret_from_kernel_thread;
1621 } else {
1622 /* user thread */
1623 struct pt_regs *regs = current_pt_regs();
1624 CHECK_FULL_REGS(regs);
1625 *childregs = *regs;
1626 if (usp)
1627 childregs->gpr[1] = usp;
1628 p->thread.regs = childregs;
1629 childregs->gpr[3] = 0; /* Result from fork() */
1630 if (clone_flags & CLONE_SETTLS) {
1631#ifdef CONFIG_PPC64
1632 if (!is_32bit_task())
1633 childregs->gpr[13] = tls;
1634 else
1635#endif
1636 childregs->gpr[2] = tls;
1637 }
1638
1639 f = ret_from_fork;
1640 }
1641 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1642 sp -= STACK_FRAME_OVERHEAD;
1643
1644 /*
1645 * The way this works is that at some point in the future
1646 * some task will call _switch to switch to the new task.
1647 * That will pop off the stack frame created below and start
1648 * the new task running at ret_from_fork. The new task will
1649 * do some house keeping and then return from the fork or clone
1650 * system call, using the stack frame created above.
1651 */
1652 ((unsigned long *)sp)[0] = 0;
1653 sp -= sizeof(struct pt_regs);
1654 kregs = (struct pt_regs *) sp;
1655 sp -= STACK_FRAME_OVERHEAD;
1656 p->thread.ksp = sp;
1657#ifdef CONFIG_PPC32
1658 p->thread.ksp_limit = (unsigned long)end_of_stack(p);
1659#endif
1660#ifdef CONFIG_HAVE_HW_BREAKPOINT
1661 p->thread.ptrace_bps[0] = NULL;
1662#endif
1663
1664 p->thread.fp_save_area = NULL;
1665#ifdef CONFIG_ALTIVEC
1666 p->thread.vr_save_area = NULL;
1667#endif
1668
1669 setup_ksp_vsid(p, sp);
1670
1671#ifdef CONFIG_PPC64
1672 if (cpu_has_feature(CPU_FTR_DSCR)) {
1673 p->thread.dscr_inherit = current->thread.dscr_inherit;
1674 p->thread.dscr = mfspr(SPRN_DSCR);
1675 }
1676 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1677 childregs->ppr = DEFAULT_PPR;
1678
1679 p->thread.tidr = 0;
1680#endif
1681 kregs->nip = ppc_function_entry(f);
1682 return 0;
1683}
1684
1685void preload_new_slb_context(unsigned long start, unsigned long sp);
1686
1687/*
1688 * Set up a thread for executing a new program
1689 */
1690void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1691{
1692#ifdef CONFIG_PPC64
1693 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1694
1695#ifdef CONFIG_PPC_BOOK3S_64
1696 if (!radix_enabled())
1697 preload_new_slb_context(start, sp);
1698#endif
1699#endif
1700
1701 /*
1702 * If we exec out of a kernel thread then thread.regs will not be
1703 * set. Do it now.
1704 */
1705 if (!current->thread.regs) {
1706 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1707 current->thread.regs = regs - 1;
1708 }
1709
1710#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1711 /*
1712 * Clear any transactional state, we're exec()ing. The cause is
1713 * not important as there will never be a recheckpoint so it's not
1714 * user visible.
1715 */
1716 if (MSR_TM_SUSPENDED(mfmsr()))
1717 tm_reclaim_current(0);
1718#endif
1719
1720 memset(regs->gpr, 0, sizeof(regs->gpr));
1721 regs->ctr = 0;
1722 regs->link = 0;
1723 regs->xer = 0;
1724 regs->ccr = 0;
1725 regs->gpr[1] = sp;
1726
1727 /*
1728 * We have just cleared all the nonvolatile GPRs, so make
1729 * FULL_REGS(regs) return true. This is necessary to allow
1730 * ptrace to examine the thread immediately after exec.
1731 */
1732 regs->trap &= ~1UL;
1733
1734#ifdef CONFIG_PPC32
1735 regs->mq = 0;
1736 regs->nip = start;
1737 regs->msr = MSR_USER;
1738#else
1739 if (!is_32bit_task()) {
1740 unsigned long entry;
1741
1742 if (is_elf2_task()) {
1743 /* Look ma, no function descriptors! */
1744 entry = start;
1745
1746 /*
1747 * Ulrich says:
1748 * The latest iteration of the ABI requires that when
1749 * calling a function (at its global entry point),
1750 * the caller must ensure r12 holds the entry point
1751 * address (so that the function can quickly
1752 * establish addressability).
1753 */
1754 regs->gpr[12] = start;
1755 /* Make sure that's restored on entry to userspace. */
1756 set_thread_flag(TIF_RESTOREALL);
1757 } else {
1758 unsigned long toc;
1759
1760 /* start is a relocated pointer to the function
1761 * descriptor for the elf _start routine. The first
1762 * entry in the function descriptor is the entry
1763 * address of _start and the second entry is the TOC
1764 * value we need to use.
1765 */
1766 __get_user(entry, (unsigned long __user *)start);
1767 __get_user(toc, (unsigned long __user *)start+1);
1768
1769 /* Check whether the e_entry function descriptor entries
1770 * need to be relocated before we can use them.
1771 */
1772 if (load_addr != 0) {
1773 entry += load_addr;
1774 toc += load_addr;
1775 }
1776 regs->gpr[2] = toc;
1777 }
1778 regs->nip = entry;
1779 regs->msr = MSR_USER64;
1780 } else {
1781 regs->nip = start;
1782 regs->gpr[2] = 0;
1783 regs->msr = MSR_USER32;
1784 }
1785#endif
1786#ifdef CONFIG_VSX
1787 current->thread.used_vsr = 0;
1788#endif
1789 current->thread.load_slb = 0;
1790 current->thread.load_fp = 0;
1791	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1792 current->thread.fp_save_area = NULL;
1793#ifdef CONFIG_ALTIVEC
1794	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1795 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1796 current->thread.vr_save_area = NULL;
1797 current->thread.vrsave = 0;
1798 current->thread.used_vr = 0;
1799 current->thread.load_vec = 0;
1800#endif /* CONFIG_ALTIVEC */
1801#ifdef CONFIG_SPE
1802 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1803 current->thread.acc = 0;
1804 current->thread.spefscr = 0;
1805 current->thread.used_spe = 0;
1806#endif /* CONFIG_SPE */
1807#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1808 current->thread.tm_tfhar = 0;
1809 current->thread.tm_texasr = 0;
1810 current->thread.tm_tfiar = 0;
1811 current->thread.load_tm = 0;
1812#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1813
1814	thread_pkey_regs_init(&current->thread);
1815}
1816EXPORT_SYMBOL(start_thread);
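/*
 * The ELFv1 path above treats e_entry as a pointer to a function descriptor
 * rather than to code.  A sketch of that descriptor's layout (the struct
 * name is illustrative, not the kernel's own definition):
 *
 *	struct elfv1_func_desc {
 *		unsigned long entry;	-- address of the first instruction
 *		unsigned long toc;	-- TOC (r2) value for the function
 *		unsigned long env;	-- environment pointer, unused by C
 *	};
 *
 * start_thread() reads entry and toc with __get_user() and relocates both
 * by load_addr for position-independent binaries.  ELFv2 has no
 * descriptors: e_entry is the code address itself, and r12 must carry it
 * so the callee can establish its TOC at the global entry point.
 */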
1817
1818#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1819 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1820
1821int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1822{
1823 struct pt_regs *regs = tsk->thread.regs;
1824
1825	/* This is a bit hairy. If we are an SPE-enabled processor
1826	 * (have embedded fp) we store the IEEE exception enable flags in
1827	 * fpexc_mode. fpexc_mode is also used for setting FP exception
1828	 * mode (async, precise, disabled) for 'Classic' FP. */
1829 if (val & PR_FP_EXC_SW_ENABLE) {
1830#ifdef CONFIG_SPE
1831 if (cpu_has_feature(CPU_FTR_SPE)) {
1832 /*
1833 * When the sticky exception bits are set
1834 * directly by userspace, it must call prctl
1835 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1836 * in the existing prctl settings) or
1837 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1838 * the bits being set). <fenv.h> functions
1839 * saving and restoring the whole
1840 * floating-point environment need to do so
1841 * anyway to restore the prctl settings from
1842 * the saved environment.
1843 */
1844 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1845 tsk->thread.fpexc_mode = val &
1846 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1847 return 0;
1848 } else {
1849 return -EINVAL;
1850 }
1851#else
1852 return -EINVAL;
1853#endif
1854 }
1855
1856	/* On a CONFIG_SPE kernel this does not hurt us. The bits that
1857	 * __pack_fe01 uses do not overlap with the bits used for
1858	 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
1859	 * are reserved on CONFIG_SPE implementations, so writing to
1860	 * them does not change anything. */
1861 if (val > PR_FP_EXC_PRECISE)
1862 return -EINVAL;
1863 tsk->thread.fpexc_mode = __pack_fe01(val);
1864 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1865 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1866 | tsk->thread.fpexc_mode;
1867 return 0;
1868}
1869
1870int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1871{
1872 unsigned int val;
1873
1874 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1875#ifdef CONFIG_SPE
1876 if (cpu_has_feature(CPU_FTR_SPE)) {
1877 /*
1878 * When the sticky exception bits are set
1879 * directly by userspace, it must call prctl
1880 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1881 * in the existing prctl settings) or
1882 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1883 * the bits being set). <fenv.h> functions
1884 * saving and restoring the whole
1885 * floating-point environment need to do so
1886 * anyway to restore the prctl settings from
1887 * the saved environment.
1888 */
1889 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1890 val = tsk->thread.fpexc_mode;
1891 } else
1892 return -EINVAL;
1893#else
1894 return -EINVAL;
1895#endif
1896 else
1897 val = __unpack_fe01(tsk->thread.fpexc_mode);
1898 return put_user(val, (unsigned int __user *) adr);
1899}
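/*
 * set_fpexc_mode()/get_fpexc_mode() are reached from sys_prctl().  A
 * minimal userspace sketch (illustrative only, error handling elided):
 *
 *	#include <sys/prctl.h>
 *
 *	static int use_precise_fp_exceptions(void)
 *	{
 *		unsigned int mode;
 *
 *		// Request precise FP exception mode, then read it back.
 *		if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE))
 *			return -1;
 *		return prctl(PR_GET_FPEXC, (unsigned long)&mode);
 *	}
 *
 * On classic FP the value is packed into MSR[FE0,FE1] by __pack_fe01();
 * with PR_FP_EXC_SW_ENABLE on SPE the selected exception bits are kept in
 * thread.fpexc_mode instead, for software handling of the sticky bits.
 */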
1900
1901int set_endian(struct task_struct *tsk, unsigned int val)
1902{
1903 struct pt_regs *regs = tsk->thread.regs;
1904
1905 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1906 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1907 return -EINVAL;
1908
1909 if (regs == NULL)
1910 return -EINVAL;
1911
1912 if (val == PR_ENDIAN_BIG)
1913 regs->msr &= ~MSR_LE;
1914 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1915 regs->msr |= MSR_LE;
1916 else
1917 return -EINVAL;
1918
1919 return 0;
1920}
1921
1922int get_endian(struct task_struct *tsk, unsigned long adr)
1923{
1924 struct pt_regs *regs = tsk->thread.regs;
1925 unsigned int val;
1926
1927 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1928 !cpu_has_feature(CPU_FTR_REAL_LE))
1929 return -EINVAL;
1930
1931 if (regs == NULL)
1932 return -EINVAL;
1933
1934 if (regs->msr & MSR_LE) {
1935 if (cpu_has_feature(CPU_FTR_REAL_LE))
1936 val = PR_ENDIAN_LITTLE;
1937 else
1938 val = PR_ENDIAN_PPC_LITTLE;
1939 } else
1940 val = PR_ENDIAN_BIG;
1941
1942 return put_user(val, (unsigned int __user *)adr);
1943}
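/*
 * set_endian()/get_endian() back prctl(PR_SET_ENDIAN)/prctl(PR_GET_ENDIAN).
 * A minimal userspace sketch (illustrative only):
 *
 *	#include <sys/prctl.h>
 *
 *	unsigned int endian;
 *
 *	// Ask for true little-endian mode (requires CPU_FTR_REAL_LE), then
 *	// read back what the kernel actually reports from MSR[LE].
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *	prctl(PR_GET_ENDIAN, (unsigned long)&endian);
 */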
1944
1945int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1946{
1947 tsk->thread.align_ctl = val;
1948 return 0;
1949}
1950
1951int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1952{
1953 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1954}
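/*
 * align_ctl is consulted when an alignment interrupt is taken; userspace
 * selects the policy via prctl(PR_SET_UNALIGN).  Sketch (illustrative only):
 *
 *	// Deliver SIGBUS on unaligned accesses instead of emulating them.
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);
 */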
1955
1956static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1957 unsigned long nbytes)
1958{
1959 unsigned long stack_page;
1960 unsigned long cpu = task_cpu(p);
1961
1962 stack_page = (unsigned long)hardirq_ctx[cpu];
1963 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1964 return 1;
1965
1966 stack_page = (unsigned long)softirq_ctx[cpu];
1967 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1968 return 1;
1969
1970 return 0;
1971}
1972
1973int validate_sp(unsigned long sp, struct task_struct *p,
1974 unsigned long nbytes)
1975{
1976 unsigned long stack_page = (unsigned long)task_stack_page(p);
1977
1978 if (sp < THREAD_SIZE)
1979 return 0;
1980
1981 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1982 return 1;
1983
1984 return valid_irq_stack(sp, p, nbytes);
1985}
1986
1987EXPORT_SYMBOL(validate_sp);
1988
1989static unsigned long __get_wchan(struct task_struct *p)
1990{
1991 unsigned long ip, sp;
1992 int count = 0;
1993
1994 if (!p || p == current || p->state == TASK_RUNNING)
1995 return 0;
1996
1997 sp = p->thread.ksp;
1998 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1999 return 0;
2000
2001 do {
2002 sp = *(unsigned long *)sp;
2003 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2004 p->state == TASK_RUNNING)
2005 return 0;
2006 if (count > 0) {
2007 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
2008 if (!in_sched_functions(ip))
2009 return ip;
2010 }
2011 } while (count++ < 16);
2012 return 0;
2013}
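/*
 * The walk above relies on the ABI stack frame layout: word 0 of each
 * frame is the back chain (the caller's stack pointer) and the word at
 * STACK_FRAME_LR_SAVE is a saved return address.  Roughly:
 *
 *	sp ->	[0]			back chain -> previous frame
 *		...
 *		[STACK_FRAME_LR_SAVE]	saved LR (return address)
 *
 * The first frame is skipped (count > 0) because it belongs to the
 * context-switch path itself; the first saved LR outside
 * in_sched_functions() is reported as the wait channel.
 */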
2014
2015unsigned long get_wchan(struct task_struct *p)
2016{
2017 unsigned long ret;
2018
2019 if (!try_get_task_stack(p))
2020 return 0;
2021
2022 ret = __get_wchan(p);
2023
2024 put_task_stack(p);
2025
2026 return ret;
2027}
2028
2029static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2030
2031void show_stack(struct task_struct *tsk, unsigned long *stack)
2032{
2033 unsigned long sp, ip, lr, newsp;
2034 int count = 0;
2035 int firstframe = 1;
2036#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2037 unsigned long ret_addr;
2038 int ftrace_idx = 0;
2039#endif
2040
2041 if (tsk == NULL)
2042 tsk = current;
2043
2044 if (!try_get_task_stack(tsk))
2045 return;
2046
2047 sp = (unsigned long) stack;
2048 if (sp == 0) {
2049 if (tsk == current)
2050 sp = current_stack_pointer();
2051 else
2052 sp = tsk->thread.ksp;
2053 }
2054
2055 lr = 0;
2056 printk("Call Trace:\n");
2057 do {
2058 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2059 break;
2060
2061 stack = (unsigned long *) sp;
2062 newsp = stack[0];
2063 ip = stack[STACK_FRAME_LR_SAVE];
2064 if (!firstframe || ip != lr) {
2065 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2066#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2067 ret_addr = ftrace_graph_ret_addr(current,
2068 &ftrace_idx, ip, stack);
2069 if (ret_addr != ip)
2070 pr_cont(" (%pS)", (void *)ret_addr);
2071#endif
2072 if (firstframe)
2073 pr_cont(" (unreliable)");
2074 pr_cont("\n");
2075 }
2076 firstframe = 0;
2077
2078 /*
2079 * See if this is an exception frame.
2080 * We look for the "regshere" marker in the current frame.
2081 */
2082 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
2083 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2084 struct pt_regs *regs = (struct pt_regs *)
2085 (sp + STACK_FRAME_OVERHEAD);
2086 lr = regs->link;
2087 printk("--- interrupt: %lx at %pS\n LR = %pS\n",
2088 regs->trap, (void *)regs->nip, (void *)lr);
2089 firstframe = 1;
2090 }
2091
2092 sp = newsp;
2093 } while (count++ < kstack_depth_to_print);
2094
2095 put_task_stack(tsk);
2096}
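/*
 * Exception frames are recognised purely from the stack contents:
 * interrupt entry stores a struct pt_regs at sp + STACK_FRAME_OVERHEAD and
 * the STACK_FRAME_REGS_MARKER magic ("regshere") at stack[STACK_FRAME_MARKER].
 * Roughly:
 *
 *	sp ->	[0]			back chain
 *		[STACK_FRAME_LR_SAVE]	saved LR
 *		[STACK_FRAME_MARKER]	STACK_FRAME_REGS_MARKER
 *	sp + STACK_FRAME_OVERHEAD	struct pt_regs saved at interrupt entry
 *
 * When the marker matches, the trace prints the interrupted NIP and LR and
 * sets firstframe again, since the frame above the pt_regs may not have a
 * reliable LR save slot.
 */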
2097
2098#ifdef CONFIG_PPC64
2099/* Called with hard IRQs off */
2100void notrace __ppc64_runlatch_on(void)
2101{
2102 struct thread_info *ti = current_thread_info();
2103
2104 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2105 /*
2106 * Least significant bit (RUN) is the only writable bit of
2107 * the CTRL register, so we can avoid mfspr. 2.06 is not the
2108 * earliest ISA where this is the case, but it's convenient.
2109 */
2110 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2111 } else {
2112 unsigned long ctrl;
2113
2114 /*
2115 * Some architectures (e.g., Cell) have writable fields other
2116 * than RUN, so do the read-modify-write.
2117 */
2118 ctrl = mfspr(SPRN_CTRLF);
2119 ctrl |= CTRL_RUNLATCH;
2120 mtspr(SPRN_CTRLT, ctrl);
2121 }
2122
2123 ti->local_flags |= _TLF_RUNLATCH;
2124}
2125
2126/* Called with hard IRQs off */
2127void notrace __ppc64_runlatch_off(void)
2128{
2129 struct thread_info *ti = current_thread_info();
2130
2131 ti->local_flags &= ~_TLF_RUNLATCH;
2132
2133 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2134 mtspr(SPRN_CTRLT, 0);
2135 } else {
2136 unsigned long ctrl;
2137
2138 ctrl = mfspr(SPRN_CTRLF);
2139 ctrl &= ~CTRL_RUNLATCH;
2140 mtspr(SPRN_CTRLT, ctrl);
2141 }
2142}
2143#endif /* CONFIG_PPC64 */
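/*
 * Callers normally use the ppc64_runlatch_on()/ppc64_runlatch_off()
 * wrappers from <asm/runlatch.h>, which check _TLF_RUNLATCH so the SPR is
 * only written on real transitions.  A rough sketch of the idle-loop usage
 * (illustrative only):
 *
 *	ppc64_runlatch_off();		// going idle, drop the run latch
 *	while (!need_resched())
 *		;			// wait for work (power_save, etc.)
 *	ppc64_runlatch_on();		// back to useful work
 */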
2144
2145unsigned long arch_align_stack(unsigned long sp)
2146{
2147 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2148 sp -= get_random_int() & ~PAGE_MASK;
2149 return sp & ~0xf;
2150}
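/*
 * Worked example (illustrative, assuming 4K pages): the random offset is
 * get_random_int() & ~PAGE_MASK, i.e. 0..4095 bytes.  With sp = 0x7fff0000
 * and an offset of 0x6a7 the result is 0x7ffef959, which the final mask
 * rounds down to the 16-byte-aligned 0x7ffef950.
 */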
2151
2152static inline unsigned long brk_rnd(void)
2153{
2154 unsigned long rnd = 0;
2155
2156 /* 8MB for 32bit, 1GB for 64bit */
2157 if (is_32bit_task())
2158 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2159 else
2160 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2161
2162 return rnd << PAGE_SHIFT;
2163}
2164
2165unsigned long arch_randomize_brk(struct mm_struct *mm)
2166{
2167 unsigned long base = mm->brk;
2168 unsigned long ret;
2169
2170#ifdef CONFIG_PPC_BOOK3S_64
2171 /*
2172 * If we are using 1TB segments and we are allowed to randomise
2173 * the heap, we can put it above 1TB so it is backed by a 1TB
2174 * segment. Otherwise the heap will be in the bottom 1TB
2175 * which always uses 256MB segments and this may result in a
2176 * performance penalty. We don't need to worry about radix. For
2177 * radix, mmu_highuser_ssize remains unchanged from 256MB.
2178 */
2179 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2180 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2181#endif
2182
2183 ret = PAGE_ALIGN(base + brk_rnd());
2184
2185 if (ret < mm->brk)
2186 return mm->brk;
2187
2188 return ret;
2189}
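/*
 * Worked example (illustrative, assuming 64K pages, PAGE_SHIFT = 16): a
 * 64-bit task gets rnd = get_random_long() % (1UL << 14) pages, i.e. up to
 * just under 1GB of randomisation above the ELF brk.  On hash MMUs with
 * 1TB segments the base is first raised to at least 1UL << SID_SHIFT_1T so
 * the randomised heap lands in a 1TB-backed segment rather than in the
 * 256MB-segment region below 1TB.
 */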
2190