1/*
2 * Derived from "arch/i386/kernel/process.c"
3 * Copyright (C) 1995 Linus Torvalds
4 *
5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6 * Paul Mackerras (paulus@cs.anu.edu.au)
7 *
8 * PowerPC version
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/sched/debug.h>
20#include <linux/sched/task.h>
21#include <linux/sched/task_stack.h>
22#include <linux/kernel.h>
23#include <linux/mm.h>
24#include <linux/smp.h>
25#include <linux/stddef.h>
26#include <linux/unistd.h>
27#include <linux/ptrace.h>
28#include <linux/slab.h>
29#include <linux/user.h>
30#include <linux/elf.h>
31#include <linux/prctl.h>
32#include <linux/init_task.h>
33#include <linux/export.h>
34#include <linux/kallsyms.h>
35#include <linux/mqueue.h>
36#include <linux/hardirq.h>
37#include <linux/utsname.h>
38#include <linux/ftrace.h>
39#include <linux/kernel_stat.h>
40#include <linux/personality.h>
41#include <linux/random.h>
42#include <linux/hw_breakpoint.h>
43#include <linux/uaccess.h>
44#include <linux/elf-randomize.h>
45#include <linux/pkeys.h>
46
47#include <asm/pgtable.h>
48#include <asm/io.h>
49#include <asm/processor.h>
50#include <asm/mmu.h>
51#include <asm/prom.h>
52#include <asm/machdep.h>
53#include <asm/time.h>
54#include <asm/runlatch.h>
55#include <asm/syscalls.h>
56#include <asm/switch_to.h>
57#include <asm/tm.h>
58#include <asm/debug.h>
59#ifdef CONFIG_PPC64
60#include <asm/firmware.h>
61#include <asm/hw_irq.h>
62#endif
63#include <asm/code-patching.h>
64#include <asm/exec.h>
65#include <asm/livepatch.h>
66#include <asm/cpu_has_feature.h>
67#include <asm/asm-prototypes.h>
68
69#include <linux/kprobes.h>
70#include <linux/kdebug.h>
71
72/* Transactional Memory debug */
73#ifdef TM_DEBUG_SW
74#define TM_DEBUG(x...) printk(KERN_INFO x)
75#else
76#define TM_DEBUG(x...) do { } while(0)
77#endif
78
79extern unsigned long _get_SP(void);
80
81#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
82/*
83 * Are we running in "Suspend disabled" mode? If so we have to block any
84 * sigreturn that would get us into suspended state, and we also warn in some
85 * other paths that we should never reach with suspend disabled.
86 */
87bool tm_suspend_disabled __ro_after_init = false;
88
89static void check_if_tm_restore_required(struct task_struct *tsk)
90{
91 /*
92 * If we are saving the current thread's registers, and the
93 * thread is in a transactional state, set the TIF_RESTORE_TM
94 * bit so that we know to restore the registers before
95 * returning to userspace.
96 */
97 if (tsk == current && tsk->thread.regs &&
98 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
99 !test_thread_flag(TIF_RESTORE_TM)) {
100 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
101 set_thread_flag(TIF_RESTORE_TM);
102 }
103}
104
105static inline bool msr_tm_active(unsigned long msr)
106{
107 return MSR_TM_ACTIVE(msr);
108}
109
110static bool tm_active_with_fp(struct task_struct *tsk)
111{
112 return msr_tm_active(tsk->thread.regs->msr) &&
113 (tsk->thread.ckpt_regs.msr & MSR_FP);
114}
115
116static bool tm_active_with_altivec(struct task_struct *tsk)
117{
118 return msr_tm_active(tsk->thread.regs->msr) &&
119 (tsk->thread.ckpt_regs.msr & MSR_VEC);
120}
121#else
122static inline bool msr_tm_active(unsigned long msr) { return false; }
123static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
124static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
125static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
126#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
127
128bool strict_msr_control;
129EXPORT_SYMBOL(strict_msr_control);
130
131static int __init enable_strict_msr_control(char *str)
132{
133 strict_msr_control = true;
134 pr_info("Enabling strict facility control\n");
135
136 return 0;
137}
138early_param("ppc_strict_facility_enable", enable_strict_msr_control);
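/*
 * Note: the effect of this flag is in the msr_check_and_clear() wrapper in
 * asm/switch_to.h, which (as far as this file assumes) only calls
 * __msr_check_and_clear() when strict facility control is enabled; otherwise
 * facility bits are left enabled as an optimisation.
 */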
139
140unsigned long msr_check_and_set(unsigned long bits)
141{
142 unsigned long oldmsr = mfmsr();
143 unsigned long newmsr;
144
145 newmsr = oldmsr | bits;
146
147#ifdef CONFIG_VSX
148 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
149 newmsr |= MSR_VSX;
150#endif
151
152 if (oldmsr != newmsr)
153 mtmsr_isync(newmsr);
154
155 return newmsr;
156}
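/*
 * The value returned above is the MSR after the requested bits were set. The
 * TM bits are never set here, only carried over from the old MSR, which is
 * why callers such as enable_kernel_fp() below can use the return value to
 * tell whether the CPU was still transactionally active on entry.
 */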
157
158void __msr_check_and_clear(unsigned long bits)
159{
160 unsigned long oldmsr = mfmsr();
161 unsigned long newmsr;
162
163 newmsr = oldmsr & ~bits;
164
165#ifdef CONFIG_VSX
166 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
167 newmsr &= ~MSR_VSX;
168#endif
169
170 if (oldmsr != newmsr)
171 mtmsr_isync(newmsr);
172}
173EXPORT_SYMBOL(__msr_check_and_clear);
174
175#ifdef CONFIG_PPC_FPU
176static void __giveup_fpu(struct task_struct *tsk)
177{
178 unsigned long msr;
179
180 save_fpu(tsk);
181 msr = tsk->thread.regs->msr;
182 msr &= ~MSR_FP;
183#ifdef CONFIG_VSX
184 if (cpu_has_feature(CPU_FTR_VSX))
185 msr &= ~MSR_VSX;
186#endif
187 tsk->thread.regs->msr = msr;
188}
189
190void giveup_fpu(struct task_struct *tsk)
191{
192 check_if_tm_restore_required(tsk);
193
194 msr_check_and_set(MSR_FP);
195 __giveup_fpu(tsk);
196 msr_check_and_clear(MSR_FP);
197}
198EXPORT_SYMBOL(giveup_fpu);
199
200/*
 201 * Make sure the floating-point register state in the
 202 * thread_struct is up to date for task tsk.
203 */
204void flush_fp_to_thread(struct task_struct *tsk)
205{
206 if (tsk->thread.regs) {
207 /*
208 * We need to disable preemption here because if we didn't,
209 * another process could get scheduled after the regs->msr
210 * test but before we have finished saving the FP registers
211 * to the thread_struct. That process could take over the
212 * FPU, and then when we get scheduled again we would store
213 * bogus values for the remaining FP registers.
214 */
215 preempt_disable();
216 if (tsk->thread.regs->msr & MSR_FP) {
217 /*
218 * This should only ever be called for current or
219 * for a stopped child process. Since we save away
220 * the FP register state on context switch,
221 * there is something wrong if a stopped child appears
222 * to still have its FP state in the CPU registers.
223 */
224 BUG_ON(tsk != current);
225 giveup_fpu(tsk);
226 }
227 preempt_enable();
228 }
229}
230EXPORT_SYMBOL_GPL(flush_fp_to_thread);
231
232void enable_kernel_fp(void)
233{
234 unsigned long cpumsr;
235
236 WARN_ON(preemptible());
237
238 cpumsr = msr_check_and_set(MSR_FP);
239
240 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
241 check_if_tm_restore_required(current);
242 /*
243 * If a thread has already been reclaimed then the
244 * checkpointed registers are on the CPU but have definitely
245 * been saved by the reclaim code. Don't need to and *cannot*
246 * giveup as this would save to the 'live' structure not the
247 * checkpointed structure.
248 */
 249 if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
250 return;
251 __giveup_fpu(current);
252 }
253}
254EXPORT_SYMBOL(enable_kernel_fp);
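/*
 * A minimal usage sketch for kernel code that wants the FP/VSX unit,
 * assuming the disable_kernel_fp() wrapper from asm/switch_to.h (not
 * defined in this file):
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use FP/VSX registers ...
 *	disable_kernel_fp();
 *	preempt_enable();
 *
 * enable_kernel_fp() itself warns if it is called from preemptible context.
 */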
255
256static int restore_fp(struct task_struct *tsk)
257{
258 if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
 259 load_fp_state(&current->thread.fp_state);
260 current->thread.load_fp++;
261 return 1;
262 }
263 return 0;
264}
265#else
266static int restore_fp(struct task_struct *tsk) { return 0; }
267#endif /* CONFIG_PPC_FPU */
268
269#ifdef CONFIG_ALTIVEC
270#define loadvec(thr) ((thr).load_vec)
271
272static void __giveup_altivec(struct task_struct *tsk)
273{
274 unsigned long msr;
275
276 save_altivec(tsk);
277 msr = tsk->thread.regs->msr;
278 msr &= ~MSR_VEC;
279#ifdef CONFIG_VSX
280 if (cpu_has_feature(CPU_FTR_VSX))
281 msr &= ~MSR_VSX;
282#endif
283 tsk->thread.regs->msr = msr;
284}
285
286void giveup_altivec(struct task_struct *tsk)
287{
288 check_if_tm_restore_required(tsk);
289
290 msr_check_and_set(MSR_VEC);
291 __giveup_altivec(tsk);
292 msr_check_and_clear(MSR_VEC);
293}
294EXPORT_SYMBOL(giveup_altivec);
295
296void enable_kernel_altivec(void)
297{
298 unsigned long cpumsr;
299
300 WARN_ON(preemptible());
301
302 cpumsr = msr_check_and_set(MSR_VEC);
303
304 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
305 check_if_tm_restore_required(current);
306 /*
307 * If a thread has already been reclaimed then the
308 * checkpointed registers are on the CPU but have definitely
309 * been saved by the reclaim code. Don't need to and *cannot*
310 * giveup as this would save to the 'live' structure not the
311 * checkpointed structure.
312 */
 313 if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
314 return;
315 __giveup_altivec(current);
316 }
317}
318EXPORT_SYMBOL(enable_kernel_altivec);
319
320/*
 321 * Make sure the VMX/Altivec register state in the
 322 * thread_struct is up to date for task tsk.
323 */
324void flush_altivec_to_thread(struct task_struct *tsk)
325{
326 if (tsk->thread.regs) {
327 preempt_disable();
328 if (tsk->thread.regs->msr & MSR_VEC) {
329 BUG_ON(tsk != current);
330 giveup_altivec(tsk);
331 }
332 preempt_enable();
333 }
334}
335EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
336
337static int restore_altivec(struct task_struct *tsk)
338{
339 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
340 (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
341 load_vr_state(&tsk->thread.vr_state);
342 tsk->thread.used_vr = 1;
343 tsk->thread.load_vec++;
344
345 return 1;
346 }
347 return 0;
348}
349#else
350#define loadvec(thr) 0
351static inline int restore_altivec(struct task_struct *tsk) { return 0; }
352#endif /* CONFIG_ALTIVEC */
353
354#ifdef CONFIG_VSX
355static void __giveup_vsx(struct task_struct *tsk)
356{
357 unsigned long msr = tsk->thread.regs->msr;
358
359 /*
 360 * We should never be setting MSR_VSX without also setting
361 * MSR_FP and MSR_VEC
362 */
363 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
364
365 /* __giveup_fpu will clear MSR_VSX */
366 if (msr & MSR_FP)
367 __giveup_fpu(tsk);
368 if (msr & MSR_VEC)
369 __giveup_altivec(tsk);
370}
371
372static void giveup_vsx(struct task_struct *tsk)
373{
374 check_if_tm_restore_required(tsk);
375
376 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
377 __giveup_vsx(tsk);
378 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
379}
380
381void enable_kernel_vsx(void)
382{
383 unsigned long cpumsr;
384
385 WARN_ON(preemptible());
386
387 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
388
389 if (current->thread.regs &&
390 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
391 check_if_tm_restore_required(current);
392 /*
393 * If a thread has already been reclaimed then the
394 * checkpointed registers are on the CPU but have definitely
395 * been saved by the reclaim code. Don't need to and *cannot*
396 * giveup as this would save to the 'live' structure not the
397 * checkpointed structure.
398 */
 399 if (!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
400 return;
401 __giveup_vsx(current);
402 }
403}
404EXPORT_SYMBOL(enable_kernel_vsx);
405
406void flush_vsx_to_thread(struct task_struct *tsk)
407{
408 if (tsk->thread.regs) {
409 preempt_disable();
410 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
411 BUG_ON(tsk != current);
412 giveup_vsx(tsk);
413 }
414 preempt_enable();
415 }
416}
417EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
418
419static int restore_vsx(struct task_struct *tsk)
420{
421 if (cpu_has_feature(CPU_FTR_VSX)) {
422 tsk->thread.used_vsr = 1;
423 return 1;
424 }
425
426 return 0;
427}
428#else
429static inline int restore_vsx(struct task_struct *tsk) { return 0; }
430#endif /* CONFIG_VSX */
431
432#ifdef CONFIG_SPE
433void giveup_spe(struct task_struct *tsk)
434{
435 check_if_tm_restore_required(tsk);
436
437 msr_check_and_set(MSR_SPE);
438 __giveup_spe(tsk);
439 msr_check_and_clear(MSR_SPE);
440}
441EXPORT_SYMBOL(giveup_spe);
442
443void enable_kernel_spe(void)
444{
445 WARN_ON(preemptible());
446
447 msr_check_and_set(MSR_SPE);
448
449 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
450 check_if_tm_restore_required(current);
451 __giveup_spe(current);
452 }
453}
454EXPORT_SYMBOL(enable_kernel_spe);
455
456void flush_spe_to_thread(struct task_struct *tsk)
457{
458 if (tsk->thread.regs) {
459 preempt_disable();
460 if (tsk->thread.regs->msr & MSR_SPE) {
461 BUG_ON(tsk != current);
462 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
463 giveup_spe(tsk);
464 }
465 preempt_enable();
466 }
467}
468#endif /* CONFIG_SPE */
469
470static unsigned long msr_all_available;
471
472static int __init init_msr_all_available(void)
473{
474#ifdef CONFIG_PPC_FPU
475 msr_all_available |= MSR_FP;
476#endif
477#ifdef CONFIG_ALTIVEC
478 if (cpu_has_feature(CPU_FTR_ALTIVEC))
479 msr_all_available |= MSR_VEC;
480#endif
481#ifdef CONFIG_VSX
482 if (cpu_has_feature(CPU_FTR_VSX))
483 msr_all_available |= MSR_VSX;
484#endif
485#ifdef CONFIG_SPE
486 if (cpu_has_feature(CPU_FTR_SPE))
487 msr_all_available |= MSR_SPE;
488#endif
489
490 return 0;
491}
492early_initcall(init_msr_all_available);
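/*
 * msr_all_available is the mask of facility bits this CPU can have enabled;
 * giveup_all(), save_all() and restore_math() below use it so that they only
 * touch facilities that actually exist on this hardware.
 */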
493
494void giveup_all(struct task_struct *tsk)
495{
496 unsigned long usermsr;
497
498 if (!tsk->thread.regs)
499 return;
500
501 usermsr = tsk->thread.regs->msr;
502
503 if ((usermsr & msr_all_available) == 0)
504 return;
505
506 msr_check_and_set(msr_all_available);
507 check_if_tm_restore_required(tsk);
508
509 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
510
511#ifdef CONFIG_PPC_FPU
512 if (usermsr & MSR_FP)
513 __giveup_fpu(tsk);
514#endif
515#ifdef CONFIG_ALTIVEC
516 if (usermsr & MSR_VEC)
517 __giveup_altivec(tsk);
518#endif
519#ifdef CONFIG_SPE
520 if (usermsr & MSR_SPE)
521 __giveup_spe(tsk);
522#endif
523
524 msr_check_and_clear(msr_all_available);
525}
526EXPORT_SYMBOL(giveup_all);
527
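/*
 * Reload FP/VMX/VSX state for the current task if it has been using them
 * (load_fp/load_vec) or is in a transaction, so that the return to userspace
 * does not immediately take an "unavailable" exception. The caller is
 * assumed to be the interrupt/syscall return path, which lives outside this
 * file.
 */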
528void restore_math(struct pt_regs *regs)
529{
530 unsigned long msr;
531
532 if (!msr_tm_active(regs->msr) &&
533 !current->thread.load_fp && !loadvec(current->thread))
534 return;
535
536 msr = regs->msr;
537 msr_check_and_set(msr_all_available);
538
539 /*
 540 * Only reload if the bit is not set in the user MSR; the bit BEING set
 541 * indicates that the registers are hot.
542 */
543 if ((!(msr & MSR_FP)) && restore_fp(current))
544 msr |= MSR_FP | current->thread.fpexc_mode;
545
546 if ((!(msr & MSR_VEC)) && restore_altivec(current))
547 msr |= MSR_VEC;
548
549 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
550 restore_vsx(current)) {
551 msr |= MSR_VSX;
552 }
553
554 msr_check_and_clear(msr_all_available);
555
556 regs->msr = msr;
557}
558
559static void save_all(struct task_struct *tsk)
560{
561 unsigned long usermsr;
562
563 if (!tsk->thread.regs)
564 return;
565
566 usermsr = tsk->thread.regs->msr;
567
568 if ((usermsr & msr_all_available) == 0)
569 return;
570
571 msr_check_and_set(msr_all_available);
572
573 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
574
575 if (usermsr & MSR_FP)
576 save_fpu(tsk);
577
578 if (usermsr & MSR_VEC)
579 save_altivec(tsk);
580
581 if (usermsr & MSR_SPE)
582 __giveup_spe(tsk);
583
584 msr_check_and_clear(msr_all_available);
585}
586
587void flush_all_to_thread(struct task_struct *tsk)
588{
589 if (tsk->thread.regs) {
590 preempt_disable();
591 BUG_ON(tsk != current);
592 save_all(tsk);
593
594#ifdef CONFIG_SPE
595 if (tsk->thread.regs->msr & MSR_SPE)
596 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
597#endif
598
599 preempt_enable();
600 }
601}
602EXPORT_SYMBOL(flush_all_to_thread);
603
604#ifdef CONFIG_PPC_ADV_DEBUG_REGS
605void do_send_trap(struct pt_regs *regs, unsigned long address,
606 unsigned long error_code, int breakpt)
607{
608 current->thread.trap_nr = TRAP_HWBKPT;
609 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
610 11, SIGSEGV) == NOTIFY_STOP)
611 return;
612
613 /* Deliver the signal to userspace */
614 force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
615 (void __user *)address);
616}
617#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
618void do_break (struct pt_regs *regs, unsigned long address,
619 unsigned long error_code)
620{
621 siginfo_t info;
622
623 current->thread.trap_nr = TRAP_HWBKPT;
624 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
625 11, SIGSEGV) == NOTIFY_STOP)
626 return;
627
628 if (debugger_break_match(regs))
629 return;
630
631 /* Clear the breakpoint */
632 hw_breakpoint_disable();
633
634 /* Deliver the signal to userspace */
635 info.si_signo = SIGTRAP;
636 info.si_errno = 0;
637 info.si_code = TRAP_HWBKPT;
638 info.si_addr = (void __user *)address;
639 force_sig_info(SIGTRAP, &info, current);
640}
641#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
642
643static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
644
645#ifdef CONFIG_PPC_ADV_DEBUG_REGS
646/*
647 * Set the debug registers back to their default "safe" values.
648 */
649static void set_debug_reg_defaults(struct thread_struct *thread)
650{
651 thread->debug.iac1 = thread->debug.iac2 = 0;
652#if CONFIG_PPC_ADV_DEBUG_IACS > 2
653 thread->debug.iac3 = thread->debug.iac4 = 0;
654#endif
655 thread->debug.dac1 = thread->debug.dac2 = 0;
656#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
657 thread->debug.dvc1 = thread->debug.dvc2 = 0;
658#endif
659 thread->debug.dbcr0 = 0;
660#ifdef CONFIG_BOOKE
661 /*
662 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
663 */
664 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
665 DBCR1_IAC3US | DBCR1_IAC4US;
666 /*
667 * Force Data Address Compare User/Supervisor bits to be User-only
668 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
669 */
670 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
671#else
672 thread->debug.dbcr1 = 0;
673#endif
674}
675
676static void prime_debug_regs(struct debug_reg *debug)
677{
678 /*
679 * We could have inherited MSR_DE from userspace, since
680 * it doesn't get cleared on exception entry. Make sure
681 * MSR_DE is clear before we enable any debug events.
682 */
683 mtmsr(mfmsr() & ~MSR_DE);
684
685 mtspr(SPRN_IAC1, debug->iac1);
686 mtspr(SPRN_IAC2, debug->iac2);
687#if CONFIG_PPC_ADV_DEBUG_IACS > 2
688 mtspr(SPRN_IAC3, debug->iac3);
689 mtspr(SPRN_IAC4, debug->iac4);
690#endif
691 mtspr(SPRN_DAC1, debug->dac1);
692 mtspr(SPRN_DAC2, debug->dac2);
693#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
694 mtspr(SPRN_DVC1, debug->dvc1);
695 mtspr(SPRN_DVC2, debug->dvc2);
696#endif
697 mtspr(SPRN_DBCR0, debug->dbcr0);
698 mtspr(SPRN_DBCR1, debug->dbcr1);
699#ifdef CONFIG_BOOKE
700 mtspr(SPRN_DBCR2, debug->dbcr2);
701#endif
702}
703/*
 704 * Unless neither the old nor the new thread is making use of the
705 * debug registers, set the debug registers from the values
706 * stored in the new thread.
707 */
708void switch_booke_debug_regs(struct debug_reg *new_debug)
709{
710 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
711 || (new_debug->dbcr0 & DBCR0_IDM))
712 prime_debug_regs(new_debug);
713}
714EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
715#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
716#ifndef CONFIG_HAVE_HW_BREAKPOINT
717static void set_debug_reg_defaults(struct thread_struct *thread)
718{
719 thread->hw_brk.address = 0;
720 thread->hw_brk.type = 0;
721 if (ppc_breakpoint_available())
722 set_breakpoint(&thread->hw_brk);
723}
724#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
725#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
726
727#ifdef CONFIG_PPC_ADV_DEBUG_REGS
728static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
729{
730 mtspr(SPRN_DAC1, dabr);
731#ifdef CONFIG_PPC_47x
732 isync();
733#endif
734 return 0;
735}
736#elif defined(CONFIG_PPC_BOOK3S)
737static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
738{
739 mtspr(SPRN_DABR, dabr);
740 if (cpu_has_feature(CPU_FTR_DABRX))
741 mtspr(SPRN_DABRX, dabrx);
742 return 0;
743}
744#elif defined(CONFIG_PPC_8xx)
745static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
746{
747 unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
748 unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
749 unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */
750
751 if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
752 lctrl1 |= 0xa0000;
753 else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
754 lctrl1 |= 0xf0000;
755 else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
756 lctrl2 = 0;
757
758 mtspr(SPRN_LCTRL2, 0);
759 mtspr(SPRN_CMPE, addr);
760 mtspr(SPRN_CMPF, addr + 4);
761 mtspr(SPRN_LCTRL1, lctrl1);
762 mtspr(SPRN_LCTRL2, lctrl2);
763
764 return 0;
765}
766#else
767static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
768{
769 return -EINVAL;
770}
771#endif
772
773static inline int set_dabr(struct arch_hw_breakpoint *brk)
774{
775 unsigned long dabr, dabrx;
776
777 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
778 dabrx = ((brk->type >> 3) & 0x7);
779
780 if (ppc_md.set_dabr)
781 return ppc_md.set_dabr(dabr, dabrx);
782
783 return __set_dabr(dabr, dabrx);
784}
785
786static inline int set_dawr(struct arch_hw_breakpoint *brk)
787{
788 unsigned long dawr, dawrx, mrd;
789
790 dawr = brk->address;
791
 792 dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
 793 << (63 - 58); /* read/write bits */
 794 dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
 795 << (63 - 59); /* translate */
 796 dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
 797 >> 3; /* PRIV bits */
798 /* dawr length is stored in field MDR bits 48:53. Matches range in
 799 doublewords (64 bits), biased by -1, e.g. 0b000000=1DW and
800 0b111111=64DW.
801 brk->len is in bytes.
802 This aligns up to double word size, shifts and does the bias.
803 */
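/*
 * Worked example (hypothetical request): brk->len = 9 bytes gives
 * ((9 + 7) >> 3) - 1 = 1, i.e. MDR encodes a two-doubleword (16-byte)
 * match range covering the requested 9 bytes.
 */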
804 mrd = ((brk->len + 7) >> 3) - 1;
805 dawrx |= (mrd & 0x3f) << (63 - 53);
806
807 if (ppc_md.set_dawr)
808 return ppc_md.set_dawr(dawr, dawrx);
809 mtspr(SPRN_DAWR, dawr);
810 mtspr(SPRN_DAWRX, dawrx);
811 return 0;
812}
813
814void __set_breakpoint(struct arch_hw_breakpoint *brk)
815{
 816 memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
817
818 if (cpu_has_feature(CPU_FTR_DAWR))
819 // Power8 or later
820 set_dawr(brk);
821 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
822 // Power7 or earlier
823 set_dabr(brk);
824 else
825 // Shouldn't happen due to higher level checks
826 WARN_ON_ONCE(1);
827}
828
829void set_breakpoint(struct arch_hw_breakpoint *brk)
830{
831 preempt_disable();
832 __set_breakpoint(brk);
833 preempt_enable();
834}
835
836/* Check if we have DAWR or DABR hardware */
837bool ppc_breakpoint_available(void)
838{
839 if (cpu_has_feature(CPU_FTR_DAWR))
840 return true; /* POWER8 DAWR */
841 if (cpu_has_feature(CPU_FTR_ARCH_207S))
842 return false; /* POWER9 with DAWR disabled */
843 /* DABR: Everything but POWER8 and POWER9 */
844 return true;
845}
846EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
847
848#ifdef CONFIG_PPC64
849DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
850#endif
851
852static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
853 struct arch_hw_breakpoint *b)
854{
855 if (a->address != b->address)
856 return false;
857 if (a->type != b->type)
858 return false;
859 if (a->len != b->len)
860 return false;
861 return true;
862}
863
864#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
865
866static inline bool tm_enabled(struct task_struct *tsk)
867{
868 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
869}
870
871static void tm_reclaim_thread(struct thread_struct *thr,
872 struct thread_info *ti, uint8_t cause)
873{
874 /*
875 * Use the current MSR TM suspended bit to track if we have
876 * checkpointed state outstanding.
877 * On signal delivery, we'd normally reclaim the checkpointed
 878 * state to obtain the stack pointer (see get_tm_stackpointer()).
879 * This will then directly return to userspace without going
880 * through __switch_to(). However, if the stack frame is bad,
881 * we need to exit this thread which calls __switch_to() which
882 * will again attempt to reclaim the already saved tm state.
883 * Hence we need to check that we've not already reclaimed
884 * this state.
 885 * We do this using the current MSR, rather than tracking it in
886 * some specific thread_struct bit, as it has the additional
887 * benefit of checking for a potential TM bad thing exception.
888 */
889 if (!MSR_TM_SUSPENDED(mfmsr()))
890 return;
891
892 giveup_all(container_of(thr, struct task_struct, thread));
893
894 tm_reclaim(thr, cause);
895
896 /*
897 * If we are in a transaction and FP is off then we can't have
898 * used FP inside that transaction. Hence the checkpointed
899 * state is the same as the live state. We need to copy the
900 * live state to the checkpointed state so that when the
901 * transaction is restored, the checkpointed state is correct
902 * and the aborted transaction sees the correct state. We use
903 * ckpt_regs.msr here as that's what tm_reclaim will use to
904 * determine if it's going to write the checkpointed state or
905 * not. So either this will write the checkpointed registers,
906 * or reclaim will. Similarly for VMX.
907 */
908 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
909 memcpy(&thr->ckfp_state, &thr->fp_state,
910 sizeof(struct thread_fp_state));
911 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
912 memcpy(&thr->ckvr_state, &thr->vr_state,
913 sizeof(struct thread_vr_state));
914}
915
916void tm_reclaim_current(uint8_t cause)
917{
918 tm_enable();
 919 tm_reclaim_thread(&current->thread, current_thread_info(), cause);
920}
921
922static inline void tm_reclaim_task(struct task_struct *tsk)
923{
924 /* We have to work out if we're switching from/to a task that's in the
925 * middle of a transaction.
926 *
927 * In switching we need to maintain a 2nd register state as
928 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
929 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
930 * ckvr_state
931 *
932 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
933 */
934 struct thread_struct *thr = &tsk->thread;
935
936 if (!thr->regs)
937 return;
938
939 if (!MSR_TM_ACTIVE(thr->regs->msr))
940 goto out_and_saveregs;
941
942 WARN_ON(tm_suspend_disabled);
943
944 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
945 "ccr=%lx, msr=%lx, trap=%lx)\n",
946 tsk->pid, thr->regs->nip,
947 thr->regs->ccr, thr->regs->msr,
948 thr->regs->trap);
949
950 tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
951
952 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
953 tsk->pid);
954
955out_and_saveregs:
956 /* Always save the regs here, even if a transaction's not active.
957 * This context-switches a thread's TM info SPRs. We do it here to
958 * be consistent with the restore path (in recheckpoint) which
959 * cannot happen later in _switch().
960 */
961 tm_save_sprs(thr);
962}
963
964extern void __tm_recheckpoint(struct thread_struct *thread);
965
966void tm_recheckpoint(struct thread_struct *thread)
967{
968 unsigned long flags;
969
970 if (!(thread->regs->msr & MSR_TM))
971 return;
972
973 /* We really can't be interrupted here as the TEXASR registers can't
974 * change and later in the trecheckpoint code, we have a userspace R1.
975 * So let's hard disable over this region.
976 */
977 local_irq_save(flags);
978 hard_irq_disable();
979
980 /* The TM SPRs are restored here, so that TEXASR.FS can be set
981 * before the trecheckpoint and no explosion occurs.
982 */
983 tm_restore_sprs(thread);
984
985 __tm_recheckpoint(thread);
986
987 local_irq_restore(flags);
988}
989
990static inline void tm_recheckpoint_new_task(struct task_struct *new)
991{
992 if (!cpu_has_feature(CPU_FTR_TM))
993 return;
994
995 /* Recheckpoint the registers of the thread we're about to switch to.
996 *
997 * If the task was using FP, we non-lazily reload both the original and
998 * the speculative FP register states. This is because the kernel
999 * doesn't see if/when a TM rollback occurs, so if we take an FP
1000 * unavailable later, we are unable to determine which set of FP regs
1001 * need to be restored.
1002 */
1003 if (!tm_enabled(new))
1004 return;
1005
1006 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
1007 tm_restore_sprs(&new->thread);
1008 return;
1009 }
1010 /* Recheckpoint to restore original checkpointed register state. */
1011 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
1012 new->pid, new->thread.regs->msr);
1013
1014 tm_recheckpoint(&new->thread);
1015
1016 /*
1017 * The checkpointed state has been restored but the live state has
1018 * not, ensure all the math functionality is turned off to trigger
1019 * restore_math() to reload.
1020 */
1021 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
1022
1023 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
1024 "(kernel msr 0x%lx)\n",
1025 new->pid, mfmsr());
1026}
1027
1028static inline void __switch_to_tm(struct task_struct *prev,
1029 struct task_struct *new)
1030{
1031 if (cpu_has_feature(CPU_FTR_TM)) {
1032 if (tm_enabled(prev) || tm_enabled(new))
1033 tm_enable();
1034
1035 if (tm_enabled(prev)) {
1036 prev->thread.load_tm++;
1037 tm_reclaim_task(prev);
1038 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
1039 prev->thread.regs->msr &= ~MSR_TM;
1040 }
1041
1042 tm_recheckpoint_new_task(new);
1043 }
1044}
1045
1046/*
1047 * This is called if we are on the way out to userspace and the
1048 * TIF_RESTORE_TM flag is set. It checks if we need to reload
1049 * FP and/or vector state and does so if necessary.
1050 * If userspace is inside a transaction (whether active or
1051 * suspended) and FP/VMX/VSX instructions have ever been enabled
1052 * inside that transaction, then we have to keep them enabled
1053 * and keep the FP/VMX/VSX state loaded while ever the transaction
1054 * continues. The reason is that if we didn't, and subsequently
1055 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
1056 * we don't know whether it's the same transaction, and thus we
1057 * don't know which of the checkpointed state and the transactional
1058 * state to use.
1059 */
1060void restore_tm_state(struct pt_regs *regs)
1061{
1062 unsigned long msr_diff;
1063
1064 /*
1065 * This is the only moment we should clear TIF_RESTORE_TM as
1066 * it is here that ckpt_regs.msr and pt_regs.msr become the same
 1067 * again; anything else could lead to an incorrect ckpt_msr being
1068 * saved and therefore incorrect signal contexts.
1069 */
1070 clear_thread_flag(TIF_RESTORE_TM);
1071 if (!MSR_TM_ACTIVE(regs->msr))
1072 return;
1073
1074 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1075 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1076
1077 /* Ensure that restore_math() will restore */
1078 if (msr_diff & MSR_FP)
1079 current->thread.load_fp = 1;
1080#ifdef CONFIG_ALTIVEC
1081 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1082 current->thread.load_vec = 1;
1083#endif
1084 restore_math(regs);
1085
1086 regs->msr |= msr_diff;
1087}
1088
1089#else
1090#define tm_recheckpoint_new_task(new)
1091#define __switch_to_tm(prev, new)
1092#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1093
1094static inline void save_sprs(struct thread_struct *t)
1095{
1096#ifdef CONFIG_ALTIVEC
1097 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1098 t->vrsave = mfspr(SPRN_VRSAVE);
1099#endif
1100#ifdef CONFIG_PPC_BOOK3S_64
1101 if (cpu_has_feature(CPU_FTR_DSCR))
1102 t->dscr = mfspr(SPRN_DSCR);
1103
1104 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1105 t->bescr = mfspr(SPRN_BESCR);
1106 t->ebbhr = mfspr(SPRN_EBBHR);
1107 t->ebbrr = mfspr(SPRN_EBBRR);
1108
1109 t->fscr = mfspr(SPRN_FSCR);
1110
1111 /*
1112 * Note that the TAR is not available for use in the kernel.
1113 * (To provide this, the TAR should be backed up/restored on
1114 * exception entry/exit instead, and be in pt_regs. FIXME,
1115 * this should be in pt_regs anyway (for debug).)
1116 */
1117 t->tar = mfspr(SPRN_TAR);
1118 }
1119#endif
1120
1121 thread_pkey_regs_save(t);
1122}
1123
1124static inline void restore_sprs(struct thread_struct *old_thread,
1125 struct thread_struct *new_thread)
1126{
1127#ifdef CONFIG_ALTIVEC
1128 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1129 old_thread->vrsave != new_thread->vrsave)
1130 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1131#endif
1132#ifdef CONFIG_PPC_BOOK3S_64
1133 if (cpu_has_feature(CPU_FTR_DSCR)) {
1134 u64 dscr = get_paca()->dscr_default;
1135 if (new_thread->dscr_inherit)
1136 dscr = new_thread->dscr;
1137
1138 if (old_thread->dscr != dscr)
1139 mtspr(SPRN_DSCR, dscr);
1140 }
1141
1142 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1143 if (old_thread->bescr != new_thread->bescr)
1144 mtspr(SPRN_BESCR, new_thread->bescr);
1145 if (old_thread->ebbhr != new_thread->ebbhr)
1146 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1147 if (old_thread->ebbrr != new_thread->ebbrr)
1148 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1149
1150 if (old_thread->fscr != new_thread->fscr)
1151 mtspr(SPRN_FSCR, new_thread->fscr);
1152
1153 if (old_thread->tar != new_thread->tar)
1154 mtspr(SPRN_TAR, new_thread->tar);
1155 }
1156
1157 if (cpu_has_feature(CPU_FTR_ARCH_300) &&
1158 old_thread->tidr != new_thread->tidr)
1159 mtspr(SPRN_TIDR, new_thread->tidr);
1160#endif
1161
1162 thread_pkey_regs_restore(new_thread, old_thread);
1163}
1164
1165#ifdef CONFIG_PPC_BOOK3S_64
1166#define CP_SIZE 128
1167static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE)));
1168#endif
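/*
 * dummy_copy_buffer is only used as the source operand of the unpaired
 * "copy" issued from __switch_to() below to scrub the copy-paste buffer on
 * POWER9 DD1; its contents are never otherwise examined.
 */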
1169
1170struct task_struct *__switch_to(struct task_struct *prev,
1171 struct task_struct *new)
1172{
1173 struct thread_struct *new_thread, *old_thread;
1174 struct task_struct *last;
1175#ifdef CONFIG_PPC_BOOK3S_64
1176 struct ppc64_tlb_batch *batch;
1177#endif
1178
1179 new_thread = &new->thread;
 1180 old_thread = &current->thread;
1181
1182 WARN_ON(!irqs_disabled());
1183
1184#ifdef CONFIG_PPC64
1185 /*
1186 * Collect processor utilization data per process
1187 */
1188 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
1189 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
1190 long unsigned start_tb, current_tb;
1191 start_tb = old_thread->start_tb;
1192 cu->current_tb = current_tb = mfspr(SPRN_PURR);
1193 old_thread->accum_tb += (current_tb - start_tb);
1194 new_thread->start_tb = current_tb;
1195 }
1196#endif /* CONFIG_PPC64 */
1197
1198#ifdef CONFIG_PPC_BOOK3S_64
1199 batch = this_cpu_ptr(&ppc64_tlb_batch);
1200 if (batch->active) {
1201 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1202 if (batch->index)
1203 __flush_tlb_pending(batch);
1204 batch->active = 0;
1205 }
1206#endif /* CONFIG_PPC_BOOK3S_64 */
1207
1208#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1209 switch_booke_debug_regs(&new->thread.debug);
1210#else
1211/*
1212 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1213 * schedule DABR
1214 */
1215#ifndef CONFIG_HAVE_HW_BREAKPOINT
 1216 if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
1217 __set_breakpoint(&new->thread.hw_brk);
1218#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1219#endif
1220
1221 /*
1222 * We need to save SPRs before treclaim/trecheckpoint as these will
1223 * change a number of them.
1224 */
1225 save_sprs(&prev->thread);
1226
1227 /* Save FPU, Altivec, VSX and SPE state */
1228 giveup_all(prev);
1229
1230 __switch_to_tm(prev, new);
1231
1232 if (!radix_enabled()) {
1233 /*
1234 * We can't take a PMU exception inside _switch() since there
1235 * is a window where the kernel stack SLB and the kernel stack
1236 * are out of sync. Hard disable here.
1237 */
1238 hard_irq_disable();
1239 }
1240
1241 /*
1242 * Call restore_sprs() before calling _switch(). If we move it after
1243 * _switch() then we miss out on calling it for new tasks. The reason
1244 * for this is we manually create a stack frame for new tasks that
1245 * directly returns through ret_from_fork() or
1246 * ret_from_kernel_thread(). See copy_thread() for details.
1247 */
1248 restore_sprs(old_thread, new_thread);
1249
1250 last = _switch(old_thread, new_thread);
1251
1252#ifdef CONFIG_PPC_BOOK3S_64
1253 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1254 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1255 batch = this_cpu_ptr(&ppc64_tlb_batch);
1256 batch->active = 1;
1257 }
1258
1259 if (current_thread_info()->task->thread.regs) {
1260 restore_math(current_thread_info()->task->thread.regs);
1261
1262 /*
1263 * The copy-paste buffer can only store into foreign real
1264 * addresses, so unprivileged processes can not see the
1265 * data or use it in any way unless they have foreign real
1266 * mappings. If the new process has the foreign real address
1267 * mappings, we must issue a cp_abort to clear any state and
1268 * prevent snooping, corruption or a covert channel.
1269 *
1270 * DD1 allows paste into normal system memory so we do an
1271 * unpaired copy, rather than cp_abort, to clear the buffer,
1272 * since cp_abort is quite expensive.
1273 */
1274 if (current_thread_info()->task->thread.used_vas) {
1275 asm volatile(PPC_CP_ABORT);
1276 } else if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
1277 asm volatile(PPC_COPY(%0, %1)
1278 : : "r"(dummy_copy_buffer), "r"(0));
1279 }
1280 }
1281#endif /* CONFIG_PPC_BOOK3S_64 */
1282
1283 return last;
1284}
1285
1286static int instructions_to_print = 16;
1287
1288static void show_instructions(struct pt_regs *regs)
1289{
1290 int i;
1291 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
1292 sizeof(int));
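/*
 * With the default of 16 instructions the dump starts 12 words before NIP,
 * so NIP itself and the three following words close the dump.
 */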
1293
1294 printk("Instruction dump:");
1295
1296 for (i = 0; i < instructions_to_print; i++) {
1297 int instr;
1298
1299 if (!(i % 8))
1300 pr_cont("\n");
1301
1302#if !defined(CONFIG_BOOKE)
1303 /* If executing with the IMMU off, adjust pc rather
1304 * than print XXXXXXXX.
1305 */
1306 if (!(regs->msr & MSR_IR))
1307 pc = (unsigned long)phys_to_virt(pc);
1308#endif
1309
1310 if (!__kernel_text_address(pc) ||
1311 probe_kernel_address((unsigned int __user *)pc, instr)) {
1312 pr_cont("XXXXXXXX ");
1313 } else {
1314 if (regs->nip == pc)
1315 pr_cont("<%08x> ", instr);
1316 else
1317 pr_cont("%08x ", instr);
1318 }
1319
1320 pc += sizeof(int);
1321 }
1322
1323 pr_cont("\n");
1324}
1325
1326struct regbit {
1327 unsigned long bit;
1328 const char *name;
1329};
1330
1331static struct regbit msr_bits[] = {
1332#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1333 {MSR_SF, "SF"},
1334 {MSR_HV, "HV"},
1335#endif
1336 {MSR_VEC, "VEC"},
1337 {MSR_VSX, "VSX"},
1338#ifdef CONFIG_BOOKE
1339 {MSR_CE, "CE"},
1340#endif
1341 {MSR_EE, "EE"},
1342 {MSR_PR, "PR"},
1343 {MSR_FP, "FP"},
1344 {MSR_ME, "ME"},
1345#ifdef CONFIG_BOOKE
1346 {MSR_DE, "DE"},
1347#else
1348 {MSR_SE, "SE"},
1349 {MSR_BE, "BE"},
1350#endif
1351 {MSR_IR, "IR"},
1352 {MSR_DR, "DR"},
1353 {MSR_PMM, "PMM"},
1354#ifndef CONFIG_BOOKE
1355 {MSR_RI, "RI"},
1356 {MSR_LE, "LE"},
1357#endif
1358 {0, NULL}
1359};
1360
1361static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1362{
1363 const char *s = "";
1364
1365 for (; bits->bit; ++bits)
1366 if (val & bits->bit) {
1367 pr_cont("%s%s", s, bits->name);
1368 s = sep;
1369 }
1370}
1371
1372#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1373static struct regbit msr_tm_bits[] = {
1374 {MSR_TS_T, "T"},
1375 {MSR_TS_S, "S"},
1376 {MSR_TM, "E"},
1377 {0, NULL}
1378};
1379
1380static void print_tm_bits(unsigned long val)
1381{
1382/*
 1383 * This only prints something if at least one of the TM bits is set.
1384 * Inside the TM[], the output means:
1385 * E: Enabled (bit 32)
1386 * S: Suspended (bit 33)
1387 * T: Transactional (bit 34)
1388 */
1389 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1390 pr_cont(",TM[");
1391 print_bits(val, msr_tm_bits, "");
1392 pr_cont("]");
1393 }
1394}
1395#else
1396static void print_tm_bits(unsigned long val) {}
1397#endif
1398
1399static void print_msr_bits(unsigned long val)
1400{
1401 pr_cont("<");
1402 print_bits(val, msr_bits, ",");
1403 print_tm_bits(val);
1404 pr_cont(">");
1405}
1406
1407#ifdef CONFIG_PPC64
1408#define REG "%016lx"
1409#define REGS_PER_LINE 4
1410#define LAST_VOLATILE 13
1411#else
1412#define REG "%08lx"
1413#define REGS_PER_LINE 8
1414#define LAST_VOLATILE 12
1415#endif
1416
1417void show_regs(struct pt_regs * regs)
1418{
1419 int i, trap;
1420
1421 show_regs_print_info(KERN_DEFAULT);
1422
1423 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1424 regs->nip, regs->link, regs->ctr);
1425 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1426 regs, regs->trap, print_tainted(), init_utsname()->release);
1427 printk("MSR: "REG" ", regs->msr);
1428 print_msr_bits(regs->msr);
1429 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1430 trap = TRAP(regs);
1431 if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1432 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1433 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1434#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1435 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1436#else
1437 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1438#endif
1439#ifdef CONFIG_PPC64
1440 pr_cont("SOFTE: %ld ", regs->softe);
1441#endif
1442#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1443 if (MSR_TM_ACTIVE(regs->msr))
1444 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1445#endif
1446
1447 for (i = 0; i < 32; i++) {
1448 if ((i % REGS_PER_LINE) == 0)
1449 pr_cont("\nGPR%02d: ", i);
1450 pr_cont(REG " ", regs->gpr[i]);
1451 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1452 break;
1453 }
1454 pr_cont("\n");
1455#ifdef CONFIG_KALLSYMS
1456 /*
 1457 * Lookup NIP late so we have the best chance of getting the
1458 * above info out without failing
1459 */
1460 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1461 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1462#endif
1463 show_stack(current, (unsigned long *) regs->gpr[1]);
1464 if (!user_mode(regs))
1465 show_instructions(regs);
1466}
1467
1468void flush_thread(void)
1469{
1470#ifdef CONFIG_HAVE_HW_BREAKPOINT
1471 flush_ptrace_hw_breakpoint(current);
1472#else /* CONFIG_HAVE_HW_BREAKPOINT */
 1473 set_debug_reg_defaults(&current->thread);
1474#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1475}
1476
1477int set_thread_uses_vas(void)
1478{
1479#ifdef CONFIG_PPC_BOOK3S_64
1480 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1481 return -EINVAL;
1482
1483 current->thread.used_vas = 1;
1484
1485 /*
1486 * Even a process that has no foreign real address mapping can use
1487 * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
1488 * to clear any pending COPY and prevent a covert channel.
1489 *
1490 * __switch_to() will issue CP_ABORT on future context switches.
1491 */
1492 asm volatile(PPC_CP_ABORT);
1493
1494#endif /* CONFIG_PPC_BOOK3S_64 */
1495 return 0;
1496}
1497
1498#ifdef CONFIG_PPC64
1499static DEFINE_SPINLOCK(vas_thread_id_lock);
1500static DEFINE_IDA(vas_thread_ida);
1501
1502/*
1503 * We need to assign a unique thread id to each thread in a process.
1504 *
 1505 * This thread id, referred to as TIDR and separate from Linux's tgid,
1506 * is intended to be used to direct an ASB_Notify from the hardware to the
1507 * thread, when a suitable event occurs in the system.
1508 *
1509 * One such event is a "paste" instruction in the context of Fast Thread
 1510 * Wakeup (aka Core-to-core wake up) in the Virtual Accelerator
 1511 * Switchboard (VAS) in POWER9.
1512 *
1513 * To get a unique TIDR per process we could simply reuse task_pid_nr() but
 1514 * the problem is that task_pid_nr() is not yet available when copy_thread()
 1515 * is called. Fixing that would require more intrusive changes to the
 1516 * arch-neutral code in the copy_process() path.
1517 *
1518 * Further, to assign unique TIDRs within each process, we need an atomic
1519 * field (or an IDR) in task_struct, which again intrudes into the arch-
1520 * neutral code. So try to assign globally unique TIDRs for now.
1521 *
1522 * NOTE: TIDR 0 indicates that the thread does not need a TIDR value.
1523 * For now, only threads that expect to be notified by the VAS
1524 * hardware need a TIDR value and we assign values > 0 for those.
1525 */
1526#define MAX_THREAD_CONTEXT ((1 << 16) - 1)
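/*
 * TIDR values are assumed to have to fit in a 16-bit SPR field, hence the
 * (1 << 16) - 1 ceiling enforced in assign_thread_tidr() below.
 */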
1527static int assign_thread_tidr(void)
1528{
1529 int index;
1530 int err;
1531 unsigned long flags;
1532
1533again:
1534 if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL))
1535 return -ENOMEM;
1536
1537 spin_lock_irqsave(&vas_thread_id_lock, flags);
1538 err = ida_get_new_above(&vas_thread_ida, 1, &index);
1539 spin_unlock_irqrestore(&vas_thread_id_lock, flags);
1540
1541 if (err == -EAGAIN)
1542 goto again;
1543 else if (err)
1544 return err;
1545
1546 if (index > MAX_THREAD_CONTEXT) {
1547 spin_lock_irqsave(&vas_thread_id_lock, flags);
1548 ida_remove(&vas_thread_ida, index);
1549 spin_unlock_irqrestore(&vas_thread_id_lock, flags);
1550 return -ENOMEM;
1551 }
1552
1553 return index;
1554}
1555
1556static void free_thread_tidr(int id)
1557{
1558 unsigned long flags;
1559
1560 spin_lock_irqsave(&vas_thread_id_lock, flags);
1561 ida_remove(&vas_thread_ida, id);
1562 spin_unlock_irqrestore(&vas_thread_id_lock, flags);
1563}
1564
1565/*
1566 * Clear any TIDR value assigned to this thread.
1567 */
1568void clear_thread_tidr(struct task_struct *t)
1569{
1570 if (!t->thread.tidr)
1571 return;
1572
1573 if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
1574 WARN_ON_ONCE(1);
1575 return;
1576 }
1577
1578 mtspr(SPRN_TIDR, 0);
1579 free_thread_tidr(t->thread.tidr);
1580 t->thread.tidr = 0;
1581}
1582
1583void arch_release_task_struct(struct task_struct *t)
1584{
1585 clear_thread_tidr(t);
1586}
1587
1588/*
1589 * Assign a unique TIDR (thread id) for task @t and set it in the thread
1590 * structure. For now, we only support setting TIDR for 'current' task.
1591 */
1592int set_thread_tidr(struct task_struct *t)
1593{
1594 int rc;
1595
1596 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1597 return -EINVAL;
1598
1599 if (t != current)
1600 return -EINVAL;
1601
1602 if (t->thread.tidr)
1603 return 0;
1604
1605 rc = assign_thread_tidr();
1606 if (rc < 0)
1607 return rc;
1608
1609 t->thread.tidr = rc;
1610 mtspr(SPRN_TIDR, t->thread.tidr);
1611
1612 return 0;
1613}
1614EXPORT_SYMBOL_GPL(set_thread_tidr);
1615
1616#endif /* CONFIG_PPC64 */
1617
1618void
1619release_thread(struct task_struct *t)
1620{
1621}
1622
1623/*
1624 * this gets called so that we can store coprocessor state into memory and
1625 * copy the current task into the new thread.
1626 */
1627int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1628{
1629 flush_all_to_thread(src);
1630 /*
1631 * Flush TM state out so we can copy it. __switch_to_tm() does this
1632 * flush but it removes the checkpointed state from the current CPU and
1633 * transitions the CPU out of TM mode. Hence we need to call
1634 * tm_recheckpoint_new_task() (on the same task) to restore the
1635 * checkpointed state back and the TM mode.
1636 *
1637 * Can't pass dst because it isn't ready. Doesn't matter, passing
1638 * dst is only important for __switch_to()
1639 */
1640 __switch_to_tm(src, src);
1641
1642 *dst = *src;
1643
1644 clear_task_ebb(dst);
1645
1646 return 0;
1647}
1648
1649static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1650{
1651#ifdef CONFIG_PPC_BOOK3S_64
1652 unsigned long sp_vsid;
1653 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1654
1655 if (radix_enabled())
1656 return;
1657
1658 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1659 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1660 << SLB_VSID_SHIFT_1T;
1661 else
1662 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1663 << SLB_VSID_SHIFT;
1664 sp_vsid |= SLB_VSID_KERNEL | llp;
1665 p->thread.ksp_vsid = sp_vsid;
1666#endif
1667}
1668
1669/*
1670 * Copy a thread..
1671 */
1672
1673/*
1674 * Copy architecture-specific thread state
1675 */
1676int copy_thread(unsigned long clone_flags, unsigned long usp,
1677 unsigned long kthread_arg, struct task_struct *p)
1678{
1679 struct pt_regs *childregs, *kregs;
1680 extern void ret_from_fork(void);
1681 extern void ret_from_kernel_thread(void);
1682 void (*f)(void);
1683 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1684 struct thread_info *ti = task_thread_info(p);
1685
1686 klp_init_thread_info(ti);
1687
1688 /* Copy registers */
1689 sp -= sizeof(struct pt_regs);
1690 childregs = (struct pt_regs *) sp;
1691 if (unlikely(p->flags & PF_KTHREAD)) {
1692 /* kernel thread */
1693 memset(childregs, 0, sizeof(struct pt_regs));
1694 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1695 /* function */
1696 if (usp)
1697 childregs->gpr[14] = ppc_function_entry((void *)usp);
1698#ifdef CONFIG_PPC64
1699 clear_tsk_thread_flag(p, TIF_32BIT);
1700 childregs->softe = IRQS_ENABLED;
1701#endif
1702 childregs->gpr[15] = kthread_arg;
1703 p->thread.regs = NULL; /* no user register state */
1704 ti->flags |= _TIF_RESTOREALL;
1705 f = ret_from_kernel_thread;
1706 } else {
1707 /* user thread */
1708 struct pt_regs *regs = current_pt_regs();
1709 CHECK_FULL_REGS(regs);
1710 *childregs = *regs;
1711 if (usp)
1712 childregs->gpr[1] = usp;
1713 p->thread.regs = childregs;
1714 childregs->gpr[3] = 0; /* Result from fork() */
1715 if (clone_flags & CLONE_SETTLS) {
1716#ifdef CONFIG_PPC64
1717 if (!is_32bit_task())
1718 childregs->gpr[13] = childregs->gpr[6];
1719 else
1720#endif
1721 childregs->gpr[2] = childregs->gpr[6];
1722 }
1723
1724 f = ret_from_fork;
1725 }
1726 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1727 sp -= STACK_FRAME_OVERHEAD;
1728
1729 /*
1730 * The way this works is that at some point in the future
1731 * some task will call _switch to switch to the new task.
1732 * That will pop off the stack frame created below and start
1733 * the new task running at ret_from_fork. The new task will
 1734 * do some housekeeping and then return from the fork or clone
1735 * system call, using the stack frame created above.
1736 */
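/*
 * Rough sketch of the frames built here, from the top of the new stack
 * down (a reading of this function, not an ABI guarantee):
 *
 *	childregs      - user (or kernel-thread) pt_regs
 *	switch frame   - STACK_FRAME_OVERHEAD, back chain zeroed below
 *	kregs          - pt_regs whose nip is set to ret_from_fork or
 *	                 ret_from_kernel_thread at the end of this function
 *	minimal frame  - STACK_FRAME_OVERHEAD; p->thread.ksp points here
 */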
1737 ((unsigned long *)sp)[0] = 0;
1738 sp -= sizeof(struct pt_regs);
1739 kregs = (struct pt_regs *) sp;
1740 sp -= STACK_FRAME_OVERHEAD;
1741 p->thread.ksp = sp;
1742#ifdef CONFIG_PPC32
1743 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1744 _ALIGN_UP(sizeof(struct thread_info), 16);
1745#endif
1746#ifdef CONFIG_HAVE_HW_BREAKPOINT
1747 p->thread.ptrace_bps[0] = NULL;
1748#endif
1749
1750 p->thread.fp_save_area = NULL;
1751#ifdef CONFIG_ALTIVEC
1752 p->thread.vr_save_area = NULL;
1753#endif
1754
1755 setup_ksp_vsid(p, sp);
1756
1757#ifdef CONFIG_PPC64
1758 if (cpu_has_feature(CPU_FTR_DSCR)) {
1759 p->thread.dscr_inherit = current->thread.dscr_inherit;
1760 p->thread.dscr = mfspr(SPRN_DSCR);
1761 }
1762 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1763 p->thread.ppr = INIT_PPR;
1764
1765 p->thread.tidr = 0;
1766#endif
1767 kregs->nip = ppc_function_entry(f);
1768 return 0;
1769}
1770
1771/*
1772 * Set up a thread for executing a new program
1773 */
1774void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1775{
1776#ifdef CONFIG_PPC64
1777 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1778#endif
1779
1780 /*
1781 * If we exec out of a kernel thread then thread.regs will not be
1782 * set. Do it now.
1783 */
1784 if (!current->thread.regs) {
1785 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1786 current->thread.regs = regs - 1;
1787 }
1788
1789#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1790 /*
1791 * Clear any transactional state, we're exec()ing. The cause is
1792 * not important as there will never be a recheckpoint so it's not
1793 * user visible.
1794 */
1795 if (MSR_TM_SUSPENDED(mfmsr()))
1796 tm_reclaim_current(0);
1797#endif
1798
1799 memset(regs->gpr, 0, sizeof(regs->gpr));
1800 regs->ctr = 0;
1801 regs->link = 0;
1802 regs->xer = 0;
1803 regs->ccr = 0;
1804 regs->gpr[1] = sp;
1805
1806 /*
1807 * We have just cleared all the nonvolatile GPRs, so make
1808 * FULL_REGS(regs) return true. This is necessary to allow
1809 * ptrace to examine the thread immediately after exec.
1810 */
1811 regs->trap &= ~1UL;
1812
1813#ifdef CONFIG_PPC32
1814 regs->mq = 0;
1815 regs->nip = start;
1816 regs->msr = MSR_USER;
1817#else
1818 if (!is_32bit_task()) {
1819 unsigned long entry;
1820
1821 if (is_elf2_task()) {
1822 /* Look ma, no function descriptors! */
1823 entry = start;
1824
1825 /*
1826 * Ulrich says:
1827 * The latest iteration of the ABI requires that when
1828 * calling a function (at its global entry point),
1829 * the caller must ensure r12 holds the entry point
1830 * address (so that the function can quickly
1831 * establish addressability).
1832 */
1833 regs->gpr[12] = start;
1834 /* Make sure that's restored on entry to userspace. */
1835 set_thread_flag(TIF_RESTOREALL);
1836 } else {
1837 unsigned long toc;
1838
1839 /* start is a relocated pointer to the function
1840 * descriptor for the elf _start routine. The first
1841 * entry in the function descriptor is the entry
1842 * address of _start and the second entry is the TOC
1843 * value we need to use.
1844 */
1845 __get_user(entry, (unsigned long __user *)start);
1846 __get_user(toc, (unsigned long __user *)start+1);
1847
1848 /* Check whether the e_entry function descriptor entries
1849 * need to be relocated before we can use them.
1850 */
1851 if (load_addr != 0) {
1852 entry += load_addr;
1853 toc += load_addr;
1854 }
1855 regs->gpr[2] = toc;
1856 }
1857 regs->nip = entry;
1858 regs->msr = MSR_USER64;
1859 } else {
1860 regs->nip = start;
1861 regs->gpr[2] = 0;
1862 regs->msr = MSR_USER32;
1863 }
1864#endif
1865#ifdef CONFIG_VSX
1866 current->thread.used_vsr = 0;
1867#endif
1868 current->thread.load_fp = 0;
 1869 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1870 current->thread.fp_save_area = NULL;
1871#ifdef CONFIG_ALTIVEC
 1872 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1873 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1874 current->thread.vr_save_area = NULL;
1875 current->thread.vrsave = 0;
1876 current->thread.used_vr = 0;
1877 current->thread.load_vec = 0;
1878#endif /* CONFIG_ALTIVEC */
1879#ifdef CONFIG_SPE
1880 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1881 current->thread.acc = 0;
1882 current->thread.spefscr = 0;
1883 current->thread.used_spe = 0;
1884#endif /* CONFIG_SPE */
1885#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1886 current->thread.tm_tfhar = 0;
1887 current->thread.tm_texasr = 0;
1888 current->thread.tm_tfiar = 0;
1889 current->thread.load_tm = 0;
1890#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1891
 1892 thread_pkey_regs_init(&current->thread);
1893}
1894EXPORT_SYMBOL(start_thread);
1895
1896#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1897 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1898
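/*
 * set_fpexc_mode() is the backend of prctl(PR_SET_FPEXC, ...) (reached via
 * the SET_FPEXC_CTL hook); e.g. a userspace call such as
 * prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) is expected to land here.
 */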
1899int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1900{
1901 struct pt_regs *regs = tsk->thread.regs;
1902
1903 /* This is a bit hairy. If we are an SPE enabled processor
1904 * (have embedded fp) we store the IEEE exception enable flags in
1905 * fpexc_mode. fpexc_mode is also used for setting FP exception
 1906 * mode (async, precise, disabled) for 'Classic' FP. */
1907 if (val & PR_FP_EXC_SW_ENABLE) {
1908#ifdef CONFIG_SPE
1909 if (cpu_has_feature(CPU_FTR_SPE)) {
1910 /*
1911 * When the sticky exception bits are set
1912 * directly by userspace, it must call prctl
1913 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1914 * in the existing prctl settings) or
1915 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1916 * the bits being set). <fenv.h> functions
1917 * saving and restoring the whole
1918 * floating-point environment need to do so
1919 * anyway to restore the prctl settings from
1920 * the saved environment.
1921 */
1922 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1923 tsk->thread.fpexc_mode = val &
1924 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1925 return 0;
1926 } else {
1927 return -EINVAL;
1928 }
1929#else
1930 return -EINVAL;
1931#endif
1932 }
1933
 1934 /* On a CONFIG_SPE processor this does not hurt us. The bits that
1935 * __pack_fe01 use do not overlap with bits used for
1936 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
1937 * on CONFIG_SPE implementations are reserved so writing to
1938 * them does not change anything */
1939 if (val > PR_FP_EXC_PRECISE)
1940 return -EINVAL;
1941 tsk->thread.fpexc_mode = __pack_fe01(val);
1942 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1943 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1944 | tsk->thread.fpexc_mode;
1945 return 0;
1946}
1947
1948int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1949{
1950 unsigned int val;
1951
1952 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1953#ifdef CONFIG_SPE
1954 if (cpu_has_feature(CPU_FTR_SPE)) {
1955 /*
1956 * When the sticky exception bits are set
1957 * directly by userspace, it must call prctl
1958 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1959 * in the existing prctl settings) or
1960 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1961 * the bits being set). <fenv.h> functions
1962 * saving and restoring the whole
1963 * floating-point environment need to do so
1964 * anyway to restore the prctl settings from
1965 * the saved environment.
1966 */
1967 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1968 val = tsk->thread.fpexc_mode;
1969 } else
1970 return -EINVAL;
1971#else
1972 return -EINVAL;
1973#endif
1974 else
1975 val = __unpack_fe01(tsk->thread.fpexc_mode);
1976 return put_user(val, (unsigned int __user *) adr);
1977}
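/*
 * Usage note (illustrative, not part of the original file): the two routines
 * above back the PR_SET_FPEXC/PR_GET_FPEXC prctl(2) commands (via the
 * SET_FPEXC_CTL/GET_FPEXC_CTL hooks).  A minimal userspace sketch, assuming
 * <sys/prctl.h> and <linux/prctl.h> are available:
 *
 *	unsigned int mode;
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);	// precise (synchronous) traps
 *	prctl(PR_GET_FPEXC, &mode);		// kernel fills this via put_user()
 *
 * On classic FP, PR_FP_EXC_PRECISE is packed into the MSR[FE0,FE1]
 * trap-enable bits by __pack_fe01(); PR_FP_EXC_SW_ENABLE together with the
 * PR_FP_EXC_* bits selects the SPEFSCR-based software path handled above on
 * SPE processors.
 */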
1978
1979int set_endian(struct task_struct *tsk, unsigned int val)
1980{
1981 struct pt_regs *regs = tsk->thread.regs;
1982
1983 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1984 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1985 return -EINVAL;
1986
1987 if (regs == NULL)
1988 return -EINVAL;
1989
1990 if (val == PR_ENDIAN_BIG)
1991 regs->msr &= ~MSR_LE;
1992 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1993 regs->msr |= MSR_LE;
1994 else
1995 return -EINVAL;
1996
1997 return 0;
1998}
1999
2000int get_endian(struct task_struct *tsk, unsigned long adr)
2001{
2002 struct pt_regs *regs = tsk->thread.regs;
2003 unsigned int val;
2004
2005 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
2006 !cpu_has_feature(CPU_FTR_REAL_LE))
2007 return -EINVAL;
2008
2009 if (regs == NULL)
2010 return -EINVAL;
2011
2012 if (regs->msr & MSR_LE) {
2013 if (cpu_has_feature(CPU_FTR_REAL_LE))
2014 val = PR_ENDIAN_LITTLE;
2015 else
2016 val = PR_ENDIAN_PPC_LITTLE;
2017 } else
2018 val = PR_ENDIAN_BIG;
2019
2020 return put_user(val, (unsigned int __user *)adr);
2021}
2022
2023int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
2024{
2025 tsk->thread.align_ctl = val;
2026 return 0;
2027}
2028
2029int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
2030{
2031 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
2032}
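/*
 * Usage note (illustrative, not part of the original file): set_endian(),
 * get_endian() and the align_ctl accessors above are reached through
 * prctl(2) as PR_SET_ENDIAN/PR_GET_ENDIAN and PR_SET_UNALIGN/PR_GET_UNALIGN.
 * A minimal userspace sketch, assuming <sys/prctl.h> and <linux/prctl.h>:
 *
 *	unsigned int val;
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	// signal instead of fixup
 *	prctl(PR_GET_UNALIGN, &val);
 *
 *	prctl(PR_GET_ENDIAN, &val);	// PR_ENDIAN_BIG / *_LITTLE / *_PPC_LITTLE
 *
 * PR_SET_ENDIAN flips MSR_LE for the calling thread and only succeeds on
 * CPUs advertising CPU_FTR_REAL_LE (or CPU_FTR_PPC_LE for the old PPC
 * little-endian mode); the unalign control only affects accesses that trap
 * into the alignment fixup handler.
 */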
2033
2034static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
2035 unsigned long nbytes)
2036{
2037 unsigned long stack_page;
2038 unsigned long cpu = task_cpu(p);
2039
2040 /*
2041 * Avoid crashing if the stack has overflowed and corrupted
2042 * task_cpu(p), which is in the thread_info struct.
2043 */
2044 if (cpu < NR_CPUS && cpu_possible(cpu)) {
2045 stack_page = (unsigned long) hardirq_ctx[cpu];
2046 if (sp >= stack_page + sizeof(struct thread_struct)
2047 && sp <= stack_page + THREAD_SIZE - nbytes)
2048 return 1;
2049
2050 stack_page = (unsigned long) softirq_ctx[cpu];
2051 if (sp >= stack_page + sizeof(struct thread_struct)
2052 && sp <= stack_page + THREAD_SIZE - nbytes)
2053 return 1;
2054 }
2055 return 0;
2056}
2057
2058int validate_sp(unsigned long sp, struct task_struct *p,
2059 unsigned long nbytes)
2060{
2061 unsigned long stack_page = (unsigned long)task_stack_page(p);
2062
2063 if (sp >= stack_page + sizeof(struct thread_struct)
2064 && sp <= stack_page + THREAD_SIZE - nbytes)
2065 return 1;
2066
2067 return valid_irq_stack(sp, p, nbytes);
2068}
2069
2070EXPORT_SYMBOL(validate_sp);
2071
2072unsigned long get_wchan(struct task_struct *p)
2073{
2074 unsigned long ip, sp;
2075 int count = 0;
2076
2077 if (!p || p == current || p->state == TASK_RUNNING)
2078 return 0;
2079
2080 sp = p->thread.ksp;
2081 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
2082 return 0;
2083
2084 do {
2085 sp = *(unsigned long *)sp;
2086 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2087 p->state == TASK_RUNNING)
2088 return 0;
2089 if (count > 0) {
2090 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
2091 if (!in_sched_functions(ip))
2092 return ip;
2093 }
2094 } while (count++ < 16);
2095 return 0;
2096}
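/*
 * A sketch of the stack-frame layout the walk above (and show_stack() below)
 * depends on; exact offsets come from STACK_FRAME_LR_SAVE:
 *
 *	sp ->	[0]			back chain = caller's sp
 *		...
 *		[STACK_FRAME_LR_SAVE]	saved LR (a return address)
 *
 * get_wchan() follows back chains, skipping the first frame (count > 0)
 * because its LR save slot is not guaranteed to be meaningful, and stops at
 * the first return address that is not in a scheduler function.
 */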
2097
2098static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2099
2100void show_stack(struct task_struct *tsk, unsigned long *stack)
2101{
2102 unsigned long sp, ip, lr, newsp;
2103 int count = 0;
2104 int firstframe = 1;
2105#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2106 int curr_frame = current->curr_ret_stack;
2107 extern void return_to_handler(void);
2108 unsigned long rth = (unsigned long)return_to_handler;
2109#endif
2110
2111 sp = (unsigned long) stack;
2112 if (tsk == NULL)
2113 tsk = current;
2114 if (sp == 0) {
2115 if (tsk == current)
2116 sp = current_stack_pointer();
2117 else
2118 sp = tsk->thread.ksp;
2119 }
2120
2121 lr = 0;
2122 printk("Call Trace:\n");
2123 do {
2124 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2125 return;
2126
2127 stack = (unsigned long *) sp;
2128 newsp = stack[0];
2129 ip = stack[STACK_FRAME_LR_SAVE];
2130 if (!firstframe || ip != lr) {
2131 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2132#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2133 if ((ip == rth) && curr_frame >= 0) {
2134 pr_cont(" (%pS)",
2135 (void *)current->ret_stack[curr_frame].ret);
2136 curr_frame--;
2137 }
2138#endif
2139 if (firstframe)
2140 pr_cont(" (unreliable)");
2141 pr_cont("\n");
2142 }
2143 firstframe = 0;
2144
2145 /*
2146 * See if this is an exception frame.
2147 * We look for the "regshere" marker in the current frame.
2148 */
2149 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
2150 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2151 struct pt_regs *regs = (struct pt_regs *)
2152 (sp + STACK_FRAME_OVERHEAD);
2153 lr = regs->link;
2154 printk("--- interrupt: %lx at %pS\n LR = %pS\n",
2155 regs->trap, (void *)regs->nip, (void *)lr);
2156 firstframe = 1;
2157 }
2158
2159 sp = newsp;
2160 } while (count++ < kstack_depth_to_print);
2161}
2162
2163#ifdef CONFIG_PPC64
2164/* Called with hard IRQs off */
2165void notrace __ppc64_runlatch_on(void)
2166{
2167 struct thread_info *ti = current_thread_info();
2168
2169 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2170 /*
2171 * Least significant bit (RUN) is the only writable bit of
2172 * the CTRL register, so we can avoid mfspr. 2.06 is not the
2173 * earliest ISA where this is the case, but it's convenient.
2174 */
2175 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2176 } else {
2177 unsigned long ctrl;
2178
2179 /*
2180 * Some architectures (e.g., Cell) have writable fields other
2181 * than RUN, so do the read-modify-write.
2182 */
2183 ctrl = mfspr(SPRN_CTRLF);
2184 ctrl |= CTRL_RUNLATCH;
2185 mtspr(SPRN_CTRLT, ctrl);
2186 }
2187
2188 ti->local_flags |= _TLF_RUNLATCH;
2189}
2190
2191/* Called with hard IRQs off */
2192void notrace __ppc64_runlatch_off(void)
2193{
2194 struct thread_info *ti = current_thread_info();
2195
2196 ti->local_flags &= ~_TLF_RUNLATCH;
2197
2198 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2199 mtspr(SPRN_CTRLT, 0);
2200 } else {
2201 unsigned long ctrl;
2202
2203 ctrl = mfspr(SPRN_CTRLF);
2204 ctrl &= ~CTRL_RUNLATCH;
2205 mtspr(SPRN_CTRLT, ctrl);
2206 }
2207}
2208#endif /* CONFIG_PPC64 */
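/*
 * Note (illustrative): callers normally use the ppc64_runlatch_on()/
 * ppc64_runlatch_off() wrappers from <asm/runlatch.h>, which check
 * CPU_FTR_CTRL and the _TLF_RUNLATCH flag first so the SPR access is
 * skipped when the latch is already in the requested state, e.g. around
 * the idle loop (sketch):
 *
 *	ppc64_runlatch_off();
 *	// ... CPU sits idle ...
 *	ppc64_runlatch_on();
 *
 * The run latch tells the hardware whether the thread is doing useful work,
 * which some processors take into account for resource accounting.
 */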
2209
2210unsigned long arch_align_stack(unsigned long sp)
2211{
2212 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2213 sp -= get_random_int() & ~PAGE_MASK;
2214 return sp & ~0xf;
2215}
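/*
 * Worked example for arch_align_stack(), assuming 4K pages: ~PAGE_MASK is
 * 0xfff, so "get_random_int() & ~PAGE_MASK" yields a random offset in
 * [0, 4095].  Subtracting it jitters the initial stack by up to one page,
 * and the final "& ~0xf" keeps the result 16-byte aligned as the ABI
 * requires.
 */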
2216
2217static inline unsigned long brk_rnd(void)
2218{
2219 unsigned long rnd = 0;
2220
2221 /* 8MB for 32bit, 1GB for 64bit */
2222 if (is_32bit_task())
2223 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2224 else
2225 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2226
2227 return rnd << PAGE_SHIFT;
2228}
2229
2230unsigned long arch_randomize_brk(struct mm_struct *mm)
2231{
2232 unsigned long base = mm->brk;
2233 unsigned long ret;
2234
2235#ifdef CONFIG_PPC_BOOK3S_64
2236 /*
2237 * If we are using 1TB segments and we are allowed to randomise
2238 * the heap, we can put it above 1TB so it is backed by a 1TB
2239 * segment. Otherwise the heap will be in the bottom 1TB
2240 * which always uses 256MB segments and this may result in a
2241 * performance penalty. We don't need to worry about radix. For
2242 * radix, mmu_highuser_ssize remains unchanged from 256MB.
2243 */
2244 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2245 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2246#endif
2247
2248 ret = PAGE_ALIGN(base + brk_rnd());
2249
2250 if (ret < mm->brk)
2251 return mm->brk;
2252
2253 return ret;
2254}
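/*
 * Worked example for the brk randomisation above, assuming 4K pages
 * (PAGE_SHIFT = 12): for a 64-bit task brk_rnd() returns a random count
 * below 1UL << (30 - 12) = 256K pages, i.e. up to 1GB of randomisation;
 * a 32-bit task gets up to 8MB.  On Book3S-64 with 1TB segments the base
 * is first raised to 1UL << SID_SHIFT_1T (1TB) so the randomised heap
 * lands in a region backed by a 1TB segment.
 */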
2255
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/smp.h>
22#include <linux/stddef.h>
23#include <linux/unistd.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/user.h>
27#include <linux/elf.h>
28#include <linux/prctl.h>
29#include <linux/init_task.h>
30#include <linux/export.h>
31#include <linux/kallsyms.h>
32#include <linux/mqueue.h>
33#include <linux/hardirq.h>
34#include <linux/utsname.h>
35#include <linux/ftrace.h>
36#include <linux/kernel_stat.h>
37#include <linux/personality.h>
38#include <linux/random.h>
39#include <linux/hw_breakpoint.h>
40#include <linux/uaccess.h>
41
42#include <asm/pgtable.h>
43#include <asm/io.h>
44#include <asm/processor.h>
45#include <asm/mmu.h>
46#include <asm/prom.h>
47#include <asm/machdep.h>
48#include <asm/time.h>
49#include <asm/runlatch.h>
50#include <asm/syscalls.h>
51#include <asm/switch_to.h>
52#include <asm/tm.h>
53#include <asm/debug.h>
54#ifdef CONFIG_PPC64
55#include <asm/firmware.h>
56#endif
57#include <asm/code-patching.h>
58#include <linux/kprobes.h>
59#include <linux/kdebug.h>
60
61/* Transactional Memory debug */
62#ifdef TM_DEBUG_SW
63#define TM_DEBUG(x...) printk(KERN_INFO x)
64#else
65#define TM_DEBUG(x...) do { } while(0)
66#endif
67
68extern unsigned long _get_SP(void);
69
70#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
71static void check_if_tm_restore_required(struct task_struct *tsk)
72{
73 /*
74 * If we are saving the current thread's registers, and the
75 * thread is in a transactional state, set the TIF_RESTORE_TM
76 * bit so that we know to restore the registers before
77 * returning to userspace.
78 */
79 if (tsk == current && tsk->thread.regs &&
80 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
81 !test_thread_flag(TIF_RESTORE_TM)) {
82 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
83 set_thread_flag(TIF_RESTORE_TM);
84 }
85}
86#else
87static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
88#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
89
90bool strict_msr_control;
91EXPORT_SYMBOL(strict_msr_control);
92
93static int __init enable_strict_msr_control(char *str)
94{
95 strict_msr_control = true;
96 pr_info("Enabling strict facility control\n");
97
98 return 0;
99}
100early_param("ppc_strict_facility_enable", enable_strict_msr_control);
101
102void msr_check_and_set(unsigned long bits)
103{
104 unsigned long oldmsr = mfmsr();
105 unsigned long newmsr;
106
107 newmsr = oldmsr | bits;
108
109#ifdef CONFIG_VSX
110 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
111 newmsr |= MSR_VSX;
112#endif
113
114 if (oldmsr != newmsr)
115 mtmsr_isync(newmsr);
116}
117
118void __msr_check_and_clear(unsigned long bits)
119{
120 unsigned long oldmsr = mfmsr();
121 unsigned long newmsr;
122
123 newmsr = oldmsr & ~bits;
124
125#ifdef CONFIG_VSX
126 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
127 newmsr &= ~MSR_VSX;
128#endif
129
130 if (oldmsr != newmsr)
131 mtmsr_isync(newmsr);
132}
133EXPORT_SYMBOL(__msr_check_and_clear);
134
135#ifdef CONFIG_PPC_FPU
136void __giveup_fpu(struct task_struct *tsk)
137{
138 save_fpu(tsk);
139 tsk->thread.regs->msr &= ~MSR_FP;
140#ifdef CONFIG_VSX
141 if (cpu_has_feature(CPU_FTR_VSX))
142 tsk->thread.regs->msr &= ~MSR_VSX;
143#endif
144}
145
146void giveup_fpu(struct task_struct *tsk)
147{
148 check_if_tm_restore_required(tsk);
149
150 msr_check_and_set(MSR_FP);
151 __giveup_fpu(tsk);
152 msr_check_and_clear(MSR_FP);
153}
154EXPORT_SYMBOL(giveup_fpu);
155
156/*
157 * Make sure the floating-point register state in the
158 * the thread_struct is up to date for task tsk.
159 */
160void flush_fp_to_thread(struct task_struct *tsk)
161{
162 if (tsk->thread.regs) {
163 /*
164 * We need to disable preemption here because if we didn't,
165 * another process could get scheduled after the regs->msr
166 * test but before we have finished saving the FP registers
167 * to the thread_struct. That process could take over the
168 * FPU, and then when we get scheduled again we would store
169 * bogus values for the remaining FP registers.
170 */
171 preempt_disable();
172 if (tsk->thread.regs->msr & MSR_FP) {
173 /*
174 * This should only ever be called for current or
175 * for a stopped child process. Since we save away
176 * the FP register state on context switch,
177 * there is something wrong if a stopped child appears
178 * to still have its FP state in the CPU registers.
179 */
180 BUG_ON(tsk != current);
181 giveup_fpu(tsk);
182 }
183 preempt_enable();
184 }
185}
186EXPORT_SYMBOL_GPL(flush_fp_to_thread);
187
188void enable_kernel_fp(void)
189{
190 WARN_ON(preemptible());
191
192 msr_check_and_set(MSR_FP);
193
194 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
195 check_if_tm_restore_required(current);
196 __giveup_fpu(current);
197 }
198}
199EXPORT_SYMBOL(enable_kernel_fp);
200
201static int restore_fp(struct task_struct *tsk) {
202 if (tsk->thread.load_fp) {
203 load_fp_state(¤t->thread.fp_state);
204 current->thread.load_fp++;
205 return 1;
206 }
207 return 0;
208}
209#else
210static int restore_fp(struct task_struct *tsk) { return 0; }
211#endif /* CONFIG_PPC_FPU */
212
213#ifdef CONFIG_ALTIVEC
214#define loadvec(thr) ((thr).load_vec)
215
216static void __giveup_altivec(struct task_struct *tsk)
217{
218 save_altivec(tsk);
219 tsk->thread.regs->msr &= ~MSR_VEC;
220#ifdef CONFIG_VSX
221 if (cpu_has_feature(CPU_FTR_VSX))
222 tsk->thread.regs->msr &= ~MSR_VSX;
223#endif
224}
225
226void giveup_altivec(struct task_struct *tsk)
227{
228 check_if_tm_restore_required(tsk);
229
230 msr_check_and_set(MSR_VEC);
231 __giveup_altivec(tsk);
232 msr_check_and_clear(MSR_VEC);
233}
234EXPORT_SYMBOL(giveup_altivec);
235
236void enable_kernel_altivec(void)
237{
238 WARN_ON(preemptible());
239
240 msr_check_and_set(MSR_VEC);
241
242 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
243 check_if_tm_restore_required(current);
244 __giveup_altivec(current);
245 }
246}
247EXPORT_SYMBOL(enable_kernel_altivec);
248
249/*
250 * Make sure the VMX/Altivec register state in the
251 * the thread_struct is up to date for task tsk.
252 */
253void flush_altivec_to_thread(struct task_struct *tsk)
254{
255 if (tsk->thread.regs) {
256 preempt_disable();
257 if (tsk->thread.regs->msr & MSR_VEC) {
258 BUG_ON(tsk != current);
259 giveup_altivec(tsk);
260 }
261 preempt_enable();
262 }
263}
264EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
265
266static int restore_altivec(struct task_struct *tsk)
267{
268 if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
269 load_vr_state(&tsk->thread.vr_state);
270 tsk->thread.used_vr = 1;
271 tsk->thread.load_vec++;
272
273 return 1;
274 }
275 return 0;
276}
277#else
278#define loadvec(thr) 0
279static inline int restore_altivec(struct task_struct *tsk) { return 0; }
280#endif /* CONFIG_ALTIVEC */
281
282#ifdef CONFIG_VSX
283static void __giveup_vsx(struct task_struct *tsk)
284{
285 if (tsk->thread.regs->msr & MSR_FP)
286 __giveup_fpu(tsk);
287 if (tsk->thread.regs->msr & MSR_VEC)
288 __giveup_altivec(tsk);
289 tsk->thread.regs->msr &= ~MSR_VSX;
290}
291
292static void giveup_vsx(struct task_struct *tsk)
293{
294 check_if_tm_restore_required(tsk);
295
296 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
297 __giveup_vsx(tsk);
298 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
299}
300
301static void save_vsx(struct task_struct *tsk)
302{
303 if (tsk->thread.regs->msr & MSR_FP)
304 save_fpu(tsk);
305 if (tsk->thread.regs->msr & MSR_VEC)
306 save_altivec(tsk);
307}
308
309void enable_kernel_vsx(void)
310{
311 WARN_ON(preemptible());
312
313 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
314
315 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
316 check_if_tm_restore_required(current);
317 if (current->thread.regs->msr & MSR_FP)
318 __giveup_fpu(current);
319 if (current->thread.regs->msr & MSR_VEC)
320 __giveup_altivec(current);
321 __giveup_vsx(current);
322 }
323}
324EXPORT_SYMBOL(enable_kernel_vsx);
325
326void flush_vsx_to_thread(struct task_struct *tsk)
327{
328 if (tsk->thread.regs) {
329 preempt_disable();
330 if (tsk->thread.regs->msr & MSR_VSX) {
331 BUG_ON(tsk != current);
332 giveup_vsx(tsk);
333 }
334 preempt_enable();
335 }
336}
337EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
338
339static int restore_vsx(struct task_struct *tsk)
340{
341 if (cpu_has_feature(CPU_FTR_VSX)) {
342 tsk->thread.used_vsr = 1;
343 return 1;
344 }
345
346 return 0;
347}
348#else
349static inline int restore_vsx(struct task_struct *tsk) { return 0; }
350static inline void save_vsx(struct task_struct *tsk) { }
351#endif /* CONFIG_VSX */
352
353#ifdef CONFIG_SPE
354void giveup_spe(struct task_struct *tsk)
355{
356 check_if_tm_restore_required(tsk);
357
358 msr_check_and_set(MSR_SPE);
359 __giveup_spe(tsk);
360 msr_check_and_clear(MSR_SPE);
361}
362EXPORT_SYMBOL(giveup_spe);
363
364void enable_kernel_spe(void)
365{
366 WARN_ON(preemptible());
367
368 msr_check_and_set(MSR_SPE);
369
370 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
371 check_if_tm_restore_required(current);
372 __giveup_spe(current);
373 }
374}
375EXPORT_SYMBOL(enable_kernel_spe);
376
377void flush_spe_to_thread(struct task_struct *tsk)
378{
379 if (tsk->thread.regs) {
380 preempt_disable();
381 if (tsk->thread.regs->msr & MSR_SPE) {
382 BUG_ON(tsk != current);
383 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
384 giveup_spe(tsk);
385 }
386 preempt_enable();
387 }
388}
389#endif /* CONFIG_SPE */
390
391static unsigned long msr_all_available;
392
393static int __init init_msr_all_available(void)
394{
395#ifdef CONFIG_PPC_FPU
396 msr_all_available |= MSR_FP;
397#endif
398#ifdef CONFIG_ALTIVEC
399 if (cpu_has_feature(CPU_FTR_ALTIVEC))
400 msr_all_available |= MSR_VEC;
401#endif
402#ifdef CONFIG_VSX
403 if (cpu_has_feature(CPU_FTR_VSX))
404 msr_all_available |= MSR_VSX;
405#endif
406#ifdef CONFIG_SPE
407 if (cpu_has_feature(CPU_FTR_SPE))
408 msr_all_available |= MSR_SPE;
409#endif
410
411 return 0;
412}
413early_initcall(init_msr_all_available);
414
415void giveup_all(struct task_struct *tsk)
416{
417 unsigned long usermsr;
418
419 if (!tsk->thread.regs)
420 return;
421
422 usermsr = tsk->thread.regs->msr;
423
424 if ((usermsr & msr_all_available) == 0)
425 return;
426
427 msr_check_and_set(msr_all_available);
428
429#ifdef CONFIG_PPC_FPU
430 if (usermsr & MSR_FP)
431 __giveup_fpu(tsk);
432#endif
433#ifdef CONFIG_ALTIVEC
434 if (usermsr & MSR_VEC)
435 __giveup_altivec(tsk);
436#endif
437#ifdef CONFIG_VSX
438 if (usermsr & MSR_VSX)
439 __giveup_vsx(tsk);
440#endif
441#ifdef CONFIG_SPE
442 if (usermsr & MSR_SPE)
443 __giveup_spe(tsk);
444#endif
445
446 msr_check_and_clear(msr_all_available);
447}
448EXPORT_SYMBOL(giveup_all);
449
450void restore_math(struct pt_regs *regs)
451{
452 unsigned long msr;
453
454 if (!current->thread.load_fp && !loadvec(current->thread))
455 return;
456
457 msr = regs->msr;
458 msr_check_and_set(msr_all_available);
459
460 /*
461 * Only reload if the bit is not set in the user MSR, the bit BEING set
462 * indicates that the registers are hot
463 */
464 if ((!(msr & MSR_FP)) && restore_fp(current))
465 msr |= MSR_FP | current->thread.fpexc_mode;
466
467 if ((!(msr & MSR_VEC)) && restore_altivec(current))
468 msr |= MSR_VEC;
469
470 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
471 restore_vsx(current)) {
472 msr |= MSR_VSX;
473 }
474
475 msr_check_and_clear(msr_all_available);
476
477 regs->msr = msr;
478}
479
480void save_all(struct task_struct *tsk)
481{
482 unsigned long usermsr;
483
484 if (!tsk->thread.regs)
485 return;
486
487 usermsr = tsk->thread.regs->msr;
488
489 if ((usermsr & msr_all_available) == 0)
490 return;
491
492 msr_check_and_set(msr_all_available);
493
494 /*
495 * Saving the way the register space is in hardware, save_vsx boils
496 * down to a save_fpu() and save_altivec()
497 */
498 if (usermsr & MSR_VSX) {
499 save_vsx(tsk);
500 } else {
501 if (usermsr & MSR_FP)
502 save_fpu(tsk);
503
504 if (usermsr & MSR_VEC)
505 save_altivec(tsk);
506 }
507
508 if (usermsr & MSR_SPE)
509 __giveup_spe(tsk);
510
511 msr_check_and_clear(msr_all_available);
512}
513
514void flush_all_to_thread(struct task_struct *tsk)
515{
516 if (tsk->thread.regs) {
517 preempt_disable();
518 BUG_ON(tsk != current);
519 save_all(tsk);
520
521#ifdef CONFIG_SPE
522 if (tsk->thread.regs->msr & MSR_SPE)
523 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
524#endif
525
526 preempt_enable();
527 }
528}
529EXPORT_SYMBOL(flush_all_to_thread);
530
531#ifdef CONFIG_PPC_ADV_DEBUG_REGS
532void do_send_trap(struct pt_regs *regs, unsigned long address,
533 unsigned long error_code, int signal_code, int breakpt)
534{
535 siginfo_t info;
536
537 current->thread.trap_nr = signal_code;
538 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
539 11, SIGSEGV) == NOTIFY_STOP)
540 return;
541
542 /* Deliver the signal to userspace */
543 info.si_signo = SIGTRAP;
544 info.si_errno = breakpt; /* breakpoint or watchpoint id */
545 info.si_code = signal_code;
546 info.si_addr = (void __user *)address;
547 force_sig_info(SIGTRAP, &info, current);
548}
549#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
550void do_break (struct pt_regs *regs, unsigned long address,
551 unsigned long error_code)
552{
553 siginfo_t info;
554
555 current->thread.trap_nr = TRAP_HWBKPT;
556 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
557 11, SIGSEGV) == NOTIFY_STOP)
558 return;
559
560 if (debugger_break_match(regs))
561 return;
562
563 /* Clear the breakpoint */
564 hw_breakpoint_disable();
565
566 /* Deliver the signal to userspace */
567 info.si_signo = SIGTRAP;
568 info.si_errno = 0;
569 info.si_code = TRAP_HWBKPT;
570 info.si_addr = (void __user *)address;
571 force_sig_info(SIGTRAP, &info, current);
572}
573#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
574
575static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
576
577#ifdef CONFIG_PPC_ADV_DEBUG_REGS
578/*
579 * Set the debug registers back to their default "safe" values.
580 */
581static void set_debug_reg_defaults(struct thread_struct *thread)
582{
583 thread->debug.iac1 = thread->debug.iac2 = 0;
584#if CONFIG_PPC_ADV_DEBUG_IACS > 2
585 thread->debug.iac3 = thread->debug.iac4 = 0;
586#endif
587 thread->debug.dac1 = thread->debug.dac2 = 0;
588#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
589 thread->debug.dvc1 = thread->debug.dvc2 = 0;
590#endif
591 thread->debug.dbcr0 = 0;
592#ifdef CONFIG_BOOKE
593 /*
594 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
595 */
596 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
597 DBCR1_IAC3US | DBCR1_IAC4US;
598 /*
599 * Force Data Address Compare User/Supervisor bits to be User-only
600 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
601 */
602 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
603#else
604 thread->debug.dbcr1 = 0;
605#endif
606}
607
608static void prime_debug_regs(struct debug_reg *debug)
609{
610 /*
611 * We could have inherited MSR_DE from userspace, since
612 * it doesn't get cleared on exception entry. Make sure
613 * MSR_DE is clear before we enable any debug events.
614 */
615 mtmsr(mfmsr() & ~MSR_DE);
616
617 mtspr(SPRN_IAC1, debug->iac1);
618 mtspr(SPRN_IAC2, debug->iac2);
619#if CONFIG_PPC_ADV_DEBUG_IACS > 2
620 mtspr(SPRN_IAC3, debug->iac3);
621 mtspr(SPRN_IAC4, debug->iac4);
622#endif
623 mtspr(SPRN_DAC1, debug->dac1);
624 mtspr(SPRN_DAC2, debug->dac2);
625#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
626 mtspr(SPRN_DVC1, debug->dvc1);
627 mtspr(SPRN_DVC2, debug->dvc2);
628#endif
629 mtspr(SPRN_DBCR0, debug->dbcr0);
630 mtspr(SPRN_DBCR1, debug->dbcr1);
631#ifdef CONFIG_BOOKE
632 mtspr(SPRN_DBCR2, debug->dbcr2);
633#endif
634}
635/*
636 * Unless neither the old or new thread are making use of the
637 * debug registers, set the debug registers from the values
638 * stored in the new thread.
639 */
640void switch_booke_debug_regs(struct debug_reg *new_debug)
641{
642 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
643 || (new_debug->dbcr0 & DBCR0_IDM))
644 prime_debug_regs(new_debug);
645}
646EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
647#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
648#ifndef CONFIG_HAVE_HW_BREAKPOINT
649static void set_debug_reg_defaults(struct thread_struct *thread)
650{
651 thread->hw_brk.address = 0;
652 thread->hw_brk.type = 0;
653 set_breakpoint(&thread->hw_brk);
654}
655#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
656#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
657
658#ifdef CONFIG_PPC_ADV_DEBUG_REGS
659static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
660{
661 mtspr(SPRN_DAC1, dabr);
662#ifdef CONFIG_PPC_47x
663 isync();
664#endif
665 return 0;
666}
667#elif defined(CONFIG_PPC_BOOK3S)
668static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
669{
670 mtspr(SPRN_DABR, dabr);
671 if (cpu_has_feature(CPU_FTR_DABRX))
672 mtspr(SPRN_DABRX, dabrx);
673 return 0;
674}
675#else
676static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
677{
678 return -EINVAL;
679}
680#endif
681
682static inline int set_dabr(struct arch_hw_breakpoint *brk)
683{
684 unsigned long dabr, dabrx;
685
686 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
687 dabrx = ((brk->type >> 3) & 0x7);
688
689 if (ppc_md.set_dabr)
690 return ppc_md.set_dabr(dabr, dabrx);
691
692 return __set_dabr(dabr, dabrx);
693}
694
695static inline int set_dawr(struct arch_hw_breakpoint *brk)
696{
697 unsigned long dawr, dawrx, mrd;
698
699 dawr = brk->address;
700
701 dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
702 << (63 - 58); //* read/write bits */
703 dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
704 << (63 - 59); //* translate */
705 dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
706 >> 3; //* PRIM bits */
707 /* dawr length is stored in field MDR bits 48:53. Matches range in
708 doublewords (64 bits) baised by -1 eg. 0b000000=1DW and
709 0b111111=64DW.
710 brk->len is in bytes.
711 This aligns up to double word size, shifts and does the bias.
712 */
713 mrd = ((brk->len + 7) >> 3) - 1;
714 dawrx |= (mrd & 0x3f) << (63 - 53);
715
716 if (ppc_md.set_dawr)
717 return ppc_md.set_dawr(dawr, dawrx);
718 mtspr(SPRN_DAWR, dawr);
719 mtspr(SPRN_DAWRX, dawrx);
720 return 0;
721}
722
723void __set_breakpoint(struct arch_hw_breakpoint *brk)
724{
725 memcpy(this_cpu_ptr(¤t_brk), brk, sizeof(*brk));
726
727 if (cpu_has_feature(CPU_FTR_DAWR))
728 set_dawr(brk);
729 else
730 set_dabr(brk);
731}
732
733void set_breakpoint(struct arch_hw_breakpoint *brk)
734{
735 preempt_disable();
736 __set_breakpoint(brk);
737 preempt_enable();
738}
739
740#ifdef CONFIG_PPC64
741DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
742#endif
743
744static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
745 struct arch_hw_breakpoint *b)
746{
747 if (a->address != b->address)
748 return false;
749 if (a->type != b->type)
750 return false;
751 if (a->len != b->len)
752 return false;
753 return true;
754}
755
756#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
757static void tm_reclaim_thread(struct thread_struct *thr,
758 struct thread_info *ti, uint8_t cause)
759{
760 unsigned long msr_diff = 0;
761
762 /*
763 * If FP/VSX registers have been already saved to the
764 * thread_struct, move them to the transact_fp array.
765 * We clear the TIF_RESTORE_TM bit since after the reclaim
766 * the thread will no longer be transactional.
767 */
768 if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
769 msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
770 if (msr_diff & MSR_FP)
771 memcpy(&thr->transact_fp, &thr->fp_state,
772 sizeof(struct thread_fp_state));
773 if (msr_diff & MSR_VEC)
774 memcpy(&thr->transact_vr, &thr->vr_state,
775 sizeof(struct thread_vr_state));
776 clear_ti_thread_flag(ti, TIF_RESTORE_TM);
777 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
778 }
779
780 /*
781 * Use the current MSR TM suspended bit to track if we have
782 * checkpointed state outstanding.
783 * On signal delivery, we'd normally reclaim the checkpointed
784 * state to obtain stack pointer (see:get_tm_stackpointer()).
785 * This will then directly return to userspace without going
786 * through __switch_to(). However, if the stack frame is bad,
787 * we need to exit this thread which calls __switch_to() which
788 * will again attempt to reclaim the already saved tm state.
789 * Hence we need to check that we've not already reclaimed
790 * this state.
791 * We do this using the current MSR, rather tracking it in
792 * some specific thread_struct bit, as it has the additional
793 * benifit of checking for a potential TM bad thing exception.
794 */
795 if (!MSR_TM_SUSPENDED(mfmsr()))
796 return;
797
798 tm_reclaim(thr, thr->regs->msr, cause);
799
800 /* Having done the reclaim, we now have the checkpointed
801 * FP/VSX values in the registers. These might be valid
802 * even if we have previously called enable_kernel_fp() or
803 * flush_fp_to_thread(), so update thr->regs->msr to
804 * indicate their current validity.
805 */
806 thr->regs->msr |= msr_diff;
807}
808
809void tm_reclaim_current(uint8_t cause)
810{
811 tm_enable();
812 tm_reclaim_thread(¤t->thread, current_thread_info(), cause);
813}
814
815static inline void tm_reclaim_task(struct task_struct *tsk)
816{
817 /* We have to work out if we're switching from/to a task that's in the
818 * middle of a transaction.
819 *
820 * In switching we need to maintain a 2nd register state as
821 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
822 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
823 * (current) FPRs into oldtask->thread.transact_fpr[].
824 *
825 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
826 */
827 struct thread_struct *thr = &tsk->thread;
828
829 if (!thr->regs)
830 return;
831
832 if (!MSR_TM_ACTIVE(thr->regs->msr))
833 goto out_and_saveregs;
834
835 /* Stash the original thread MSR, as giveup_fpu et al will
836 * modify it. We hold onto it to see whether the task used
837 * FP & vector regs. If the TIF_RESTORE_TM flag is set,
838 * ckpt_regs.msr is already set.
839 */
840 if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
841 thr->ckpt_regs.msr = thr->regs->msr;
842
843 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
844 "ccr=%lx, msr=%lx, trap=%lx)\n",
845 tsk->pid, thr->regs->nip,
846 thr->regs->ccr, thr->regs->msr,
847 thr->regs->trap);
848
849 tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
850
851 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
852 tsk->pid);
853
854out_and_saveregs:
855 /* Always save the regs here, even if a transaction's not active.
856 * This context-switches a thread's TM info SPRs. We do it here to
857 * be consistent with the restore path (in recheckpoint) which
858 * cannot happen later in _switch().
859 */
860 tm_save_sprs(thr);
861}
862
863extern void __tm_recheckpoint(struct thread_struct *thread,
864 unsigned long orig_msr);
865
866void tm_recheckpoint(struct thread_struct *thread,
867 unsigned long orig_msr)
868{
869 unsigned long flags;
870
871 /* We really can't be interrupted here as the TEXASR registers can't
872 * change and later in the trecheckpoint code, we have a userspace R1.
873 * So let's hard disable over this region.
874 */
875 local_irq_save(flags);
876 hard_irq_disable();
877
878 /* The TM SPRs are restored here, so that TEXASR.FS can be set
879 * before the trecheckpoint and no explosion occurs.
880 */
881 tm_restore_sprs(thread);
882
883 __tm_recheckpoint(thread, orig_msr);
884
885 local_irq_restore(flags);
886}
887
888static inline void tm_recheckpoint_new_task(struct task_struct *new)
889{
890 unsigned long msr;
891
892 if (!cpu_has_feature(CPU_FTR_TM))
893 return;
894
895 /* Recheckpoint the registers of the thread we're about to switch to.
896 *
897 * If the task was using FP, we non-lazily reload both the original and
898 * the speculative FP register states. This is because the kernel
899 * doesn't see if/when a TM rollback occurs, so if we take an FP
900 * unavoidable later, we are unable to determine which set of FP regs
901 * need to be restored.
902 */
903 if (!new->thread.regs)
904 return;
905
906 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
907 tm_restore_sprs(&new->thread);
908 return;
909 }
910 msr = new->thread.ckpt_regs.msr;
911 /* Recheckpoint to restore original checkpointed register state. */
912 TM_DEBUG("*** tm_recheckpoint of pid %d "
913 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
914 new->pid, new->thread.regs->msr, msr);
915
916 /* This loads the checkpointed FP/VEC state, if used */
917 tm_recheckpoint(&new->thread, msr);
918
919 /* This loads the speculative FP/VEC state, if used */
920 if (msr & MSR_FP) {
921 do_load_up_transact_fpu(&new->thread);
922 new->thread.regs->msr |=
923 (MSR_FP | new->thread.fpexc_mode);
924 }
925#ifdef CONFIG_ALTIVEC
926 if (msr & MSR_VEC) {
927 do_load_up_transact_altivec(&new->thread);
928 new->thread.regs->msr |= MSR_VEC;
929 }
930#endif
931 /* We may as well turn on VSX too since all the state is restored now */
932 if (msr & MSR_VSX)
933 new->thread.regs->msr |= MSR_VSX;
934
935 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
936 "(kernel msr 0x%lx)\n",
937 new->pid, mfmsr());
938}
939
940static inline void __switch_to_tm(struct task_struct *prev)
941{
942 if (cpu_has_feature(CPU_FTR_TM)) {
943 tm_enable();
944 tm_reclaim_task(prev);
945 }
946}
947
948/*
949 * This is called if we are on the way out to userspace and the
950 * TIF_RESTORE_TM flag is set. It checks if we need to reload
951 * FP and/or vector state and does so if necessary.
952 * If userspace is inside a transaction (whether active or
953 * suspended) and FP/VMX/VSX instructions have ever been enabled
954 * inside that transaction, then we have to keep them enabled
955 * and keep the FP/VMX/VSX state loaded while ever the transaction
956 * continues. The reason is that if we didn't, and subsequently
957 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
958 * we don't know whether it's the same transaction, and thus we
959 * don't know which of the checkpointed state and the transactional
960 * state to use.
961 */
962void restore_tm_state(struct pt_regs *regs)
963{
964 unsigned long msr_diff;
965
966 clear_thread_flag(TIF_RESTORE_TM);
967 if (!MSR_TM_ACTIVE(regs->msr))
968 return;
969
970 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
971 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
972
973 restore_math(regs);
974
975 regs->msr |= msr_diff;
976}
977
978#else
979#define tm_recheckpoint_new_task(new)
980#define __switch_to_tm(prev)
981#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
982
983static inline void save_sprs(struct thread_struct *t)
984{
985#ifdef CONFIG_ALTIVEC
986 if (cpu_has_feature(CPU_FTR_ALTIVEC))
987 t->vrsave = mfspr(SPRN_VRSAVE);
988#endif
989#ifdef CONFIG_PPC_BOOK3S_64
990 if (cpu_has_feature(CPU_FTR_DSCR))
991 t->dscr = mfspr(SPRN_DSCR);
992
993 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
994 t->bescr = mfspr(SPRN_BESCR);
995 t->ebbhr = mfspr(SPRN_EBBHR);
996 t->ebbrr = mfspr(SPRN_EBBRR);
997
998 t->fscr = mfspr(SPRN_FSCR);
999
1000 /*
1001 * Note that the TAR is not available for use in the kernel.
1002 * (To provide this, the TAR should be backed up/restored on
1003 * exception entry/exit instead, and be in pt_regs. FIXME,
1004 * this should be in pt_regs anyway (for debug).)
1005 */
1006 t->tar = mfspr(SPRN_TAR);
1007 }
1008#endif
1009}
1010
1011static inline void restore_sprs(struct thread_struct *old_thread,
1012 struct thread_struct *new_thread)
1013{
1014#ifdef CONFIG_ALTIVEC
1015 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1016 old_thread->vrsave != new_thread->vrsave)
1017 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1018#endif
1019#ifdef CONFIG_PPC_BOOK3S_64
1020 if (cpu_has_feature(CPU_FTR_DSCR)) {
1021 u64 dscr = get_paca()->dscr_default;
1022 u64 fscr = old_thread->fscr & ~FSCR_DSCR;
1023
1024 if (new_thread->dscr_inherit) {
1025 dscr = new_thread->dscr;
1026 fscr |= FSCR_DSCR;
1027 }
1028
1029 if (old_thread->dscr != dscr)
1030 mtspr(SPRN_DSCR, dscr);
1031
1032 if (old_thread->fscr != fscr)
1033 mtspr(SPRN_FSCR, fscr);
1034 }
1035
1036 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1037 if (old_thread->bescr != new_thread->bescr)
1038 mtspr(SPRN_BESCR, new_thread->bescr);
1039 if (old_thread->ebbhr != new_thread->ebbhr)
1040 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1041 if (old_thread->ebbrr != new_thread->ebbrr)
1042 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1043
1044 if (old_thread->tar != new_thread->tar)
1045 mtspr(SPRN_TAR, new_thread->tar);
1046 }
1047#endif
1048}
1049
1050struct task_struct *__switch_to(struct task_struct *prev,
1051 struct task_struct *new)
1052{
1053 struct thread_struct *new_thread, *old_thread;
1054 struct task_struct *last;
1055#ifdef CONFIG_PPC_BOOK3S_64
1056 struct ppc64_tlb_batch *batch;
1057#endif
1058
1059 new_thread = &new->thread;
1060 old_thread = ¤t->thread;
1061
1062 WARN_ON(!irqs_disabled());
1063
1064#ifdef CONFIG_PPC64
1065 /*
1066 * Collect processor utilization data per process
1067 */
1068 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
1069 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
1070 long unsigned start_tb, current_tb;
1071 start_tb = old_thread->start_tb;
1072 cu->current_tb = current_tb = mfspr(SPRN_PURR);
1073 old_thread->accum_tb += (current_tb - start_tb);
1074 new_thread->start_tb = current_tb;
1075 }
1076#endif /* CONFIG_PPC64 */
1077
1078#ifdef CONFIG_PPC_BOOK3S_64
1079 batch = this_cpu_ptr(&ppc64_tlb_batch);
1080 if (batch->active) {
1081 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1082 if (batch->index)
1083 __flush_tlb_pending(batch);
1084 batch->active = 0;
1085 }
1086#endif /* CONFIG_PPC_BOOK3S_64 */
1087
1088#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1089 switch_booke_debug_regs(&new->thread.debug);
1090#else
1091/*
1092 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1093 * schedule DABR
1094 */
1095#ifndef CONFIG_HAVE_HW_BREAKPOINT
1096 if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk)))
1097 __set_breakpoint(&new->thread.hw_brk);
1098#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1099#endif
1100
1101 /*
1102 * We need to save SPRs before treclaim/trecheckpoint as these will
1103 * change a number of them.
1104 */
1105 save_sprs(&prev->thread);
1106
1107 __switch_to_tm(prev);
1108
1109 /* Save FPU, Altivec, VSX and SPE state */
1110 giveup_all(prev);
1111
1112 /*
1113 * We can't take a PMU exception inside _switch() since there is a
1114 * window where the kernel stack SLB and the kernel stack are out
1115 * of sync. Hard disable here.
1116 */
1117 hard_irq_disable();
1118
1119 tm_recheckpoint_new_task(new);
1120
1121 /*
1122 * Call restore_sprs() before calling _switch(). If we move it after
1123 * _switch() then we miss out on calling it for new tasks. The reason
1124 * for this is we manually create a stack frame for new tasks that
1125 * directly returns through ret_from_fork() or
1126 * ret_from_kernel_thread(). See copy_thread() for details.
1127 */
1128 restore_sprs(old_thread, new_thread);
1129
1130 last = _switch(old_thread, new_thread);
1131
1132#ifdef CONFIG_PPC_BOOK3S_64
1133 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1134 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1135 batch = this_cpu_ptr(&ppc64_tlb_batch);
1136 batch->active = 1;
1137 }
1138
1139 if (current_thread_info()->task->thread.regs)
1140 restore_math(current_thread_info()->task->thread.regs);
1141
1142#endif /* CONFIG_PPC_BOOK3S_64 */
1143
1144 return last;
1145}
1146
1147static int instructions_to_print = 16;
1148
1149static void show_instructions(struct pt_regs *regs)
1150{
1151 int i;
1152 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
1153 sizeof(int));
1154
1155 printk("Instruction dump:");
1156
1157 for (i = 0; i < instructions_to_print; i++) {
1158 int instr;
1159
1160 if (!(i % 8))
1161 printk("\n");
1162
1163#if !defined(CONFIG_BOOKE)
1164 /* If executing with the IMMU off, adjust pc rather
1165 * than print XXXXXXXX.
1166 */
1167 if (!(regs->msr & MSR_IR))
1168 pc = (unsigned long)phys_to_virt(pc);
1169#endif
1170
1171 if (!__kernel_text_address(pc) ||
1172 probe_kernel_address((unsigned int __user *)pc, instr)) {
1173 printk(KERN_CONT "XXXXXXXX ");
1174 } else {
1175 if (regs->nip == pc)
1176 printk(KERN_CONT "<%08x> ", instr);
1177 else
1178 printk(KERN_CONT "%08x ", instr);
1179 }
1180
1181 pc += sizeof(int);
1182 }
1183
1184 printk("\n");
1185}
1186
1187struct regbit {
1188 unsigned long bit;
1189 const char *name;
1190};
1191
1192static struct regbit msr_bits[] = {
1193#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1194 {MSR_SF, "SF"},
1195 {MSR_HV, "HV"},
1196#endif
1197 {MSR_VEC, "VEC"},
1198 {MSR_VSX, "VSX"},
1199#ifdef CONFIG_BOOKE
1200 {MSR_CE, "CE"},
1201#endif
1202 {MSR_EE, "EE"},
1203 {MSR_PR, "PR"},
1204 {MSR_FP, "FP"},
1205 {MSR_ME, "ME"},
1206#ifdef CONFIG_BOOKE
1207 {MSR_DE, "DE"},
1208#else
1209 {MSR_SE, "SE"},
1210 {MSR_BE, "BE"},
1211#endif
1212 {MSR_IR, "IR"},
1213 {MSR_DR, "DR"},
1214 {MSR_PMM, "PMM"},
1215#ifndef CONFIG_BOOKE
1216 {MSR_RI, "RI"},
1217 {MSR_LE, "LE"},
1218#endif
1219 {0, NULL}
1220};
1221
1222static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1223{
1224 const char *s = "";
1225
1226 for (; bits->bit; ++bits)
1227 if (val & bits->bit) {
1228 printk("%s%s", s, bits->name);
1229 s = sep;
1230 }
1231}
1232
1233#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1234static struct regbit msr_tm_bits[] = {
1235 {MSR_TS_T, "T"},
1236 {MSR_TS_S, "S"},
1237 {MSR_TM, "E"},
1238 {0, NULL}
1239};
1240
1241static void print_tm_bits(unsigned long val)
1242{
1243/*
1244 * This only prints something if at least one of the TM bit is set.
1245 * Inside the TM[], the output means:
1246 * E: Enabled (bit 32)
1247 * S: Suspended (bit 33)
1248 * T: Transactional (bit 34)
1249 */
1250 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1251 printk(",TM[");
1252 print_bits(val, msr_tm_bits, "");
1253 printk("]");
1254 }
1255}
1256#else
1257static void print_tm_bits(unsigned long val) {}
1258#endif
1259
1260static void print_msr_bits(unsigned long val)
1261{
1262 printk("<");
1263 print_bits(val, msr_bits, ",");
1264 print_tm_bits(val);
1265 printk(">");
1266}
1267
1268#ifdef CONFIG_PPC64
1269#define REG "%016lx"
1270#define REGS_PER_LINE 4
1271#define LAST_VOLATILE 13
1272#else
1273#define REG "%08lx"
1274#define REGS_PER_LINE 8
1275#define LAST_VOLATILE 12
1276#endif
1277
1278void show_regs(struct pt_regs * regs)
1279{
1280 int i, trap;
1281
1282 show_regs_print_info(KERN_DEFAULT);
1283
1284 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1285 regs->nip, regs->link, regs->ctr);
1286 printk("REGS: %p TRAP: %04lx %s (%s)\n",
1287 regs, regs->trap, print_tainted(), init_utsname()->release);
1288 printk("MSR: "REG" ", regs->msr);
1289 print_msr_bits(regs->msr);
1290 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1291 trap = TRAP(regs);
1292 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1293 printk("CFAR: "REG" ", regs->orig_gpr3);
1294 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1295#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1296 printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1297#else
1298 printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1299#endif
1300#ifdef CONFIG_PPC64
1301 printk("SOFTE: %ld ", regs->softe);
1302#endif
1303#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1304 if (MSR_TM_ACTIVE(regs->msr))
1305 printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1306#endif
1307
1308 for (i = 0; i < 32; i++) {
1309 if ((i % REGS_PER_LINE) == 0)
1310 printk("\nGPR%02d: ", i);
1311 printk(REG " ", regs->gpr[i]);
1312 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1313 break;
1314 }
1315 printk("\n");
1316#ifdef CONFIG_KALLSYMS
1317 /*
1318 * Lookup NIP late so we have the best change of getting the
1319 * above info out without failing
1320 */
1321 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1322 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1323#endif
1324 show_stack(current, (unsigned long *) regs->gpr[1]);
1325 if (!user_mode(regs))
1326 show_instructions(regs);
1327}
1328
1329void exit_thread(void)
1330{
1331}
1332
1333void flush_thread(void)
1334{
1335#ifdef CONFIG_HAVE_HW_BREAKPOINT
1336 flush_ptrace_hw_breakpoint(current);
1337#else /* CONFIG_HAVE_HW_BREAKPOINT */
1338 set_debug_reg_defaults(¤t->thread);
1339#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1340}
1341
1342void
1343release_thread(struct task_struct *t)
1344{
1345}
1346
1347/*
1348 * this gets called so that we can store coprocessor state into memory and
1349 * copy the current task into the new thread.
1350 */
1351int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1352{
1353 flush_all_to_thread(src);
1354 /*
1355 * Flush TM state out so we can copy it. __switch_to_tm() does this
1356 * flush but it removes the checkpointed state from the current CPU and
1357 * transitions the CPU out of TM mode. Hence we need to call
1358 * tm_recheckpoint_new_task() (on the same task) to restore the
1359 * checkpointed state back and the TM mode.
1360 */
1361 __switch_to_tm(src);
1362 tm_recheckpoint_new_task(src);
1363
1364 *dst = *src;
1365
1366 clear_task_ebb(dst);
1367
1368 return 0;
1369}
1370
1371static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1372{
1373#ifdef CONFIG_PPC_STD_MMU_64
1374 unsigned long sp_vsid;
1375 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1376
1377 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1378 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1379 << SLB_VSID_SHIFT_1T;
1380 else
1381 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1382 << SLB_VSID_SHIFT;
1383 sp_vsid |= SLB_VSID_KERNEL | llp;
1384 p->thread.ksp_vsid = sp_vsid;
1385#endif
1386}
1387
1388/*
1389 * Copy a thread..
1390 */
1391
1392/*
1393 * Copy architecture-specific thread state
1394 */
1395int copy_thread(unsigned long clone_flags, unsigned long usp,
1396 unsigned long kthread_arg, struct task_struct *p)
1397{
1398 struct pt_regs *childregs, *kregs;
1399 extern void ret_from_fork(void);
1400 extern void ret_from_kernel_thread(void);
1401 void (*f)(void);
1402 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1403
1404 /* Copy registers */
1405 sp -= sizeof(struct pt_regs);
1406 childregs = (struct pt_regs *) sp;
1407 if (unlikely(p->flags & PF_KTHREAD)) {
1408 /* kernel thread */
1409 struct thread_info *ti = (void *)task_stack_page(p);
1410 memset(childregs, 0, sizeof(struct pt_regs));
1411 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1412 /* function */
1413 if (usp)
1414 childregs->gpr[14] = ppc_function_entry((void *)usp);
1415#ifdef CONFIG_PPC64
1416 clear_tsk_thread_flag(p, TIF_32BIT);
1417 childregs->softe = 1;
1418#endif
1419 childregs->gpr[15] = kthread_arg;
1420 p->thread.regs = NULL; /* no user register state */
1421 ti->flags |= _TIF_RESTOREALL;
1422 f = ret_from_kernel_thread;
1423 } else {
1424 /* user thread */
1425 struct pt_regs *regs = current_pt_regs();
1426 CHECK_FULL_REGS(regs);
1427 *childregs = *regs;
1428 if (usp)
1429 childregs->gpr[1] = usp;
1430 p->thread.regs = childregs;
1431 childregs->gpr[3] = 0; /* Result from fork() */
1432 if (clone_flags & CLONE_SETTLS) {
1433#ifdef CONFIG_PPC64
1434 if (!is_32bit_task())
1435 childregs->gpr[13] = childregs->gpr[6];
1436 else
1437#endif
1438 childregs->gpr[2] = childregs->gpr[6];
1439 }
1440
1441 f = ret_from_fork;
1442 }
1443 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1444 sp -= STACK_FRAME_OVERHEAD;
1445
1446 /*
1447 * The way this works is that at some point in the future
1448 * some task will call _switch to switch to the new task.
1449 * That will pop off the stack frame created below and start
1450 * the new task running at ret_from_fork. The new task will
1451 * do some house keeping and then return from the fork or clone
1452 * system call, using the stack frame created above.
1453 */
1454 ((unsigned long *)sp)[0] = 0;
1455 sp -= sizeof(struct pt_regs);
1456 kregs = (struct pt_regs *) sp;
1457 sp -= STACK_FRAME_OVERHEAD;
1458 p->thread.ksp = sp;
1459#ifdef CONFIG_PPC32
1460 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1461 _ALIGN_UP(sizeof(struct thread_info), 16);
1462#endif
1463#ifdef CONFIG_HAVE_HW_BREAKPOINT
1464 p->thread.ptrace_bps[0] = NULL;
1465#endif
1466
1467 p->thread.fp_save_area = NULL;
1468#ifdef CONFIG_ALTIVEC
1469 p->thread.vr_save_area = NULL;
1470#endif
1471
1472 setup_ksp_vsid(p, sp);
1473
1474#ifdef CONFIG_PPC64
1475 if (cpu_has_feature(CPU_FTR_DSCR)) {
1476 p->thread.dscr_inherit = current->thread.dscr_inherit;
1477 p->thread.dscr = mfspr(SPRN_DSCR);
1478 }
1479 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1480 p->thread.ppr = INIT_PPR;
1481#endif
1482 kregs->nip = ppc_function_entry(f);
1483 return 0;
1484}
1485
1486/*
1487 * Set up a thread for executing a new program
1488 */
1489void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1490{
1491#ifdef CONFIG_PPC64
1492 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1493#endif
1494
1495 /*
1496 * If we exec out of a kernel thread then thread.regs will not be
1497 * set. Do it now.
1498 */
1499 if (!current->thread.regs) {
1500 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1501 current->thread.regs = regs - 1;
1502 }
1503
1504 memset(regs->gpr, 0, sizeof(regs->gpr));
1505 regs->ctr = 0;
1506 regs->link = 0;
1507 regs->xer = 0;
1508 regs->ccr = 0;
1509 regs->gpr[1] = sp;
1510
1511 /*
1512 * We have just cleared all the nonvolatile GPRs, so make
1513 * FULL_REGS(regs) return true. This is necessary to allow
1514 * ptrace to examine the thread immediately after exec.
1515 */
1516 regs->trap &= ~1UL;
1517
1518#ifdef CONFIG_PPC32
1519 regs->mq = 0;
1520 regs->nip = start;
1521 regs->msr = MSR_USER;
1522#else
1523 if (!is_32bit_task()) {
1524 unsigned long entry;
1525
1526 if (is_elf2_task()) {
1527 /* Look ma, no function descriptors! */
1528 entry = start;
1529
1530 /*
1531 * Ulrich says:
1532 * The latest iteration of the ABI requires that when
1533 * calling a function (at its global entry point),
1534 * the caller must ensure r12 holds the entry point
1535 * address (so that the function can quickly
1536 * establish addressability).
1537 */
1538 regs->gpr[12] = start;
1539 /* Make sure that's restored on entry to userspace. */
1540 set_thread_flag(TIF_RESTOREALL);
1541 } else {
1542 unsigned long toc;
1543
1544 /* start is a relocated pointer to the function
1545 * descriptor for the elf _start routine. The first
1546 * entry in the function descriptor is the entry
1547 * address of _start and the second entry is the TOC
1548 * value we need to use.
1549 */
1550 __get_user(entry, (unsigned long __user *)start);
1551 __get_user(toc, (unsigned long __user *)start+1);
1552
1553 /* Check whether the e_entry function descriptor entries
1554 * need to be relocated before we can use them.
1555 */
1556 if (load_addr != 0) {
1557 entry += load_addr;
1558 toc += load_addr;
1559 }
1560 regs->gpr[2] = toc;
1561 }
1562 regs->nip = entry;
1563 regs->msr = MSR_USER64;
1564 } else {
1565 regs->nip = start;
1566 regs->gpr[2] = 0;
1567 regs->msr = MSR_USER32;
1568 }
1569#endif
1570#ifdef CONFIG_VSX
1571 current->thread.used_vsr = 0;
1572#endif
1573 memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state));
1574 current->thread.fp_save_area = NULL;
1575#ifdef CONFIG_ALTIVEC
1576 memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state));
1577 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1578 current->thread.vr_save_area = NULL;
1579 current->thread.vrsave = 0;
1580 current->thread.used_vr = 0;
1581#endif /* CONFIG_ALTIVEC */
1582#ifdef CONFIG_SPE
1583 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1584 current->thread.acc = 0;
1585 current->thread.spefscr = 0;
1586 current->thread.used_spe = 0;
1587#endif /* CONFIG_SPE */
1588#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1589 if (cpu_has_feature(CPU_FTR_TM))
1590 regs->msr |= MSR_TM;
1591 current->thread.tm_tfhar = 0;
1592 current->thread.tm_texasr = 0;
1593 current->thread.tm_tfiar = 0;
1594#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1595}
1596EXPORT_SYMBOL(start_thread);
1597
1598#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1599 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1600
1601int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1602{
1603 struct pt_regs *regs = tsk->thread.regs;
1604
1605 /* This is a bit hairy. If we are an SPE enabled processor
1606 * (have embedded fp) we store the IEEE exception enable flags in
1607 * fpexc_mode. fpexc_mode is also used for setting FP exception
1608 * mode (asyn, precise, disabled) for 'Classic' FP. */
1609 if (val & PR_FP_EXC_SW_ENABLE) {
1610#ifdef CONFIG_SPE
1611 if (cpu_has_feature(CPU_FTR_SPE)) {
1612 /*
1613 * When the sticky exception bits are set
1614 * directly by userspace, it must call prctl
1615 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1616 * in the existing prctl settings) or
1617 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1618 * the bits being set). <fenv.h> functions
1619 * saving and restoring the whole
1620 * floating-point environment need to do so
1621 * anyway to restore the prctl settings from
1622 * the saved environment.
1623 */
1624 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1625 tsk->thread.fpexc_mode = val &
1626 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1627 return 0;
1628 } else {
1629 return -EINVAL;
1630 }
1631#else
1632 return -EINVAL;
1633#endif
1634 }
1635
1636 /* on a CONFIG_SPE this does not hurt us. The bits that
1637 * __pack_fe01 use do not overlap with bits used for
1638 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
1639 * on CONFIG_SPE implementations are reserved so writing to
1640 * them does not change anything */
1641 if (val > PR_FP_EXC_PRECISE)
1642 return -EINVAL;
1643 tsk->thread.fpexc_mode = __pack_fe01(val);
1644 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1645 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1646 | tsk->thread.fpexc_mode;
1647 return 0;
1648}
1649
1650int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1651{
1652 unsigned int val;
1653
1654 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1655#ifdef CONFIG_SPE
1656 if (cpu_has_feature(CPU_FTR_SPE)) {
1657 /*
1658 * When the sticky exception bits are set
1659 * directly by userspace, it must call prctl
1660 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1661 * in the existing prctl settings) or
1662 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1663 * the bits being set). <fenv.h> functions
1664 * saving and restoring the whole
1665 * floating-point environment need to do so
1666 * anyway to restore the prctl settings from
1667 * the saved environment.
1668 */
1669 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1670 val = tsk->thread.fpexc_mode;
1671 } else
1672 return -EINVAL;
1673#else
1674 return -EINVAL;
1675#endif
1676 else
1677 val = __unpack_fe01(tsk->thread.fpexc_mode);
1678 return put_user(val, (unsigned int __user *) adr);
1679}
1680
1681int set_endian(struct task_struct *tsk, unsigned int val)
1682{
1683 struct pt_regs *regs = tsk->thread.regs;
1684
1685 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1686 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1687 return -EINVAL;
1688
1689 if (regs == NULL)
1690 return -EINVAL;
1691
1692 if (val == PR_ENDIAN_BIG)
1693 regs->msr &= ~MSR_LE;
1694 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1695 regs->msr |= MSR_LE;
1696 else
1697 return -EINVAL;
1698
1699 return 0;
1700}
1701
1702int get_endian(struct task_struct *tsk, unsigned long adr)
1703{
1704 struct pt_regs *regs = tsk->thread.regs;
1705 unsigned int val;
1706
1707 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1708 !cpu_has_feature(CPU_FTR_REAL_LE))
1709 return -EINVAL;
1710
1711 if (regs == NULL)
1712 return -EINVAL;
1713
1714 if (regs->msr & MSR_LE) {
1715 if (cpu_has_feature(CPU_FTR_REAL_LE))
1716 val = PR_ENDIAN_LITTLE;
1717 else
1718 val = PR_ENDIAN_PPC_LITTLE;
1719 } else
1720 val = PR_ENDIAN_BIG;
1721
1722 return put_user(val, (unsigned int __user *)adr);
1723}
1724
1725int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1726{
1727 tsk->thread.align_ctl = val;
1728 return 0;
1729}
1730
1731int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1732{
1733 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1734}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
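
/*
 * Note (illustrative): get_wchan() is the arch hook behind the wchan value
 * reported through /proc and tools such as ps, e.g.
 *
 *	cat /proc/<pid>/wchan
 *
 * It walks at most 16 saved frames, so a task sleeping very deep in the
 * scheduler may legitimately come back as 0.
 */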

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
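
/*
 * Illustrative usage (sketch): callers normally go through the
 * ppc64_runlatch_on() / ppc64_runlatch_off() wrappers in <asm/runlatch.h>,
 * which avoid touching the SPR unnecessarily (they check CPU_FTR_CTRL and
 * the _TLF_RUNLATCH flag before calling the __ variants above), e.g. around
 * an idle period:
 *
 *	ppc64_runlatch_off();
 *	// ... wait for work ...
 *	ppc64_runlatch_on();
 */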

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
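
/*
 * Worked example (illustrative): with 4K pages, ~PAGE_MASK is 0xfff, so the
 * stack pointer is lowered by a random 0..4095 bytes and then rounded down
 * to a 16-byte boundary by the "& ~0xf" above.
 */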

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
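
/*
 * Worked example (illustrative): the randomisation range is computed in
 * pages.  For a 32-bit task, up to 1UL << (23 - PAGE_SHIFT) pages shifted
 * back by PAGE_SHIFT gives at most 1 << 23 bytes = 8MB of offset; the
 * 64-bit case uses 1 << 30 bytes = 1GB, matching the comment above.
 */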

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}