/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

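/*
 * Set up the initial user-mode context at exec time: drop kernel and
 * coprocessor privileges, discard any FPU/MSA/DSP state left over from
 * the old program, and point EPC and $29 (the user stack pointer) at
 * the new program's entry point and stack.
 */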
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
	if (unlikely(args->fn)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.reg16 = (unsigned long)args->fn;
		p->thread.reg17 = (unsigned long)args->fn_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void *func;
	unsigned long func_size;
	int frame_size;
	int pc_offset;
};

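/*
 * Decode the target of a MIPS j/jal instruction: the instruction encodes
 * a 26-bit word index, which replaces the low 28 bits of the PC while the
 * top 4 bits are inherited from the address of the jump itself.
 */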
#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16 ra
	 * jr ra
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    ip->mm16_r5_format.rt == mm_jr16_op &&
		    ip->mm16_r5_format.imm == 31)
			return 1;
		return 0;
	}

	if (ip->r_format.opcode == mm_pool32a_op &&
	    ip->r_format.func == mm_pool32axf_op &&
	    ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
	    ip->r_format.rt == 31)
		return 1;
	return 0;
#else
	if (ip->r_format.opcode == spec_op &&
	    ip->r_format.func == jr_op &&
	    ip->r_format.rs == 31)
		return 1;
	return 0;
#endif
}

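/*
 * Recognise an instruction that stores $ra to the stack and report, via
 * *poff, the offset of the save slot in sizeof(ulong) units from $sp, so
 * the unwinder can index the stack as an unsigned long array.
 */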
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
	    (ip->loongson3_lswc2_format.ls == 1) &&
	    (ip->loongson3_lswc2_format.fr == 0) &&
	    (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalr16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

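/*
 * Recognise an instruction that moves the stack pointer. On a match,
 * *frame_size receives the amount by which the stack grows: the negated
 * (usually negative) immediate, so a prologue allocation yields a
 * positive frame size.
 */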
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jradiussp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}

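/*
 * Scan a function's prologue to fill in its mips_frame_info. Returns 0
 * if a stack frame and an $ra save slot were found (nested function),
 * 1 if no $ra save was seen (leaf function), or -1 if the prologue
 * could not be analysed.
 */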
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	unsigned int last_insn_size = 0;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

	while (ip < ip_end) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jr_ra_ins(ip)) {
			break;
		} else if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
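/*
 * Without kallsyms, locate __schedule() by scanning the first few
 * instructions of schedule() for the j instruction of its tail call
 * and decoding the jump target.
 */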
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and __get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}

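/*
 * The unwinder below does not rely on frame pointers. Instead it decodes
 * each function's prologue (via get_frame_info()) to learn the frame size
 * and the stack slot where $ra was saved, then walks up one frame at a
 * time, crossing from the IRQ stack back onto the task stack when needed.
 */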
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START
	 * task stacks at THREAD_SIZE - 32
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points
		 * to something within the stack of the current task.
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can wrongly
		 * consider a nested function as a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
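/*
 * Work out which stack the given $sp lies on - one of the per-CPU IRQ
 * stacks or the task's own kernel stack - and unwind within it.
 */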
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

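/*
 * Compute the highest address usable as the top of a new process's user
 * stack: start at TASK_SIZE and reserve room for the branch delay slot
 * emulation page, the VDSO and its data/GIC user pages, cache colouring
 * alignment, and VDSO base randomization.
 */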
unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);

	return sp & ALMASK;
}

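/*
 * Backtrace IPIs: each CPU owns a call_single_data_t used to ask it to
 * dump its own backtrace from IRQ context. backtrace_csd_busy tracks
 * which CPUs have an IPI outstanding so a still-in-flight CSD is never
 * reused.
 */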
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

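/*
 * prctl(PR_GET_FP_MODE/PR_SET_FP_MODE) support. PR_FP_MODE_FR selects
 * the 64-bit FP register file (Status.FR = 1); PR_FP_MODE_FRE selects
 * the hybrid layout (FR = 1 plus Config5.FRE) that lets FR = 0 code run
 * on a 64-bit register file.
 */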
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	cpus_read_lock();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	cpus_read_unlock();

	return 0;
}

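/*
 * Dump the general purpose registers in the fixed MIPS32/MIPS64 EF_*
 * layout used when exposing register state to userspace. $26/$27 (k0/k1)
 * are kernel scratch registers and are always reported as zero.
 */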
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */