/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>
#include <linux/cpu.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/mips-cps.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

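/*
 * Set up the register state with which a task first enters user mode;
 * called when a new program is loaded via exec.
 */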
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void *func;
	unsigned long func_size;
	int frame_size;
	int pc_offset;
};

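/*
 * Compute the absolute target of a j/jal instruction: the 26-bit index
 * is shifted left twice and combined with the high bits
 * (pc & 0xf0000000) of the jump's address.
 */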
#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
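
/*
 * The helpers below classify single instructions for the prologue
 * scanner in get_frame_info().
 */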
202
203static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
204{
205#ifdef CONFIG_CPU_MICROMIPS
206 /*
207 * swsp ra,offset
208 * swm16 reglist,offset(sp)
209 * swm32 reglist,offset(sp)
210 * sw32 ra,offset(sp)
211 * jradiussp - NOT SUPPORTED
212 *
213 * microMIPS is way more fun...
214 */
215 if (mm_insn_16bit(ip->word >> 16)) {
216 switch (ip->mm16_r5_format.opcode) {
217 case mm_swsp16_op:
218 if (ip->mm16_r5_format.rt != 31)
219 return 0;
220
221 *poff = ip->mm16_r5_format.imm;
222 *poff = (*poff << 2) / sizeof(ulong);
223 return 1;
224
225 case mm_pool16c_op:
226 switch (ip->mm16_m_format.func) {
227 case mm_swm16_op:
228 *poff = ip->mm16_m_format.imm;
229 *poff += 1 + ip->mm16_m_format.rlist;
230 *poff = (*poff << 2) / sizeof(ulong);
231 return 1;
232
233 default:
234 return 0;
235 }
236
237 default:
238 return 0;
239 }
240 }
241
242 switch (ip->i_format.opcode) {
243 case mm_sw32_op:
244 if (ip->i_format.rs != 29)
245 return 0;
246 if (ip->i_format.rt != 31)
247 return 0;
248
249 *poff = ip->i_format.simmediate / sizeof(ulong);
250 return 1;
251
252 case mm_pool32b_op:
253 switch (ip->mm_m_format.func) {
254 case mm_swm32_func:
255 if (ip->mm_m_format.rd < 0x10)
256 return 0;
257 if (ip->mm_m_format.base != 29)
258 return 0;
259
260 *poff = ip->mm_m_format.simmediate;
261 *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
262 *poff /= sizeof(ulong);
263 return 1;
264 default:
265 return 0;
266 }
267
268 default:
269 return 0;
270 }
271#else
272 /* sw / sd $ra, offset($sp) */
273 if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
274 ip->i_format.rs == 29 && ip->i_format.rt == 31) {
275 *poff = ip->i_format.simmediate / sizeof(ulong);
276 return 1;
277 }
278
279 return 0;
280#endif
281}
282
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}

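/*
 * Scan the first instructions of a function for its stack-frame setup:
 * record the frame size and the frame offset at which $ra is saved.
 * Returns 0 when both were found (a nested function), 1 when no $ra
 * save was found (a leaf function), and -1 when the prologue could not
 * be analyzed.
 */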
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip;
	const unsigned int max_insns = 128;
	unsigned int last_insn_size = 0;
	unsigned int i;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	for (i = 0; i < max_insns; i++) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}

#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START,
	 * task stacks at THREAD_SIZE - 32.
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points
		 * to something within the stack of the current task.
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

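/*
 * Compute the highest address at which a user stack may start: the top
 * of the address space, minus the pages reserved above the stack for
 * delay slot emulation, the VDSO and its data/GIC pages, cache colour
 * alignment and VDSO base randomization.
 */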
unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	/* One page for branch delay slot "emulation" */
	top -= PAGE_SIZE;

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for 32-bit ABIs and on a 16-byte boundary for 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}

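/*
 * Per-CPU call_single_data used to ask a remote CPU to dump its
 * backtrace; a CPU's bit in backtrace_csd_busy is set while it has a
 * request outstanding.
 */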
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		csd->func = handle_backtrace;
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

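/*
 * Report the task's current FP mode as a bitmask of PR_FP_MODE_* flags,
 * as queried via prctl(PR_GET_FP_MODE).
 */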
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	get_online_cpus();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	put_online_cpus();

	return 0;
}

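/*
 * Dump the general purpose registers in the user-visible elf_gregset_t
 * layout, as used by ptrace and core dumps. k0/k1 are kernel scratch
 * registers and are reported as zero.
 */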
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */