/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

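/*
 * Low-level return paths, implemented in assembly; copy_thread() below
 * points a new task's saved $ra at one of these so that the first switch
 * into the task resumes at the appropriate entry point.
 */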
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p,
		unsigned long tls)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* Set up the child's exception frame at the top of the kernel stack. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
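		/*
		 * A sketch of the intent here: R3000-class CPUs keep a
		 * three-deep KU/IE stack in c0_status that rfe pops, so the
		 * current bits are rotated into the "previous" slots; later
		 * CPUs instead set EXL so that the first return from
		 * exception unwinds to the state set up above.
		 */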
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void *func;
	unsigned long func_size;
	int frame_size;
	int pc_offset;
};

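/*
 * Reconstruct the absolute target of a j/jal instruction: the 26-bit
 * instr_index field is shifted left twice and combined with the top four
 * bits of the address of the instruction. For example, a pc of 0x803e4000
 * and a target field of 0x0100000 yield 0x80400000.
 */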
#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16 ra
	 * jr ra
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    ip->mm16_r5_format.rt == mm_jr16_op &&
		    ip->mm16_r5_format.imm == 31)
			return 1;
		return 0;
	}

	if (ip->r_format.opcode == mm_pool32a_op &&
	    ip->r_format.func == mm_pool32axf_op &&
	    ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
	    ip->r_format.rt == 31)
		return 1;
	return 0;
#else
	if (ip->r_format.opcode == spec_op &&
	    ip->r_format.func == jr_op &&
	    ip->r_format.rs == 31)
		return 1;
	return 0;
#endif
}

static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
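	/*
	 * Loongson-3 store-pair (the lswc2 format decoded below) writes two
	 * GPRs with a single instruction, so $ra may be either operand; the
	 * word index is scaled from the quad-aligned offset accordingly.
	 */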
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
	    (ip->loongson3_lswc2_format.ls == 1) &&
	    (ip->loongson3_lswc2_format.fr == 0) &&
	    (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}

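/*
 * Scan the start of a function for its stack frame allocation and the
 * store of $ra: returns 0 when both were found (a nested function), 1
 * when the function appears to be a leaf, and -1 when the prologue
 * could not be analyzed.
 */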
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	unsigned int last_insn_size = 0;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

	while (ip < ip_end) {
		ip = (void *)ip + last_insn_size;

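		/*
		 * microMIPS code is a stream of 16-bit halfwords; normalize
		 * each instruction into a 32-bit word with the major halfword
		 * on top so the decode helpers above see a single layout.
		 */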
		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jr_ra_ins(ip)) {
			break;
		} else if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for a jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
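/*
 * Given a pc/sp/ra triple, compute the caller's pc and advance *sp past
 * the current frame. Returns 0 when the unwind cannot continue, e.g.
 * because sp fell outside the stack bounds or pc is not kernel text.
 */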
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START
	 * task stacks at THREAD_SIZE - 32
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points to
		 * something within the stack of the current task
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
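/*
 * Best-effort guess at where a blocked task sleeps, for the wchan field
 * in /proc: start from the pc saved at the last context switch and
 * unwind until the first address outside scheduler code.
 */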
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task_is_running(task))
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

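/*
 * Compute the highest address usable as the base of the user stack by
 * reserving room above it for everything mapped at the top of the
 * address space: delay slot emulation frames, the VDSO and its data
 * page, the GIC user page, cache colouring and VDSO base randomization.
 */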
unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for 32-bit ABIs and on a 16-byte boundary for 64-bit ABIs.
 */
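/*
 * Note that the randomization below shifts sp down by less than one page;
 * the final mask then re-establishes the alignment described above.
 */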
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}

static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

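/*
 * The two functions below back the PR_GET_FP_MODE / PR_SET_FP_MODE
 * prctl(2) operations, which let a process query and switch its FPU
 * register model at runtime.
 */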
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	get_online_cpus();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	put_online_cpus();

	return 0;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */