/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	/* What the heck is this check doing? */
	if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
		play_dead();
}
#endif

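/*
 * Assembly-side return paths: copy_thread() below points the child's saved
 * $ra (thread.reg31) at one of these, so a new task resumes in the right
 * trampoline the first time it is scheduled.
 */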
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

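/*
 * Set up the register state with which a thread enters user mode after
 * exec: drop coprocessor and kernel privileges, discard any live FPU/MSA
 * and DSP state, and point EPC and $sp at the new program.
 */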
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void *func;
	unsigned long func_size;
	int frame_size;
	int pc_offset;
};

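/*
 * A MIPS j/jal instruction encodes a 26-bit word index: the jump target is
 * formed by keeping the top four bits of the PC and shifting the encoded
 * index left by two, which is exactly what J_TARGET() reconstructs.
 */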
#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction mmi;

	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		mmi.word = (ip->halfword[0] << 16);
		return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
			mmi.mm16_r5_format.rt == 31) ||
		       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
			mmi.mm16_m_format.func == mm_swm16_op);
	}
	else {
		mmi.halfword[0] = ip->halfword[1];
		mmi.halfword[1] = ip->halfword[0];
		return (mmi.mm_m_format.opcode == mm_pool32b_op &&
			mmi.mm_m_format.rd > 9 &&
			mmi.mm_m_format.base == 29 &&
			mmi.mm_m_format.func == mm_swm32_func) ||
		       (mmi.i_format.opcode == mm_sw32_op &&
			mmi.i_format.rs == 29 &&
			mmi.i_format.rt == 31);
	}
#else
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
#endif
}

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	union mips_instruction mmi;

	mmi.word = (ip->halfword[0] << 16);

	if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
	    (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
	    ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		union mips_instruction mmi;

		mmi.word = (ip->halfword[0] << 16);
		return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
			mmi.mm16_r3_format.simmediate & mm_addiusp_func) ||
		       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
			mmi.mm16_r5_format.rt == 29);
	}
	return ip->mm_i_format.opcode == mm_addiu32_op &&
	       ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
#endif
	return 0;
}

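/*
 * A sketch of the (non-microMIPS) prologue shape the scan below looks for,
 * e.g. for a function with a 56-byte frame:
 *
 *	addiu	sp,sp,-56	# is_sp_move_ins() -> frame_size = 56
 *	sw	ra,52(sp)	# is_ra_save_ins() -> pc_offset = 52/sizeof(long)
 *
 * The offsets are illustrative only; any compiler-chosen values match.
 */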
static int get_frame_info(struct mips_frame_info *info)
{
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction *ip = (void *) (((char *) info->func) - 1);
#else
	union mips_instruction *ip = info->func;
#endif
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jump_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
			{
#ifdef CONFIG_CPU_MICROMIPS
				if (mm_insn_16bit(ip->halfword[0]))
				{
					unsigned short tmp;

					if (ip->halfword[0] & mm_addiusp_func)
					{
						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
					} else {
						tmp = (ip->halfword[0] >> 1);
						info->frame_size = -(signed short)(tmp & 0xf);
					}
					ip = (void *) &ip->halfword[1];
					ip--;
				} else
#endif
				info->frame_size = -ip->i_format.simmediate;
			}
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/*
 * Generic stack unwinding function: given a task's stack page and its
 * current sp/pc/ra, return the caller's PC (or 0 when the unwind fails)
 * and advance *sp and *ra past the current frame.
 */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 *
 * Returns the PC at which a blocked task is waiting, unwinding past
 * scheduler internals so the result is meaningful to userspace.
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}

static void arch_dump_stack(void *info)
{
	struct pt_regs *regs;

	regs = get_irq_regs();

	if (regs)
		show_regs(regs);

	dump_stack();
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	smp_call_function(arch_dump_stack, NULL, 1);
}

int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	unsigned long switch_count;
	struct task_struct *t;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then wait until all threads whose
	 * FP mode is about to change have been context switched. This approach
	 * allows us to only worry about whether an FP mode switch is in
	 * progress when FP is first used in a task's time slice. Pretty much
	 * all of the mode switch overhead can thus be confined to cases where
	 * mode switches are actually occurring. That is, to here. However for
	 * the thread performing the mode switch it may take a while...
	 */
	if (num_online_cpus() > 1) {
		spin_lock_irq(&task->sighand->siglock);

		for_each_thread(task, t) {
			if (t == current)
				continue;

			switch_count = t->nvcsw + t->nivcsw;

			do {
				spin_unlock_irq(&task->sighand->siglock);
				cond_resched();
				spin_lock_irq(&task->sighand->siglock);
			} while ((t->nvcsw + t->nivcsw) == switch_count);
		}

		spin_unlock_irq(&task->sighand->siglock);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);

	return 0;
}
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void __noreturn arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (unlikely(args->fn)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.reg16 = (unsigned long)args->fn;
		p->thread.reg17 = (unsigned long)args->fn_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0;	/* Clear error flag */
	childregs->regs[2] = 0;	/* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void *func;
	unsigned long func_size;
	int frame_size;
	int pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16 ra
	 * jr ra
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    ip->mm16_r5_format.rt == mm_jr16_op &&
		    ip->mm16_r5_format.imm == 31)
			return 1;
		return 0;
	}

	if (ip->r_format.opcode == mm_pool32a_op &&
	    ip->r_format.func == mm_pool32axf_op &&
	    ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
	    ip->r_format.rt == 31)
		return 1;
	return 0;
#else
	if (ip->r_format.opcode == spec_op &&
	    ip->r_format.func == jr_op &&
	    ip->r_format.rs == 31)
		return 1;
	return 0;
#endif
}

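/*
 * Recognize an instruction that stores $ra to the stack. On a match,
 * *poff receives the save slot's offset from $sp in units of longs.
 */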
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
	    ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
	    (ip->loongson3_lswc2_format.ls == 1) &&
	    (ip->loongson3_lswc2_format.fr == 0) &&
	    (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
	    ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

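/*
 * Recognize the stack allocation in a function prologue. On a match,
 * *frame_size receives the (positive) size of the stack frame in bytes.
 */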
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}

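/*
 * Analyze the prologue of info->func. Returns 0 when both a frame and an
 * $ra save slot were found (nested function), 1 when no $ra save was seen
 * (leaf function), or -1 when the prologue could not be analyzed.
 */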
static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	unsigned int last_insn_size = 0;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

	while (ip < ip_end) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jr_ra_ins(ip)) {
			break;
		} else if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for a jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, results given by
	 * thread_saved_pc() and __get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START
	 * task stacks at THREAD_SIZE - 32
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points to
		 * something within the stack of the current task.
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

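/*
 * Work out the highest usable top-of-stack address: start at TASK_SIZE and
 * reserve room above the stack for the branch delay slot emulation page,
 * the VDSO and related pages, cache colouring and VDSO randomization.
 */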
unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABIs. The randomization subtracts up to one page from sp
 * before aligning.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);

	return sp & ALMASK;
}

static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
}

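/*
 * The two functions below back the PR_GET_FP_MODE/PR_SET_FP_MODE prctl(2)
 * operations: they report and switch the FP register mode (FR/FRE) for
 * every thread in the process.
 */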
int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If a task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	cpus_read_lock();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	cpus_read_unlock();

	return 0;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
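/*
 * Flatten pt_regs into the 32-bit EF_* register layout used by the ptrace
 * regset and ELF core dump code.
 */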
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */