v6.2
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}
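/*
 * Illustrative sketch, not part of this file: an exec handler such as the
 * ELF loader typically finishes by handing control to the new image with
 * a call along the lines of
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * (elf_entry and bprm->p being the entry point and initial user stack as
 * set up by the loader). The code above then drops kernel privileges and
 * points cp0_epc and $sp at them.
 */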

void exit_thread(struct task_struct *tsk)
{
	/*
	 * User threads may have allocated a delay slot emulation frame.
	 * If so, clean up that allocation.
	 */
	if (!(current->flags & PF_KTHREAD))
		dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/*  Put the stack after the struct pt_regs.  */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
	if (unlikely(args->fn)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.reg16 = (unsigned long)args->fn;
		p->thread.reg17 = (unsigned long)args->fn_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0; /* Clear error flag */
	childregs->regs[2] = 0; /* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
	atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = tls;

	return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
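/*
 * Worked example for the macro above (illustrative values): a MIPS `j`
 * instruction at pc 0x80421000 whose 26-bit target field is 0x012345
 * resolves to
 *
 *	J_TARGET(0x80421000, 0x012345)
 *		= (0x80421000 & 0xf0000000) | (0x012345 << 2)
 *		= 0x80000000 | 0x00048d14
 *		= 0x80048d14
 *
 * i.e. the target field is a word index spliced into the pc's 256MB
 * region.
 */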

static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16 ra
	 * jr ra
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    ip->mm16_r5_format.rt == mm_jr16_op &&
		    ip->mm16_r5_format.imm == 31)
			return 1;
		return 0;
	}

	if (ip->r_format.opcode == mm_pool32a_op &&
	    ip->r_format.func == mm_pool32axf_op &&
	    ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
	    ip->r_format.rt == 31)
		return 1;
	return 0;
#else
	if (ip->r_format.opcode == spec_op &&
	    ip->r_format.func == jr_op &&
	    ip->r_format.rs == 31)
		return 1;
	return 0;
#endif
}

static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		switch (ip->mm16_r5_format.opcode) {
		case mm_swsp16_op:
			if (ip->mm16_r5_format.rt != 31)
				return 0;

			*poff = ip->mm16_r5_format.imm;
			*poff = (*poff << 2) / sizeof(ulong);
			return 1;

		case mm_pool16c_op:
			switch (ip->mm16_m_format.func) {
			case mm_swm16_op:
				*poff = ip->mm16_m_format.imm;
				*poff += 1 + ip->mm16_m_format.rlist;
				*poff = (*poff << 2) / sizeof(ulong);
				return 1;

			default:
				return 0;
			}

		default:
			return 0;
		}
	}

	switch (ip->i_format.opcode) {
	case mm_sw32_op:
		if (ip->i_format.rs != 29)
			return 0;
		if (ip->i_format.rt != 31)
			return 0;

		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;

	case mm_pool32b_op:
		switch (ip->mm_m_format.func) {
		case mm_swm32_func:
			if (ip->mm_m_format.rd < 0x10)
				return 0;
			if (ip->mm_m_format.base != 29)
				return 0;

			*poff = ip->mm_m_format.simmediate;
			*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
			*poff /= sizeof(ulong);
			return 1;
		default:
			return 0;
		}

	default:
		return 0;
	}
#else
	/* sw / sd $ra, offset($sp) */
	if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 && ip->i_format.rt == 31) {
		*poff = ip->i_format.simmediate / sizeof(ulong);
		return 1;
	}
#ifdef CONFIG_CPU_LOONGSON64
	if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
		      (ip->loongson3_lswc2_format.ls == 1) &&
		      (ip->loongson3_lswc2_format.fr == 0) &&
		      (ip->loongson3_lswc2_format.base == 29)) {
		if (ip->loongson3_lswc2_format.rt == 31) {
			*poff = ip->loongson3_lswc2_format.offset << 1;
			return 1;
		}
		if (ip->loongson3_lswc2_format.rq == 31) {
			*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
			return 1;
		}
	}
#endif
	return 0;
#endif
}
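/*
 * Worked example for the classic (non-microMIPS) case, with illustrative
 * values: the prologue store `sw $ra, 28($sp)` has rs == 29, rt == 31 and
 * simmediate == 28, so on a 32-bit kernel this helper reports
 * *poff = 28 / sizeof(ulong) = 7, i.e. $ra sits seven words above the
 * decremented stack pointer, which is where the unwinder will reload it.
 */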

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
		    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
			return 1;
		return 0;
	}

	if (ip->j_format.opcode == mm_j32_op)
		return 1;
	if (ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
			ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
	unsigned short tmp;

	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->word >> 16)) {
		if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
		    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
			tmp = ip->mm_b0_format.simmediate >> 1;
			tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
			if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
				tmp ^= 0x100;
			*frame_size = -(signed short)(tmp << 2);
			return 1;
		}
		if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
		    ip->mm16_r5_format.rt == 29) {
			tmp = ip->mm16_r5_format.imm >> 1;
			*frame_size = -(signed short)(tmp & 0xf);
			return 1;
		}
		return 0;
	}

	if (ip->mm_i_format.opcode == mm_addiu32_op &&
	    ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;

	if (ip->i_format.opcode == addiu_op ||
	    ip->i_format.opcode == daddiu_op) {
		*frame_size = -ip->i_format.simmediate;
		return 1;
	}
#endif
	return 0;
}
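/*
 * Worked example for the classic case (illustrative values): the prologue
 * instruction `addiu $sp, $sp, -64` has rs == rt == 29 and
 * simmediate == -64, so this helper reports *frame_size = 64 and
 * get_frame_info() learns that the function owns a 64-byte stack frame.
 */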

static int get_frame_info(struct mips_frame_info *info)
{
	bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
	union mips_instruction insn, *ip, *ip_end;
	unsigned int last_insn_size = 0;
	bool saw_jump = false;

	info->pc_offset = -1;
	info->frame_size = 0;

	ip = (void *)msk_isa16_mode((ulong)info->func);
	if (!ip)
		goto err;

	ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

	while (ip < ip_end) {
		ip = (void *)ip + last_insn_size;

		if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
			insn.word = ip->halfword[0] << 16;
			last_insn_size = 2;
		} else if (is_mmips) {
			insn.word = ip->halfword[0] << 16 | ip->halfword[1];
			last_insn_size = 4;
		} else {
			insn.word = ip->word;
			last_insn_size = 4;
		}

		if (is_jr_ra_ins(ip)) {
			break;
		} else if (!info->frame_size) {
			is_sp_move_ins(&insn, &info->frame_size);
			continue;
		} else if (!saw_jump && is_jump_ins(ip)) {
			/*
			 * If we see a jump instruction, we are finished
			 * with the frame save.
			 *
			 * Some functions can have a shortcut return at
			 * the beginning of the function, so don't start
			 * looking for a jump instruction until we see the
			 * frame setup.
			 *
			 * The RA save instruction can get put into the
			 * delay slot of the jump instruction, so look
			 * at the next instruction, too.
			 */
			saw_jump = true;
			continue;
		}
		if (info->pc_offset == -1 &&
		    is_ra_save_ins(&insn, &info->pc_offset))
			break;
		if (saw_jump)
			break;
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}
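/*
 * Summary of the contract above, with an assumed example prologue:
 *
 *	addiu	$sp, $sp, -32		-> frame_size = 32
 *	sw	$ra, 28($sp)		-> pc_offset = 7 (32-bit kernel)
 *
 * Scanning this returns 0 (nested function, frame fully described); a
 * function that never saves $ra returns 1 (leaf); an unparsable prologue
 * returns -1.
 */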

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and __get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	unsigned long low, high, irq_stack_high;
	struct mips_frame_info info;
	unsigned long size, ofs;
	struct pt_regs *regs;
	int leaf;

	if (!stack_page)
		return 0;

	/*
	 * IRQ stacks start at IRQ_STACK_START,
	 * task stacks at THREAD_SIZE - 32
	 */
	low = stack_page;
	if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
		high = stack_page + IRQ_STACK_START;
		irq_stack_high = high;
	} else {
		high = stack_page + THREAD_SIZE - 32;
		irq_stack_high = 0;
	}

	/*
	 * If we reached the top of the interrupt stack, start unwinding
	 * the interrupted task stack.
	 */
	if (unlikely(*sp == irq_stack_high)) {
		unsigned long task_sp = *(unsigned long *)*sp;

		/*
		 * Check that the pointer saved in the IRQ stack head points to
		 * something within the stack of the current task
		 */
		if (!object_is_on_stack((void *)task_sp))
			return 0;

		/*
		 * Follow the pointer to the task's kernel stack frame where
		 * the interrupted state was saved.
		 */
		regs = (struct pt_regs *)task_sp;
		pc = regs->cp0_epc;
		if (!user_mode(regs) && __kernel_text_address(pc)) {
			*sp = regs->regs[29];
			*ra = regs->regs[31];
			return pc;
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < low || *sp + info.frame_size > high)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		if (on_irq_stack(cpu, *sp)) {
			stack_page = (unsigned long)irq_stack[cpu];
			break;
		}
	}

	if (!stack_page)
		stack_page = (unsigned long)task_stack_page(task);

	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

unsigned long mips_stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* One page for branch delay slot "emulation" */
		top -= PAGE_SIZE;
	}

	/* Space for the VDSO, data page & GIC user page */
	top -= PAGE_ALIGN(current->thread.abi->vdso->size);
	top -= PAGE_SIZE;
	top -= mips_gic_present() ? PAGE_SIZE : 0;

	/* Space for cache colour alignment */
	if (cpu_has_dc_aliases)
		top -= shm_align_mask + 1;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}
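/*
 * Illustrative layout, assuming 32-bit values (TASK_SIZE = 0x80000000,
 * 4KB pages, one-page VDSO, GIC present, no cache aliasing): the stack
 * top becomes
 *
 *	0x80000000
 *	 - 0x1000 (branch delay slot emulation page)
 *	 - 0x1000 (VDSO) - 0x1000 (data page) - 0x1000 (GIC user page)
 *	 - VDSO_RANDOMIZE_SIZE (if PF_RANDOMIZE is set)
 *
 * i.e. each reserved page is carved off the top of the address space
 * before the stack is placed beneath it.
 */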

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);

	return sp & ALMASK;
}
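/*
 * Worked example with assumed values: under 4KB pages, a randomizing
 * task with an incoming sp of 0x7fff8000 may first drop by up to 4095
 * bytes, say to 0x7fff7c35; masking with ALMASK (~7 on 32-bit, ~15 on
 * 64-bit) then rounds down to 0x7fff7c30, restoring the ABI stack
 * alignment.
 */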

static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
	CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

static long prepare_for_fp_mode_switch(void *unused)
{
	/*
	 * This is icky, but we use this to simply ensure that all CPUs have
	 * context switched, regardless of whether they were previously running
	 * kernel or user code. This ensures that no CPU that a mode-switching
	 * program may execute on keeps its FPU enabled (& in the old mode)
	 * throughout the mode switch.
	 */
	return 0;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;

	/* If there is nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP is enabled for o32.  */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks.  */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;

	/* Check that the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Setting FRE without FR is not supported.  */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland not to worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling of
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	cpus_read_lock();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	cpus_read_unlock();

	return 0;
}
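/*
 * Userspace view, as a minimal sketch (PR_GET_FP_MODE/PR_SET_FP_MODE are
 * the prctl commands backed by the two functions above; the error
 * handling shown is illustrative only):
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) == 0)
 *		assert(prctl(PR_GET_FP_MODE, 0, 0, 0, 0) & PR_FP_MODE_FR);
 *
 * A kernel or CPU that cannot honour the requested register model makes
 * the call fail with EOPNOTSUPP, as above.
 */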

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */
v4.6
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	/* What the heck is this check doing ? */
	if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
		play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
	unsigned long status;

	/* New thread loses kernel privileges. */
	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
	status |= KU_USER;
	regs->cp0_status = status;
	lose_fpu(0);
	clear_thread_flag(TIF_MSA_CTX_LIVE);
	clear_used_math();
	init_dsp();
	regs->cp0_epc = pc;
	regs->regs[29] = sp;
}

void exit_thread(void)
{
}

void flush_thread(void)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_msa_enabled())
		save_msa(current);
	else if (is_fpu_owner())
		_save_fp(current);

	save_dsp(current);

	preempt_enable();

	*dst = *src;
	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
	unsigned long kthread_arg, struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs, *regs = current_pt_regs();
	unsigned long childksp;
	p->set_child_tid = p->clear_child_tid = NULL;

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/*  Put the stack after the struct pt_regs.  */
	childksp = (unsigned long) childregs;
	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		unsigned long status = p->thread.cp0_status;
		memset(childregs, 0, sizeof(struct pt_regs));
		ti->addr_limit = KERNEL_DS;
		p->thread.reg16 = usp; /* fn */
		p->thread.reg17 = kthread_arg;
		p->thread.reg29 = childksp;
		p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
		status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
			 ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
		status |= ST0_EXL;
#endif
		childregs->cp0_status = status;
		return 0;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[7] = 0; /* Clear error flag */
	childregs->regs[2] = 0; /* Child gets zero as return value */
	if (usp)
		childregs->regs[29] = usp;
	ti->addr_limit = USER_DS;

	p->thread.reg29 = (unsigned long) childregs;
	p->thread.reg31 = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDMSA);
	clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
	clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

	if (clone_flags & CLONE_SETTLS)
		ti->tp_value = regs->regs[7];

	return 0;
}

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
	void		*func;
	unsigned long	func_size;
	int		frame_size;
	int		pc_offset;
};

#define J_TARGET(pc,target)	\
		(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

static inline int is_ra_save_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction mmi;

	/*
	 * swsp ra,offset
	 * swm16 reglist,offset(sp)
	 * swm32 reglist,offset(sp)
	 * sw32 ra,offset(sp)
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is way more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		mmi.word = (ip->halfword[0] << 16);
		return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
			mmi.mm16_r5_format.rt == 31) ||
		       (mmi.mm16_m_format.opcode == mm_pool16c_op &&
			mmi.mm16_m_format.func == mm_swm16_op);
	}
	else {
		mmi.halfword[0] = ip->halfword[1];
		mmi.halfword[1] = ip->halfword[0];
		return (mmi.mm_m_format.opcode == mm_pool32b_op &&
			mmi.mm_m_format.rd > 9 &&
			mmi.mm_m_format.base == 29 &&
			mmi.mm_m_format.func == mm_swm32_func) ||
		       (mmi.i_format.opcode == mm_sw32_op &&
			mmi.i_format.rs == 29 &&
			mmi.i_format.rt == 31);
	}
#else
	/* sw / sd $ra, offset($sp) */
	return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
		ip->i_format.rs == 29 &&
		ip->i_format.rt == 31;
#endif
}

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * jr16,jrc,jalr16,jalrs16
	 * jal
	 * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is kind of more fun...
	 */
	union mips_instruction mmi;

	mmi.word = (ip->halfword[0] << 16);

	if ((mmi.mm16_r5_format.opcode == mm_pool16c_op &&
	    (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) ||
	    ip->j_format.opcode == mm_jal32_op)
		return 1;
	if (ip->r_format.opcode != mm_pool32a_op ||
			ip->r_format.func != mm_pool32axf_op)
		return 0;
	return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
	if (ip->j_format.opcode == j_op)
		return 1;
	if (ip->j_format.opcode == jal_op)
		return 1;
	if (ip->r_format.opcode != spec_op)
		return 0;
	return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * addiusp -imm
	 * addius5 sp,-imm
	 * addiu32 sp,sp,-imm
	 * jraddiusp - NOT SUPPORTED
	 *
	 * microMIPS is not more fun...
	 */
	if (mm_insn_16bit(ip->halfword[0])) {
		union mips_instruction mmi;

		mmi.word = (ip->halfword[0] << 16);
		return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
			mmi.mm16_r3_format.simmediate & mm_addiusp_func) ||
		       (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
			mmi.mm16_r5_format.rt == 29);
	}
	return ip->mm_i_format.opcode == mm_addiu32_op &&
	       ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
#else
	/* addiu/daddiu sp,sp,-imm */
	if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
		return 0;
	if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
		return 1;
#endif
	return 0;
}

static int get_frame_info(struct mips_frame_info *info)
{
#ifdef CONFIG_CPU_MICROMIPS
	union mips_instruction *ip = (void *) (((char *) info->func) - 1);
#else
	union mips_instruction *ip = info->func;
#endif
	unsigned max_insns = info->func_size / sizeof(union mips_instruction);
	unsigned i;

	info->pc_offset = -1;
	info->frame_size = 0;

	if (!ip)
		goto err;

	if (max_insns == 0)
		max_insns = 128U;	/* unknown function size */
	max_insns = min(128U, max_insns);

	for (i = 0; i < max_insns; i++, ip++) {

		if (is_jump_ins(ip))
			break;
		if (!info->frame_size) {
			if (is_sp_move_ins(ip))
			{
#ifdef CONFIG_CPU_MICROMIPS
				if (mm_insn_16bit(ip->halfword[0]))
				{
					unsigned short tmp;

					if (ip->halfword[0] & mm_addiusp_func)
					{
						tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2);
						info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 0xfe00 : 0));
					} else {
						tmp = (ip->halfword[0] >> 1);
						info->frame_size = -(signed short)(tmp & 0xf);
					}
					ip = (void *) &ip->halfword[1];
					ip--;
				} else
#endif
				info->frame_size = - ip->i_format.simmediate;
			}
			continue;
		}
		if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
			info->pc_offset =
				ip->i_format.simmediate / sizeof(long);
			break;
		}
	}
	if (info->frame_size && info->pc_offset >= 0) /* nested */
		return 0;
	if (info->pc_offset < 0) /* leaf */
		return 1;
	/* prologue seems bogus... */
err:
	return -1;
}

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
	return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
	union mips_instruction *ip = (void *)schedule;
	int max_insns = 8;
	int i;

	for (i = 0; i < max_insns; i++, ip++) {
		if (ip->j_format.opcode == j_op)
			return J_TARGET(ip, ip->j_format.target);
	}
	return 0;
}
#endif

static int __init frame_info_init(void)
{
	unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long ofs;
#endif
	unsigned long addr;

	addr = get___schedule_addr();
	if (!addr)
		addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
	kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
	schedule_mfi.func = (void *)addr;
	schedule_mfi.func_size = size;

	get_frame_info(&schedule_mfi);

	/*
	 * Without schedule() frame info, the results given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 */
	if (schedule_mfi.pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);

	return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;

	/* Newborn processes are a special case */
	if (t->reg31 == (unsigned long) ret_from_fork)
		return t->reg31;
	if (schedule_mfi.pc_offset < 0)
		return 0;
	return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
					      unsigned long *sp,
					      unsigned long pc,
					      unsigned long *ra)
{
	struct mips_frame_info info;
	unsigned long size, ofs;
	int leaf;
	extern void ret_from_irq(void);
	extern void ret_from_exception(void);

	if (!stack_page)
		return 0;

	/*
	 * If we reached the bottom of interrupt context,
	 * return saved pc in pt_regs.
	 */
	if (pc == (unsigned long)ret_from_irq ||
	    pc == (unsigned long)ret_from_exception) {
		struct pt_regs *regs;
		if (*sp >= stack_page &&
		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
			regs = (struct pt_regs *)*sp;
			pc = regs->cp0_epc;
			if (__kernel_text_address(pc)) {
				*sp = regs->regs[29];
				*ra = regs->regs[31];
				return pc;
			}
		}
		return 0;
	}
	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
		return 0;
	/*
	 * Return ra if an exception occurred at the first instruction
	 */
	if (unlikely(ofs == 0)) {
		pc = *ra;
		*ra = 0;
		return pc;
	}

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * In some extreme cases, get_frame_info() can wrongly
		 * consider a nested function to be a leaf one. In those
		 * cases, avoid always returning the same value.
		 */
		pc = pc != *ra ? *ra : 0;
	else
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	*ra = 0;
	return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long *ra)
{
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
	unsigned long sp;
	unsigned long ra = 0;
#endif

	if (!task || task == current || task->state == TASK_RUNNING)
		goto out;
	if (!task_stack_page(task))
		goto out;

	pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
	sp = task->thread.reg29 + schedule_mfi.frame_size;

	while (in_sched_functions(pc))
		pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
	return pc;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABIs and on a 16-byte boundary for the
 * 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;

	return sp & ALMASK;
}

static void arch_dump_stack(void *info)
{
	struct pt_regs *regs;

	regs = get_irq_regs();

	if (regs)
		show_regs(regs);

	dump_stack();
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
	smp_call_function(arch_dump_stack, NULL, 1);
}
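/*
 * Unlike the v6.2 implementation earlier on this page, this variant just
 * broadcasts an IPI to the other CPUs via smp_call_function() and lets
 * each of them dump its own state; smp_call_function() never runs the
 * handler on the calling CPU, so the include_self argument is
 * effectively ignored here.
 */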

int mips_get_process_fp_mode(struct task_struct *task)
{
	int value = 0;

	if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
		value |= PR_FP_MODE_FR;
	if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
		value |= PR_FP_MODE_FRE;

	return value;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	unsigned long switch_count;
	struct task_struct *t;

	/* Check that the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;

	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;

	/* Save FP & vector context, then disable FPU & MSA */
	if (task->signal == current->signal)
		lose_fpu(1);

	/* Prevent any threads from obtaining live FP context */
	atomic_set(&task->mm->context.fp_mode_switching, 1);
	smp_mb__after_atomic();

	/*
	 * If there are multiple online CPUs then wait until all threads whose
	 * FP mode is about to change have been context switched. This approach
	 * allows us to only worry about whether an FP mode switch is in
	 * progress when FP is first used in a task's time slice. Pretty much
	 * all of the mode switch overhead can thus be confined to cases where
	 * mode switches are actually occurring. That is, to here. However for
	 * the thread performing the mode switch it may take a while...
	 */
	if (num_online_cpus() > 1) {
		spin_lock_irq(&task->sighand->siglock);

		for_each_thread(task, t) {
			if (t == current)
				continue;

			switch_count = t->nvcsw + t->nivcsw;

			do {
				spin_unlock_irq(&task->sighand->siglock);
				cond_resched();
				spin_lock_irq(&task->sighand->siglock);
			} while ((t->nvcsw + t->nivcsw) == switch_count);
		}

		spin_unlock_irq(&task->sighand->siglock);
	}

	/*
	 * There are now no threads of the process with live FP context, so it
	 * is safe to proceed with the FP mode switch.
	 */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}

	/* Allow threads to use FP again */
	atomic_set(&task->mm->context.fp_mode_switching, 0);

	return 0;
}