// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <asm/elf.h>
#include <asm/proc-fns.h>
#include <asm/fpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>

#if IS_ENABLED(CONFIG_LAZY_FPU)
struct task_struct *last_task_used_math;
#endif

extern void setup_mm_for_reboot(char mode);

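/*
 * Architecture-specific reset hook: a 's' (soft) reboot jumps to the
 * reset vector at address 0 via cpu_reset(); any other mode does
 * nothing here and relies on the restart handlers that
 * machine_restart() has already invoked.
 */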
extern inline void arch_reset(char mode)
{
	if (mode == 's') {
		/* Use cpu handler, jump to 0 */
		cpu_reset(0);
	}
}

void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);

static char reboot_mode_nds32 = 'h';

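/*
 * Parse the "reboot=" kernel command-line option.  Only the first
 * character is kept; 'h' (presumably "hard") is the default and 's'
 * selects the soft-reset path in arch_reset().
 */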
int __init reboot_setup(char *str)
{
	reboot_mode_nds32 = str[0];
	return 1;
}

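/* Power-off helper used by machine_halt(); currently a no-op stub. */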
static int cpub_pwroff(void)
{
	return 0;
}

__setup("reboot=", reboot_setup);

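/*
 * Halt/power-off entry points called from the generic reboot code:
 * machine_halt() only invokes the stub above, while machine_power_off()
 * defers to a board-specific pm_power_off handler when one is set.
 */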
void machine_halt(void)
{
	cpub_pwroff();
}

EXPORT_SYMBOL(machine_halt);

void machine_power_off(void)
{
	if (pm_power_off)
		pm_power_off();
}

EXPORT_SYMBOL(machine_power_off);

void machine_restart(char *cmd)
{
	/*
	 * Clean and disable cache, and turn off interrupts
	 */
	cpu_proc_fin();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot(reboot_mode_nds32);

	/* Execute kernel restart handler call chain */
	do_kernel_restart(cmd);

	/*
	 * Now call the architecture specific reboot code.
	 */
	arch_reset(reboot_mode_nds32);

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user!
	 */
	mdelay(1000);
	pr_info("Reboot failed -- System halted\n");
	while (1) ;
}

EXPORT_SYMBOL(machine_restart);

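/* Dump the saved register state in pt_regs, e.g. on an oops or panic. */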
void show_regs(struct pt_regs *regs)
{
	printk("PC is at %pS\n", (void *)instruction_pointer(regs));
	printk("LP is at %pS\n", (void *)regs->lp);
	pr_info("pc : [<%08lx>]    lp : [<%08lx>]    %s\n"
		"sp : %08lx  fp : %08lx  gp : %08lx\n",
		instruction_pointer(regs),
		regs->lp, print_tainted(), regs->sp, regs->fp, regs->gp);
	pr_info("r25: %08lx  r24: %08lx\n", regs->uregs[25], regs->uregs[24]);

	pr_info("r23: %08lx  r22: %08lx  r21: %08lx  r20: %08lx\n",
		regs->uregs[23], regs->uregs[22],
		regs->uregs[21], regs->uregs[20]);
	pr_info("r19: %08lx  r18: %08lx  r17: %08lx  r16: %08lx\n",
		regs->uregs[19], regs->uregs[18],
		regs->uregs[17], regs->uregs[16]);
	pr_info("r15: %08lx  r14: %08lx  r13: %08lx  r12: %08lx\n",
		regs->uregs[15], regs->uregs[14],
		regs->uregs[13], regs->uregs[12]);
	pr_info("r11: %08lx  r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->uregs[11], regs->uregs[10],
		regs->uregs[9], regs->uregs[8]);
	pr_info("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->uregs[7], regs->uregs[6], regs->uregs[5], regs->uregs[4]);
	pr_info("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
	pr_info("  IRQs o%s  Segment %s\n",
		interrupts_enabled(regs) ? "n" : "ff",
		segment_eq(get_fs(), KERNEL_DS) ? "kernel" : "user");
}

EXPORT_SYMBOL(show_regs);

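/*
 * Called when a task exits: drop lazy-FPU ownership so a dead task is
 * never treated as the last FPU user.
 */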
void exit_thread(struct task_struct *tsk)
{
#if defined(CONFIG_FPU) && defined(CONFIG_LAZY_FPU)
	if (last_task_used_math == tsk)
		last_task_used_math = NULL;
#endif
}

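/*
 * Called on exec: discard any FPU state inherited from the old program
 * and release lazy-FPU ownership.
 */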
void flush_thread(void)
{
#if defined(CONFIG_FPU)
	clear_fpu(task_pt_regs(current));
	clear_used_math();
# ifdef CONFIG_LAZY_FPU
	if (last_task_used_math == current)
		last_task_used_math = NULL;
# endif
#endif
}

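/*
 * Per-CPU pointer to the task most recently switched to on this CPU,
 * presumably consulted by the low-level entry code to locate 'current'.
 */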
DEFINE_PER_CPU(struct task_struct *, __entry_task);

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
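/*
 * Set up the thread state of a newly created task.  Kernel threads get
 * the function/argument pair stashed in r6/r7 for ret_from_fork; user
 * threads inherit the parent's registers with r0 zeroed (the child's
 * return value) and, for CLONE_SETTLS, the TLS value copied into r25.
 */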
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		/* kernel thread fn */
		p->thread.cpu_context.r6 = stack_start;
		/* kernel thread argument */
		p->thread.cpu_context.r7 = stk_sz;
	} else {
		*childregs = *current_pt_regs();
		if (stack_start)
			childregs->sp = stack_start;
		/* The child gets zero as its return value. */
		childregs->uregs[0] = 0;
		childregs->osp = 0;
		if (clone_flags & CLONE_SETTLS)
			childregs->uregs[25] = childregs->uregs[3];
	}
	/* CPU context used by the context switch */
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

#if IS_ENABLED(CONFIG_FPU)
	if (used_math()) {
# if !IS_ENABLED(CONFIG_LAZY_FPU)
		unlazy_fpu(current);
# else
		preempt_disable();
		if (last_task_used_math == current)
			save_fpu(current);
		preempt_enable();
# endif
		p->thread.fpu = current->thread.fpu;
		clear_fpu(task_pt_regs(p));
		set_stopped_child_used_math(p);
	}
#endif

#ifdef CONFIG_HWZOL
	childregs->lb = 0;
	childregs->le = 0;
	childregs->lc = 0;
#endif

	return 0;
}

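/*
 * Hand over the FPU at context-switch time: without lazy FPU the
 * outgoing task's state is saved unconditionally, and an incoming user
 * task has the FPU disabled in its saved registers so its state is
 * reloaded on first use.
 */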
#if IS_ENABLED(CONFIG_FPU)
struct task_struct *_switch_fpu(struct task_struct *prev, struct task_struct *next)
{
#if !IS_ENABLED(CONFIG_LAZY_FPU)
	unlazy_fpu(prev);
#endif
	if (!(next->flags & PF_KTHREAD))
		clear_fpu(task_pt_regs(next));
	return prev;
}
#endif

/*
 * Fill in the FP register state (elf_fpregset_t) for a core dump.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	int fpvalid = 0;
#if IS_ENABLED(CONFIG_FPU)
	struct task_struct *tsk = current;

	fpvalid = tsk_used_math(tsk);
	if (fpvalid) {
		lose_fpu();
		memcpy(fpu, &tsk->thread.fpu, sizeof(*fpu));
	}
#endif
	return fpvalid;
}

EXPORT_SYMBOL(dump_fpu);

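/*
 * Walk the frame-pointer chain of a sleeping task and return the first
 * return address outside the scheduler, i.e. where the task is waiting.
 * Gives up after 16 frames or once the frame pointer leaves the stack.
 */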
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, lr;
	unsigned long stack_start, stack_end;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
		stack_start = (unsigned long)end_of_stack(p);
		stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;

		fp = thread_saved_fp(p);
		do {
			if (fp < stack_start || fp > stack_end)
				return 0;
			lr = ((unsigned long *)fp)[0];
			if (!in_sched_functions(lr))
				return lr;
			fp = *(unsigned long *)(fp + 4);
		} while (count++ < 16);
	}
	return 0;
}

EXPORT_SYMBOL(get_wchan);