/*
 * Scrape artifact note: this file contains two concatenated versions of
 * arch/nds32/kernel/traps.c — kernel v5.4 first, then v4.17 below.
 */
  1// SPDX-License-Identifier: GPL-2.0
  2// Copyright (C) 2005-2017 Andes Technology Corporation
  3
  4#include <linux/module.h>
  5#include <linux/personality.h>
  6#include <linux/kallsyms.h>
  7#include <linux/hardirq.h>
  8#include <linux/kdebug.h>
  9#include <linux/sched/task_stack.h>
 10#include <linux/uaccess.h>
 11#include <linux/ftrace.h>
 12
 13#include <asm/proc-fns.h>
 14#include <asm/unistd.h>
 15#include <asm/fpu.h>
 16
 17#include <linux/ptrace.h>
 18#include <nds32_intrinsic.h>
 19
 20extern void show_pte(struct mm_struct *mm, unsigned long addr);
 21
/*
 * Dump out the contents of some memory nicely...
 *
 * Prints the words in [bottom, top) as emergency log lines, eight
 * 32-bit words per line, each line prefixed with @lvl and the low
 * 16 bits of the line's start address.  Words that fault on read are
 * printed as "????????".
 */
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);

	/* Walk 32-byte (8-word) chunks, starting at a 32-byte boundary. */
	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		/* Room for 8 " 12345678" fields plus the trailing NUL. */
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			/* Skip words below bottom in the first aligned chunk. */
			if (p >= bottom && p < top) {
				unsigned long val;
				/* __get_user survives faulting addresses. */
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 9, " %08lx", val);
				else
					sprintf(str + i * 9, " ????????");
			}
		}
		pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	set_fs(fs);
}

EXPORT_SYMBOL(dump_mem);
 64
/*
 * Dump the instruction words around the faulting PC (4 before, plus the
 * word at the PC itself, which is printed in parentheses).
 *
 * NOTE(review): the bare "return" below disables this function entirely;
 * everything after it is dead code.  Presumably left in deliberately to
 * silence instruction dumps on this platform — confirm before removing.
 */
static void dump_instr(struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	mm_segment_t fs;
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	return;
	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	pr_emerg("Code: ");
	/* i == 0 is the word at the PC; negative i are the words before it. */
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *) addr)[i]);

		if (!bad) {
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		} else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	pr_emerg("Code: %s\n", str);

	set_fs(fs);
}
 98
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Upper bound on printed backtrace entries, to stop runaway unwinds. */
#define LOOP_TIMES (100)
/*
 * Print a "Call Trace:" backtrace for @tsk starting from @base_reg.
 *
 * Without frame pointers, @base_reg is a stack pointer and every word
 * on the stack that looks like kernel text is printed (a heuristic
 * scan).  With frame pointers, @base_reg is a frame pointer and the
 * saved-fp chain is followed, reading the return address and next
 * frame at LP_OFFSET/FP_OFFSET within each frame.
 */
static void __dump(struct task_struct *tsk, unsigned long *base_reg)
{
	unsigned long ret_addr;
	int cnt = LOOP_TIMES, graph = 0;
	pr_emerg("Call Trace:\n");
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		/* Heuristic scan: print every stack word in kernel text. */
		while (!kstack_end(base_reg)) {
			ret_addr = *base_reg++;
			if (__kernel_text_address(ret_addr)) {
				/* Resolve ftrace-graph trampolines to real callers. */
				ret_addr = ftrace_graph_ret_addr(
						tsk, &graph, ret_addr, NULL);
				print_ip_sym(ret_addr);
			}
			if (--cnt < 0)
				break;
		}
	} else {
		/* Frame-pointer unwind; stop on unaligned or non-kernel fp. */
		while (!kstack_end((void *)base_reg) &&
		       !((unsigned long)base_reg & 0x3) &&
		       ((unsigned long)base_reg >= TASK_SIZE)) {
			unsigned long next_fp;
			ret_addr = base_reg[LP_OFFSET];
			next_fp = base_reg[FP_OFFSET];
			if (__kernel_text_address(ret_addr)) {
				ret_addr = ftrace_graph_ret_addr(
						tsk, &graph, ret_addr, NULL);
				print_ip_sym(ret_addr);
			}
			if (--cnt < 0)
				break;
			base_reg = (unsigned long *)next_fp;
		}
	}
	pr_emerg("\n");
}
136
/*
 * Arch entry point for stack backtraces.  @sp is unused; the unwind
 * start is taken from the task's saved context (for a sleeping task)
 * or read live from $sp/$fp (for current), matching __dump()'s mode.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long *base_reg;

	if (!tsk)
		tsk = current;
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		if (tsk != current)
			base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
		else
			/* "ori %0, $sp, #0" just copies $sp into base_reg. */
			__asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
	} else {
		if (tsk != current)
			base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
		else
			/* Copy $fp into base_reg for frame-pointer unwinding. */
			__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
	}
	__dump(tsk, base_reg);
	barrier();
}
157
158DEFINE_SPINLOCK(die_lock);
159
/*
 * This function is protected against re-entrancy.
 *
 * Fatal oops path: logs registers, modules, stack and code around the
 * PC under die_lock, then terminates the current task with SIGSEGV.
 * Does not return.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct task_struct *tsk = current;
	static int die_counter;	/* distinguishes repeated oopses in the log */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
	print_modules();
	pr_emerg("CPU: %i\n", smp_processor_id());
	show_regs(regs);
	pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
		 tsk->comm, tsk->pid, end_of_stack(tsk));

	/* Only dump kernel stack/code for kernel-mode or in-IRQ faults. */
	if (!user_mode(regs) || in_interrupt()) {
		dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
		dump_instr(regs);
		dump_stack();
	}

	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

EXPORT_SYMBOL(die);
191
/*
 * Oops only for faults taken in kernel mode; user-mode faults are
 * reported back to the task via signals instead.
 */
void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
199
/*
 * Called for a syscall number outside the valid range.  Non-Linux
 * personalities get SIGSEGV; otherwise the task gets SIGILL pointing
 * at the trapping instruction (PC - 4, the syscall insn itself).
 * Returns the original r0 so the register state is preserved.
 */
int bad_syscall(int n, struct pt_regs *regs)
{
	if (current->personality != PER_LINUX) {
		send_sig(SIGSEGV, current, 1);
		return regs->uregs[0];
	}

	force_sig_fault(SIGILL, ILL_ILLTRP,
			(void __user *)instruction_pointer(regs) - 4);
	/* A bad syscall from kernel mode is a bug: oops instead. */
	die_if_kernel("Oops - bad syscall", regs, n);
	return regs->uregs[0];
}
212
/* Report a corrupt PTE value, with the source location that found it. */
void __pte_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
}
217
/* Report a corrupt PMD value, with the source location that found it. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
}
222
/* Report a corrupt PGD value, with the source location that found it. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
}
227
228extern char *exception_vector, *exception_vector_end;
/*
 * Generic-kernel hook; intentionally empty — the exception vectors are
 * already installed by early_trap_init() below.
 */
void __init trap_init(void)
{
	return;
}
233
/*
 * Install the exception vector table: copy the vector code to the base
 * of the kernel mapping, program IVB (16-byte vector stride, vector
 * base) and the initial interrupt mask, then flush the copied page so
 * the CPU fetches the new vectors.
 */
void __init early_trap_init(void)
{
	unsigned long ivb = 0;
	unsigned long base = PAGE_OFFSET;

	memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
	       ((unsigned long)&exception_vector_end -
		(unsigned long)&exception_vector));
	ivb = __nds32__mfsr(NDS32_SR_IVB);
	/* Check platform support. */
	if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
		panic
		    ("IVIC mode is not allowed on the platform with interrupt controller\n");
	__nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
		      IVB_BASE, NDS32_SR_IVB);
	__nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

	/*
	 * 0x800 = 128 vectors * 16byte.
	 * It should be enough to flush a page.
	 */
	cpu_cache_wbinval_page(base, true);
}
257
/*
 * Deliver SIGTRAP to the current task for a debug exception, recording
 * the trap number and error code in its thread struct; the fault
 * address reported is the trapping PC.
 */
static void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
{
	struct task_struct *tsk = current;

	tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
	tsk->thread.error_code = error_code;

	force_sig_fault(SIGTRAP, si_code,
			(void __user *)instruction_pointer(regs));
}
268
/*
 * Debug-exception handler.  Gives the die notifier chain first refusal;
 * then user-mode traps become SIGTRAP/TRAP_BRKPT, while kernel-mode
 * traps must be covered by an exception fixup or we oops.
 */
void do_debug_trap(unsigned long entry, unsigned long addr,
		   unsigned long type, struct pt_regs *regs)
{
	if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
	    == NOTIFY_STOP)
		return;

	if (user_mode(regs)) {
		/* trap_signal */
		send_sigtrap(regs, 0, TRAP_BRKPT);
	} else {
		/* kernel_trap */
		if (!fixup_exception(regs))
			die("unexpected kernel_trap", regs, 0);
	}
}
285
/*
 * Last-resort handler for an interrupt with no registered handler.
 * Kernel-mode: kill the current task outright; user-mode: SIGKILL it.
 */
void unhandled_interruption(struct pt_regs *regs)
{
	pr_emerg("unhandled_interruption\n");
	show_regs(regs);
	if (!user_mode(regs))
		do_exit(SIGKILL);
	force_sig(SIGKILL);
}
294
/*
 * Last-resort handler for exceptions no dispatcher claimed.  Logs the
 * vector entry, fault address and interruption type, then kills the
 * task (do_exit for kernel mode, SIGKILL for user mode).
 */
void unhandled_exceptions(unsigned long entry, unsigned long addr,
			  unsigned long type, struct pt_regs *regs)
{
	pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
		 addr, type);
	show_regs(regs);
	if (!user_mode(regs))
		do_exit(SIGKILL);
	force_sig(SIGKILL);
}
305
306extern int do_page_fault(unsigned long entry, unsigned long addr,
307			 unsigned int error_code, struct pt_regs *regs);
308
309/*
310 * 2:DEF dispatch for TLB MISC exception handler
311*/
312
313void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
314			  unsigned long type, struct pt_regs *regs)
315{
316	type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
317	if ((type & ITYPE_mskETYPE) < 5) {
318		/* Permission exceptions */
319		do_page_fault(entry, addr, type, regs);
320	} else
321		unhandled_exceptions(entry, addr, type, regs);
322}
323
/*
 * Reserved (undefined) instruction handler: log it, then SIGILL the
 * user task or kill a kernel-mode offender via do_exit.
 */
void do_revinsn(struct pt_regs *regs)
{
	pr_emerg("Reserved Instruction\n");
	show_regs(regs);
	if (!user_mode(regs))
		do_exit(SIGILL);
	force_sig(SIGILL);
}
332
#ifdef CONFIG_ALIGNMENT_TRAP
extern int unalign_access_mode;
extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
#endif
/*
 * Second-level dispatch for the "general exception" vector.  @itype
 * encodes the instruction/exception type plus a software ID; @oipc is
 * the original IPC saved before interrupts were re-enabled (see the
 * EDM workaround below).
 */
void do_dispatch_general(unsigned long entry, unsigned long addr,
			 unsigned long itype, struct pt_regs *regs,
			 unsigned long oipc)
{
	unsigned int swid = itype >> ITYPE_offSWID;
	unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);
	if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
		/* Alignment check */
		if (user_mode(regs) && unalign_access_mode) {
			int ret;
			ret = do_unaligned_access(addr, regs);

			if (ret == 0)
				return;

			if (ret == -EFAULT)
				pr_emerg
				    ("Unhandled unaligned access exception\n");
		}
#endif
		/* Unfixed alignment faults fall through to the fault path. */
		do_page_fault(entry, addr, type, regs);
	} else if (type == ETYPE_RESERVED_INSTRUCTION) {
		/* Reserved instruction */
		do_revinsn(regs);
	} else if (type == ETYPE_COPROCESSOR) {
		/* Coprocessor */
#if IS_ENABLED(CONFIG_FPU)
		unsigned int fucop_exist = __nds32__mfsr(NDS32_SR_FUCOP_EXIST);
		unsigned int cpid = ((itype & ITYPE_mskCPID) >> ITYPE_offCPID);

		/* Only CP0-as-FPU exceptions go to the FPU handler. */
		if ((cpid == FPU_CPID) &&
		    (fucop_exist & FUCOP_EXIST_mskCP0ISFPU)) {
			unsigned int subtype = (itype & ITYPE_mskSTYPE);

			if (true == do_fpu_exception(subtype, regs))
				return;
		}
#endif
		unhandled_exceptions(entry, addr, type, regs);
	} else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
		/* trap, used on v3 EDM target debugging workaround */
		/*
		 * DIPC(OIPC) is passed as parameter before
		 * interrupt is enabled, so the DIPC will not be corrupted
		 * even though interrupts are coming in
		 */
		/*
		 * 1. update ipc
		 * 2. update pt_regs ipc with oipc
		 * 3. update pt_regs ipsw (clear DEX)
		 */
		__asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
		regs->ipc = oipc;
		if (regs->pipsw & PSW_mskDEX) {
			pr_emerg
			    ("Nested Debug exception is possibly happened\n");
			pr_emerg("ipc:%08x pipc:%08x\n",
				 (unsigned int)regs->ipc,
				 (unsigned int)regs->pipc);
		}
		do_debug_trap(entry, addr, itype, regs);
		regs->ipsw &= ~PSW_mskDEX;
	} else
		unhandled_exceptions(entry, addr, type, regs);
}
/* ==== older version of the same file follows: kernel v4.17 ==== */
  1// SPDX-License-Identifier: GPL-2.0
  2// Copyright (C) 2005-2017 Andes Technology Corporation
  3
  4#include <linux/module.h>
  5#include <linux/personality.h>
  6#include <linux/kallsyms.h>
  7#include <linux/hardirq.h>
  8#include <linux/kdebug.h>
  9#include <linux/sched/task_stack.h>
 10#include <linux/uaccess.h>
 
 11
 12#include <asm/proc-fns.h>
 13#include <asm/unistd.h>
 
 14
 15#include <linux/ptrace.h>
 16#include <nds32_intrinsic.h>
 17
 18extern void show_pte(struct mm_struct *mm, unsigned long addr);
 19
/*
 * Dump out the contents of some memory nicely...
 *
 * Prints the words in [bottom, top) as emergency log lines, eight
 * 32-bit words per line; words that fault on read print "????????".
 */
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);

	/* Walk 32-byte (8-word) chunks from a 32-byte boundary. */
	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		/* Room for 8 " 12345678" fields plus the trailing NUL. */
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			/* Skip words below bottom in the first aligned chunk. */
			if (p >= bottom && p < top) {
				unsigned long val;
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 9, " %08lx", val);
				else
					sprintf(str + i * 9, " ????????");
			}
		}
		pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	set_fs(fs);
}

EXPORT_SYMBOL(dump_mem);
 62
/*
 * Dump the instruction words around the faulting PC.
 *
 * NOTE(review): the bare "return" below disables this function entirely;
 * everything after it is dead code.  Presumably intentional — confirm
 * before removing.
 */
static void dump_instr(struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	mm_segment_t fs;
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	return;
	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.  Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	pr_emerg("Code: ");
	/* i == 0 is the word at the PC; negative i are the words before it. */
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *) addr)[i]);

		if (!bad) {
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		} else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	pr_emerg("Code: %s\n", str);

	set_fs(fs);
}
 96
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#include <linux/ftrace.h>
/*
 * If *addr is the ftrace-graph return trampoline, replace it with the
 * real return address saved on @tsk's ret_stack.  *graph counts how
 * many trampoline frames have already been resolved in this unwind so
 * repeated frames index successive ret_stack entries.
 */
static void
get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
{
	if (*addr == (unsigned long)return_to_handler) {
		int index = tsk->curr_ret_stack;

		if (tsk->ret_stack && index >= *graph) {
			index -= *graph;
			*addr = tsk->ret_stack[index].ret;
			(*graph)++;
		}
	}
}
#else
/* No-op when the function graph tracer is compiled out. */
static inline void
get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
{
}
#endif
118
/* Upper bound on printed backtrace entries, to stop runaway unwinds. */
#define LOOP_TIMES (100)
/*
 * Print a "Call Trace:" backtrace for @tsk starting from @base_reg.
 * Without frame pointers this heuristically scans stack words for
 * kernel-text addresses; with frame pointers it follows the saved-fp
 * chain, with the frame layout selected by the NDS32 ABI in use.
 */
static void __dump(struct task_struct *tsk, unsigned long *base_reg)
{
	unsigned long ret_addr;
	int cnt = LOOP_TIMES, graph = 0;
	pr_emerg("Call Trace:\n");
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		/* Heuristic scan: print every stack word in kernel text. */
		while (!kstack_end(base_reg)) {
			ret_addr = *base_reg++;
			if (__kernel_text_address(ret_addr)) {
				get_real_ret_addr(&ret_addr, tsk, &graph);
				print_ip_sym(ret_addr);
			}
			if (--cnt < 0)
				break;
		}
	} else {
		/* Frame-pointer unwind; stop on unaligned or non-kernel fp. */
		while (!kstack_end((void *)base_reg) &&
		       !((unsigned long)base_reg & 0x3) &&
		       ((unsigned long)base_reg >= TASK_SIZE)) {
			unsigned long next_fp;
			/* Frame layout differs between the two NDS32 ABIs. */
#if !defined(NDS32_ABI_2)
			ret_addr = base_reg[0];
			next_fp = base_reg[1];
#else
			ret_addr = base_reg[-1];
			next_fp = base_reg[FP_OFFSET];
#endif
			if (__kernel_text_address(ret_addr)) {
				get_real_ret_addr(&ret_addr, tsk, &graph);
				print_ip_sym(ret_addr);
			}
			if (--cnt < 0)
				break;
			base_reg = (unsigned long *)next_fp;
		}
	}
	pr_emerg("\n");
}
158
/*
 * Arch entry point for stack backtraces.  @sp is unused; the unwind
 * start is taken from the task's saved context (for a sleeping task)
 * or read live from $sp/$fp (for current), matching __dump()'s mode.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	unsigned long *base_reg;

	if (!tsk)
		tsk = current;
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		if (tsk != current)
			base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
		else
			/* "ori %0, $sp, #0" just copies $sp into base_reg. */
			__asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
	} else {
		if (tsk != current)
			base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
		else
			/* Copy $fp into base_reg for frame-pointer unwinding. */
			__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
	}
	__dump(tsk, base_reg);
	barrier();
}
179
180DEFINE_SPINLOCK(die_lock);
181
/*
 * This function is protected against re-entrancy.
 *
 * Fatal oops path: logs registers, modules, stack and code around the
 * PC under die_lock, then terminates the current task with SIGSEGV.
 * Does not return.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	struct task_struct *tsk = current;
	static int die_counter;	/* distinguishes repeated oopses in the log */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
	print_modules();
	pr_emerg("CPU: %i\n", smp_processor_id());
	show_regs(regs);
	pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
		 tsk->comm, tsk->pid, task_thread_info(tsk) + 1);

	/* Only dump kernel stack/code for kernel-mode or in-IRQ faults. */
	if (!user_mode(regs) || in_interrupt()) {
		dump_mem("Stack: ", regs->sp,
			 THREAD_SIZE + (unsigned long)task_thread_info(tsk));
		dump_instr(regs);
		dump_stack();
	}

	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

EXPORT_SYMBOL(die);
214
/*
 * Oops only for faults taken in kernel mode; user-mode faults are
 * reported back to the task via signals instead.
 */
void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}
222
/*
 * Called for a syscall number outside the valid range.  Non-Linux
 * personalities get SIGSEGV; otherwise the task gets SIGILL pointing
 * at the trapping instruction (PC - 4, the syscall insn itself).
 * Returns the original r0 so the register state is preserved.
 */
int bad_syscall(int n, struct pt_regs *regs)
{
	siginfo_t info;

	if (current->personality != PER_LINUX) {
		send_sig(SIGSEGV, current, 1);
		return regs->uregs[0];
	}

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)instruction_pointer(regs) - 4;

	force_sig_info(SIGILL, &info, current);
	/* A bad syscall from kernel mode is a bug: oops instead. */
	die_if_kernel("Oops - bad syscall", regs, n);
	return regs->uregs[0];
}
241
/* Report a corrupt PTE value, with the source location that found it. */
void __pte_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
}
246
/* Report a corrupt PMD value, with the source location that found it. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
}
251
/* Report a corrupt PGD value, with the source location that found it. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
}
256
257extern char *exception_vector, *exception_vector_end;
/*
 * Generic-kernel hook; intentionally empty — the exception vectors are
 * already installed by early_trap_init() below.
 */
void __init trap_init(void)
{
	return;
}
262
/*
 * Install the exception vector table: copy the vector code to the base
 * of the kernel mapping, program IVB (16-byte vector stride, vector
 * base) and the initial interrupt mask, then flush the copied page so
 * the CPU fetches the new vectors.
 */
void __init early_trap_init(void)
{
	unsigned long ivb = 0;
	unsigned long base = PAGE_OFFSET;

	memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
	       ((unsigned long)&exception_vector_end -
		(unsigned long)&exception_vector));
	ivb = __nds32__mfsr(NDS32_SR_IVB);
	/* Check platform support. */
	if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
		panic
		    ("IVIC mode is not allowed on the platform with interrupt controller\n");
	__nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
		      IVB_BASE, NDS32_SR_IVB);
	__nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

	/*
	 * 0x800 = 128 vectors * 16byte.
	 * It should be enough to flush a page.
	 */
	cpu_cache_wbinval_page(base, true);
}
286
/*
 * Deliver SIGTRAP to @tsk for a debug exception, recording the trap
 * number and error code in its thread struct; the fault address
 * reported is the trapping PC.
 */
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
{
	struct siginfo info;

	tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
	tsk->thread.error_code = error_code;

	/* Zero first: siginfo has union members the code doesn't set. */
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGTRAP;
	info.si_code = si_code;
	info.si_addr = (void __user *)instruction_pointer(regs);
	force_sig_info(SIGTRAP, &info, tsk);
}
301
/*
 * Debug-exception handler.  Gives the die notifier chain first refusal;
 * then user-mode traps become SIGTRAP/TRAP_BRKPT, while kernel-mode
 * traps must be covered by an exception fixup or we oops.
 */
void do_debug_trap(unsigned long entry, unsigned long addr,
		   unsigned long type, struct pt_regs *regs)
{
	if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
	    == NOTIFY_STOP)
		return;

	if (user_mode(regs)) {
		/* trap_signal */
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
	} else {
		/* kernel_trap */
		if (!fixup_exception(regs))
			die("unexpected kernel_trap", regs, 0);
	}
}
318
/*
 * Last-resort handler for an interrupt with no registered handler.
 * Kernel-mode: kill the current task outright; user-mode: SIGKILL it.
 */
void unhandled_interruption(struct pt_regs *regs)
{
	siginfo_t si;
	pr_emerg("unhandled_interruption\n");
	show_regs(regs);
	if (!user_mode(regs))
		do_exit(SIGKILL);
	si.si_signo = SIGKILL;
	si.si_errno = 0;
	force_sig_info(SIGKILL, &si, current);
}
330
/*
 * Last-resort handler for exceptions no dispatcher claimed.  Logs the
 * vector entry, fault address and interruption type, then kills the
 * task (do_exit for kernel mode, SIGKILL for user mode).
 */
void unhandled_exceptions(unsigned long entry, unsigned long addr,
			  unsigned long type, struct pt_regs *regs)
{
	siginfo_t si;
	pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
		 addr, type);
	show_regs(regs);
	if (!user_mode(regs))
		do_exit(SIGKILL);
	si.si_signo = SIGKILL;
	si.si_errno = 0;
	si.si_addr = (void *)addr;
	force_sig_info(SIGKILL, &si, current);
}
345
346extern int do_page_fault(unsigned long entry, unsigned long addr,
347			 unsigned int error_code, struct pt_regs *regs);
348
349/*
350 * 2:DEF dispatch for TLB MISC exception handler
351*/
352
353void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
354			  unsigned long type, struct pt_regs *regs)
355{
356	type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
357	if ((type & ITYPE_mskETYPE) < 5) {
358		/* Permission exceptions */
359		do_page_fault(entry, addr, type, regs);
360	} else
361		unhandled_exceptions(entry, addr, type, regs);
362}
363
/*
 * Reserved (undefined) instruction handler: log it, then SIGILL the
 * user task or kill a kernel-mode offender via do_exit.
 */
void do_revinsn(struct pt_regs *regs)
{
	siginfo_t si;
	pr_emerg("Reserved Instruction\n");
	show_regs(regs);
	if (!user_mode(regs))
		do_exit(SIGILL);
	si.si_signo = SIGILL;
	si.si_errno = 0;
	force_sig_info(SIGILL, &si, current);
}
375
#ifdef CONFIG_ALIGNMENT_TRAP
extern int unalign_access_mode;
extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
#endif
/*
 * Second-level dispatch for the "general exception" vector.  @itype
 * encodes the instruction/exception type plus a software ID; @oipc is
 * the original IPC saved before interrupts were re-enabled (see the
 * EDM workaround below).
 */
void do_dispatch_general(unsigned long entry, unsigned long addr,
			 unsigned long itype, struct pt_regs *regs,
			 unsigned long oipc)
{
	unsigned int swid = itype >> ITYPE_offSWID;
	unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);
	if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
		/* Alignment check */
		if (user_mode(regs) && unalign_access_mode) {
			int ret;
			ret = do_unaligned_access(addr, regs);

			if (ret == 0)
				return;

			if (ret == -EFAULT)
				pr_emerg
				    ("Unhandled unaligned access exception\n");
		}
#endif
		/* Unfixed alignment faults fall through to the fault path. */
		do_page_fault(entry, addr, type, regs);
	} else if (type == ETYPE_RESERVED_INSTRUCTION) {
		/* Reserved instruction */
		do_revinsn(regs);
	} else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
		/* trap, used on v3 EDM target debugging workaround */
		/*
		 * DIPC(OIPC) is passed as parameter before
		 * interrupt is enabled, so the DIPC will not be corrupted
		 * even though interrupts are coming in
		 */
		/*
		 * 1. update ipc
		 * 2. update pt_regs ipc with oipc
		 * 3. update pt_regs ipsw (clear DEX)
		 */
		__asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
		regs->ipc = oipc;
		if (regs->pipsw & PSW_mskDEX) {
			pr_emerg
			    ("Nested Debug exception is possibly happened\n");
			pr_emerg("ipc:%08x pipc:%08x\n",
				 (unsigned int)regs->ipc,
				 (unsigned int)regs->pipc);
		}
		do_debug_trap(entry, addr, itype, regs);
		regs->ipsw &= ~PSW_mskDEX;
	} else
		unhandled_exceptions(entry, addr, type, regs);
}