// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/module.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include <asm/proc-fns.h>
#include <asm/unistd.h>
#include <asm/fpu.h>

#include <linux/ptrace.h>
#include <nds32_intrinsic.h>

extern void show_pte(struct mm_struct *mm, unsigned long addr);

/*
 * Dump out the contents of some memory nicely...
 */
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
{
        unsigned long first;
        int i;

        pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;

                                if (get_kernel_nofault(val,
                                                (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }
}

EXPORT_SYMBOL(dump_mem);

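/*
 * Walk a kernel stack and print a call trace.  Without frame pointers
 * the stack is scanned word by word for kernel text addresses; with
 * CONFIG_FRAME_POINTER the saved $lp/$fp pairs are followed instead.
 * LOOP_TIMES bounds the walk so a corrupted stack cannot loop forever.
 */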
#define LOOP_TIMES (100)
static void __dump(struct task_struct *tsk, unsigned long *base_reg,
                   const char *loglvl)
{
        unsigned long ret_addr;
        int cnt = LOOP_TIMES, graph = 0;

        printk("%sCall Trace:\n", loglvl);
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                while (!kstack_end(base_reg)) {
                        ret_addr = *base_reg++;
                        if (__kernel_text_address(ret_addr)) {
                                ret_addr = ftrace_graph_ret_addr(
                                                tsk, &graph, ret_addr, NULL);
                                print_ip_sym(loglvl, ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                }
        } else {
                while (!kstack_end((void *)base_reg) &&
                       !((unsigned long)base_reg & 0x3) &&
                       ((unsigned long)base_reg >= TASK_SIZE)) {
                        unsigned long next_fp;

                        ret_addr = base_reg[LP_OFFSET];
                        next_fp = base_reg[FP_OFFSET];
                        if (__kernel_text_address(ret_addr)) {
                                ret_addr = ftrace_graph_ret_addr(
                                                tsk, &graph, ret_addr, NULL);
                                print_ip_sym(loglvl, ret_addr);
                        }
                        if (--cnt < 0)
                                break;
                        base_reg = (unsigned long *)next_fp;
                }
        }
        printk("%s\n", loglvl);
}

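/*
 * Pick the starting point for the unwind: the saved context of a
 * sleeping task, or the live $sp/$fp of the current task.
 */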
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
        unsigned long *base_reg;

        if (!tsk)
                tsk = current;
        if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
                else
                        __asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
        } else {
                if (tsk != current)
                        base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
                else
                        __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
        }
        __dump(tsk, base_reg, loglvl);
        barrier();
}

DEFINE_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        struct task_struct *tsk = current;
        static int die_counter;

        console_verbose();
        spin_lock_irq(&die_lock);
        bust_spinlocks(1);

        pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
        print_modules();
        pr_emerg("CPU: %i\n", smp_processor_id());
        show_regs(regs);
        pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
                 tsk->comm, tsk->pid, end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
                dump_stack();
        }

        bust_spinlocks(0);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

EXPORT_SYMBOL(die);

void die_if_kernel(const char *str, struct pt_regs *regs, int err)
{
        if (user_mode(regs))
                return;

        die(str, regs, err);
}

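/*
 * Called for syscall numbers the kernel does not handle.  Non-native
 * personalities just get SIGSEGV; otherwise the caller gets SIGILL
 * pointing at the trapping instruction, or an oops in kernel mode.
 */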
int bad_syscall(int n, struct pt_regs *regs)
{
        if (current->personality != PER_LINUX) {
                send_sig(SIGSEGV, current, 1);
                return regs->uregs[0];
        }

        force_sig_fault(SIGILL, ILL_ILLTRP,
                        (void __user *)instruction_pointer(regs) - 4);
        die_if_kernel("Oops - bad syscall", regs, n);
        return regs->uregs[0];
}

void __pte_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
}

extern char *exception_vector, *exception_vector_end;

void __init trap_init(void)
{
        return;
}

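/*
 * Install the exception vector table: copy the vectors to the base of
 * the kernel mapping, program IVB for 16-byte vector spacing, set the
 * initial interrupt mask, then flush the page so the CPU fetches the
 * new vectors.
 */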
void __init early_trap_init(void)
{
        unsigned long ivb = 0;
        unsigned long base = PAGE_OFFSET;

        memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
               ((unsigned long)&exception_vector_end -
                (unsigned long)&exception_vector));
        ivb = __nds32__mfsr(NDS32_SR_IVB);
        /* Check platform support. */
        if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
                panic("IVIC mode is not allowed on platforms with an interrupt controller\n");
        __nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
                      IVB_BASE, NDS32_SR_IVB);
        __nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

        /*
         * 0x800 = 128 vectors * 16 bytes.
         * It should be enough to flush a page.
         */
        cpu_cache_wbinval_page(base, true);
}

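/*
 * Deliver SIGTRAP for a debug exception, recording the trap number and
 * error code in the thread struct.
 */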
static void send_sigtrap(struct pt_regs *regs, int error_code, int si_code)
{
        struct task_struct *tsk = current;

        tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
        tsk->thread.error_code = error_code;

        force_sig_fault(SIGTRAP, si_code,
                        (void __user *)instruction_pointer(regs));
}

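/*
 * Debug traps: unless a notifier on the die chain claims the event,
 * user-mode hits become SIGTRAP/TRAP_BRKPT, while kernel-mode traps
 * try the exception fixup table first and die() otherwise.
 */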
void do_debug_trap(unsigned long entry, unsigned long addr,
                   unsigned long type, struct pt_regs *regs)
{
        if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
            == NOTIFY_STOP)
                return;

        if (user_mode(regs)) {
                /* trap_signal */
                send_sigtrap(regs, 0, TRAP_BRKPT);
        } else {
                /* kernel_trap */
                if (!fixup_exception(regs))
                        die("unexpected kernel_trap", regs, 0);
        }
}

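/*
 * Fall-through handlers for interrupts and exceptions nothing else
 * claims: dump the registers, then terminate the current context with
 * SIGKILL (via do_exit() when the fault happened in kernel mode).
 */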
void unhandled_interruption(struct pt_regs *regs)
{
        pr_emerg("unhandled_interruption\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        force_sig(SIGKILL);
}

void unhandled_exceptions(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
                 addr, type);
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGKILL);
        force_sig(SIGKILL);
}

extern int do_page_fault(unsigned long entry, unsigned long addr,
                         unsigned int error_code, struct pt_regs *regs);

/*
 * 2:DEF dispatch for the TLB MISC exception handler: ETYPE values
 * below 5 are page-fault class and are handed to do_page_fault();
 * anything else is treated as an unhandled exception.
 */
void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
                          unsigned long type, struct pt_regs *regs)
{
        type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
        if ((type & ITYPE_mskETYPE) < 5) {
                /* Permission exceptions */
                do_page_fault(entry, addr, type, regs);
        } else
                unhandled_exceptions(entry, addr, type, regs);
}

void do_revinsn(struct pt_regs *regs)
{
        pr_emerg("Reserved Instruction\n");
        show_regs(regs);
        if (!user_mode(regs))
                do_exit(SIGILL);
        force_sig(SIGILL);
}

#ifdef CONFIG_ALIGNMENT_TRAP
extern int unalign_access_mode;
extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
#endif
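
/*
 * General exception dispatcher: decode ITYPE and hand the event to the
 * alignment fixup, page-fault, reserved-instruction, coprocessor/FPU
 * or debug-trap path; anything unrecognized ends up in
 * unhandled_exceptions().
 */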
void do_dispatch_general(unsigned long entry, unsigned long addr,
                         unsigned long itype, struct pt_regs *regs,
                         unsigned long oipc)
{
        unsigned int swid = itype >> ITYPE_offSWID;
        unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);

        if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
                /* Alignment check */
                if (user_mode(regs) && unalign_access_mode) {
                        int ret;

                        ret = do_unaligned_access(addr, regs);
                        if (ret == 0)
                                return;

                        if (ret == -EFAULT)
                                pr_emerg("Unhandled unaligned access exception\n");
                }
#endif
                do_page_fault(entry, addr, type, regs);
        } else if (type == ETYPE_RESERVED_INSTRUCTION) {
                /* Reserved instruction */
                do_revinsn(regs);
        } else if (type == ETYPE_COPROCESSOR) {
                /* Coprocessor */
#if IS_ENABLED(CONFIG_FPU)
                unsigned int fucop_exist = __nds32__mfsr(NDS32_SR_FUCOP_EXIST);
                unsigned int cpid = ((itype & ITYPE_mskCPID) >> ITYPE_offCPID);

                if ((cpid == FPU_CPID) &&
                    (fucop_exist & FUCOP_EXIST_mskCP0ISFPU)) {
                        unsigned int subtype = (itype & ITYPE_mskSTYPE);

                        if (do_fpu_exception(subtype, regs))
                                return;
                }
#endif
                unhandled_exceptions(entry, addr, type, regs);
        } else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
                /*
                 * Trap, used as a workaround for v3 EDM target debugging.
                 * DIPC(OIPC) was passed as a parameter before interrupts
                 * were enabled, so it cannot be corrupted by incoming
                 * interrupts:
                 *   1. restore $IPC from oipc
                 *   2. update pt_regs ipc with oipc
                 *   3. update pt_regs ipsw (clear DEX)
                 */
                __asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
                regs->ipc = oipc;
                if (regs->pipsw & PSW_mskDEX) {
                        pr_emerg("Nested debug exception may have occurred\n");
                        pr_emerg("ipc:%08x pipc:%08x\n",
                                 (unsigned int)regs->ipc,
                                 (unsigned int)regs->pipc);
                }
                do_debug_trap(entry, addr, itype, regs);
                regs->ipsw &= ~PSW_mskDEX;
        } else
                unhandled_exceptions(entry, addr, type, regs);
}