/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/hardirq.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <asm/vdso.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0	/* Backtrace still ongoing */
#define KBT_DONE	1	/* Backtrace cleanly completed */
#define KBT_RUNNING	2	/* Can't run backtrace on a running task */
#define KBT_LOOP	3	/* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)	/* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

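/*
 * The backtracer must be able to poke at arbitrary stack words
 * without ever taking a page fault.  The helper below therefore
 * follows the usual nonfaulting-read pattern, roughly:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, (void __user __force *)src, n);
 *	pagefault_enable();
 *
 * so a bad address just makes the copy return nonzero, and the
 * backtrace stops cleanly instead of faulting in the middle of a dump.
 */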
/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (address == 0)
		return 0;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!kbt->is_current) {
		return 0;	/* can't read from other user address spaces */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

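/*
 * valid_fault_handler() below relies on the fixed layout that the
 * interrupt entry code gives a kernel fault frame.  Roughly (a sketch;
 * the real sizes are C_ABI_SAVE_AREA_SIZE and PTREGS_SIZE):
 *
 *	sp + 0:				C ABI save area
 *	sp + C_ABI_SAVE_AREA_SIZE:	struct pt_regs for the fault
 *
 * so given a candidate sp we can bounds-check the whole region and
 * then read the saved registers directly out of the pt_regs.
 */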
/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
	char fault[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (sp % sizeof(long) != 0)
		return NULL;
	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (kbt->verbose) {	/* else we aren't going to use it */
		if (p->faultnum == INT_SWINT_1 ||
		    p->faultnum == INT_SWINT_1_SIGRETURN)
			snprintf(fault, sizeof(fault),
				 "syscall %ld", p->regs[TREG_SYSCALL_NR]);
		else
			snprintf(fault, sizeof(fault),
				 "interrupt %ld", p->faultnum);
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err(" <%s while in kernel mode>\n", fault);
	} else if (user_mode(p) &&
		   p->sp < PAGE_OFFSET && p->sp != 0) {
		if (kbt->verbose)
			pr_err(" <%s while in user mode>\n", fault);
	} else {
		if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
			pr_err(" (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
			       p->pc, p->sp, p->ex1);
		return NULL;
	}
	if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
		return NULL;
	return p;
}

/* Is the iterator pointing to a sigreturn trampoline? */
static int is_sigreturn(struct KBacktraceIterator *kbt)
{
	return kbt->task->mm &&
	       (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
			       (ulong)&__vdso_rt_sigreturn));
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
				      struct rt_sigframe* kframe)
{
	BacktraceIterator *b = &kbt->it;

	if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
	    b->sp % sizeof(long) == 0) {
		int retval;
		pagefault_disable();
		retval = __copy_from_user_inatomic(
			kframe, (void __user __force *)b->sp,
			sizeof(*kframe));
		pagefault_enable();
		if (retval != 0 ||
		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
			return NULL;
		if (kbt->verbose) {
			pr_err(" <received signal %d>\n",
			       kframe->info.si_signo);
		}
		return (struct pt_regs *)&kframe->uc.uc_mcontext;
	}
	return NULL;
}

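/*
 * When the plain unwinder runs out of frames, the iterator tries to
 * "restart" across a context boundary: if the current frame is a
 * fault handler or a sigreturn trampoline, we recover the interrupted
 * context's pt_regs (via the two validators above) and re-seed the
 * backtracer from its pc/lr/sp/r52.
 */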
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;
	struct rt_sigframe kframe;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt, &kframe);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help the debug,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
		       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	}

	else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
		       " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	}
}

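/*
 * Typical iterator usage (a sketch; tile_show_stack() below is the
 * canonical in-tree caller, and record_frame() here is just a
 * stand-in for whatever the caller does with each frame):
 *
 *	struct KBacktraceIterator kbt;
 *
 *	KBacktraceIterator_init(&kbt, task, regs);
 *	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *		record_frame(kbt.it.pc, kbt.it.sp);
 *
 * Pass t == NULL (or t == current) to walk the current task; pass
 * regs == NULL to start instead from the switch_to() state of a
 * sleeping task.
 */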
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information. We grab the kernel stack base
	 * so we will allow reads of that address range.
	 */
	is_current = (t == NULL || t == current);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->verbose = 0;	/* override in caller if desired */
	kbt->profile = 0;	/* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 1;
	if (is_current)
		validate_stack(regs);

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);

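/*
 * describe_addr() formats one pc for the dump: kernel addresses come
 * out as "name+0xoff/0xsize [module] " via kallsyms, and user
 * addresses (when we hold the mmap_sem) as "file[start+size] " based
 * on the vma that maps them.
 */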
static void describe_addr(struct KBacktraceIterator *kbt,
			  unsigned long address,
			  int have_mmap_sem, char *buf, size_t bufsize)
{
	struct vm_area_struct *vma;
	size_t namelen, remaining;
	unsigned long size, offset, adjust;
	char *p, *modname;
	const char *name;
	int rc;

	/*
	 * Look one byte back for every caller frame (i.e. those that
	 * aren't a new context) so we look up symbol data for the
	 * call itself, not the following instruction, which may be on
	 * a different line (or in a different function).
	 */
	adjust = !kbt->new_context;
	address -= adjust;

	if (address >= PAGE_OFFSET) {
		/* Handle kernel symbols. */
		BUG_ON(bufsize < KSYM_NAME_LEN);
		name = kallsyms_lookup(address, &size, &offset,
				       &modname, buf);
		if (name == NULL) {
			buf[0] = '\0';
			return;
		}
		namelen = strlen(buf);
		remaining = (bufsize - 1) - namelen;
		p = buf + namelen;
		rc = snprintf(p, remaining, "+%#lx/%#lx ",
			      offset + adjust, size);
		if (modname && rc < remaining)
			snprintf(p + rc, remaining - rc, "[%s] ", modname);
		buf[bufsize-1] = '\0';
		return;
	}

	/* If we don't have the mmap_sem, we can't show any more info. */
	buf[0] = '\0';
	if (!have_mmap_sem)
		return;

	/* Find vma info. */
	vma = find_vma(kbt->task->mm, address);
	if (vma == NULL || address < vma->vm_start) {
		snprintf(buf, bufsize, "[unmapped address] ");
		return;
	}

	if (vma->vm_file) {
		p = file_path(vma->vm_file, buf, bufsize);
		if (IS_ERR(p))
			p = "?";
		name = kbasename(p);
	} else {
		name = "anon";
	}

	/* Generate a string description of the vma info. */
	namelen = strlen(name);
	remaining = (bufsize - 1) - namelen;
	memmove(buf, name, namelen);
	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
		 vma->vm_start, vma->vm_end - vma->vm_start);
}

/*
 * Avoid possible crash recursion during backtrace.  If it happens, it
 * makes it easy to lose the actual root cause of the failure, so we
 * put a simple guard on all the backtrace loops.
 */
static bool start_backtrace(void)
{
	if (current_thread_info()->in_backtrace) {
		pr_err("Backtrace requested while in backtrace!\n");
		return false;
	}
	current_thread_info()->in_backtrace = true;
	return true;
}

static void end_backtrace(void)
{
	current_thread_info()->in_backtrace = false;
}

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt)
{
	int i;
	int have_mmap_sem = 0;

	if (!start_backtrace())
		return;
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char namebuf[KSYM_NAME_LEN+100];
		unsigned long address = kbt->it.pc;

		/*
		 * Try to acquire the mmap_sem as we pass into userspace.
		 * If we're in an interrupt context, don't even try, since
		 * it's not safe to call e.g. d_path() from an interrupt,
		 * since it uses spin locks without disabling interrupts.
		 * Note we test "kbt->task == current", not "kbt->is_current",
		 * since we're checking that "current" will work in d_path().
		 */
		if (kbt->task == current && address < PAGE_OFFSET &&
		    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
			have_mmap_sem =
				down_read_trylock(&kbt->task->mm->mmap_sem);
		}

		describe_addr(kbt, address, have_mmap_sem,
			      namebuf, sizeof(namebuf));

		pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (have_mmap_sem)
		up_read(&kbt->task->mm->mmap_sem);
	end_backtrace();
}
EXPORT_SYMBOL(tile_show_stack);

static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* Deprecated function currently only used by kernel_double_fault(). */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct KBacktraceIterator kbt;
	struct pt_regs regs;

	regs_to_pt_regs(&regs, pc, lr, sp, r52);
	KBacktraceIterator_init(&kbt, NULL, &regs);
	tile_show_stack(&kbt);
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/*
 * Called from sched_show_task() with task != NULL, or dump_stack()
 * with task == NULL.  The esp argument is always NULL.
 */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;
	if (task == NULL || task == current) {
		KBacktraceIterator_init_current(&kbt);
		KBacktraceIterator_next(&kbt); /* don't show first frame */
	} else {
		KBacktraceIterator_init(&kbt, task, NULL);
	}
	tile_show_stack(&kbt);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

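/*
 * A minimal caller-side sketch of the generic API (the caller owns
 * the entries array; the size here is arbitrary for illustration):
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries = entries,
 *		.max_entries = ARRAY_SIZE(entries),
 *	};
 *
 *	save_stack_trace(&trace);
 *
 * afterwards trace.entries[0..nr_entries-1] holds the kernel pcs.
 */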
static void save_stack_trace_common(struct task_struct *task,
				    struct pt_regs *regs,
				    bool user,
				    struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (!start_backtrace())
		goto done;
	if (regs != NULL) {
		KBacktraceIterator_init(&kbt, NULL, regs);
	} else if (task == NULL || task == current) {
		KBacktraceIterator_init_current(&kbt);
		skip++;	/* don't show KBacktraceIterator_init_current */
	} else {
		KBacktraceIterator_init(&kbt, task, NULL);
	}
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries ||
		    (!user && kbt.it.pc < PAGE_OFFSET))
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	end_backtrace();
done:
	if (i < trace->max_entries)
		trace->entries[i++] = ULONG_MAX;
	trace->nr_entries = i;
}

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	save_stack_trace_common(task, NULL, false, trace);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_common(NULL, NULL, false, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_stack_trace_common(NULL, regs, false, trace);
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/* Trace user stack if we are not a kernel thread. */
	if (current->mm)
		save_stack_trace_common(NULL, task_pt_regs(current),
					true, trace);
	else if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);