/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/hardirq.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <asm/vdso.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING     0  /* Backtrace still ongoing */
#define KBT_DONE        1  /* Backtrace cleanly completed */
#define KBT_RUNNING     2  /* Can't run backtrace on a running task */
#define KBT_LOOP        3  /* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
        ulong kstack_base = (ulong) kbt->task->stack;
        if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
                return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
        return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
                             unsigned int size, void *vkbt)
{
        int retval;
        struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

        if (address == 0)
                return 0;
        if (__kernel_text_address(address)) {
                /* OK to read kernel code. */
        } else if (address >= PAGE_OFFSET) {
                /* We only tolerate kernel-space reads of this task's stack */
                if (!in_kernel_stack(kbt, address))
                        return 0;
        } else if (!kbt->is_current) {
                return 0;       /* can't read from other user address spaces */
        }
        /*
         * Copy with page faults disabled: we may be called in atomic
         * context, so a touch of an unmapped page must fail the copy
         * rather than sleep in the fault handler.
         */
        pagefault_disable();
        retval = __copy_from_user_inatomic(result,
                                           (void __user __force *)address,
                                           size);
        pagefault_enable();
        return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator *kbt)
{
        char fault[64];
        unsigned long sp = kbt->it.sp;
        struct pt_regs *p;

        if (sp % sizeof(long) != 0)
                return NULL;
        if (!in_kernel_stack(kbt, sp))
                return NULL;
        if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
                return NULL;
        p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
        if (kbt->verbose) {     /* else we aren't going to use it */
                if (p->faultnum == INT_SWINT_1 ||
                    p->faultnum == INT_SWINT_1_SIGRETURN)
                        snprintf(fault, sizeof(fault),
                                 "syscall %ld", p->regs[TREG_SYSCALL_NR]);
                else
                        snprintf(fault, sizeof(fault),
                                 "interrupt %ld", p->faultnum);
        }
        if (EX1_PL(p->ex1) == KERNEL_PL &&
            __kernel_text_address(p->pc) &&
            in_kernel_stack(kbt, p->sp) &&
            p->sp >= sp) {
                if (kbt->verbose)
                        pr_err("  <%s while in kernel mode>\n", fault);
        } else if (user_mode(p) &&
                   p->sp < PAGE_OFFSET && p->sp != 0) {
                if (kbt->verbose)
                        pr_err("  <%s while in user mode>\n", fault);
        } else {
                if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
                        pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
                               p->pc, p->sp, p->ex1);
                return NULL;
        }
        if (kbt->profile && ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) != 0)
                return NULL;
        return p;
}

/* Is the iterator pointing to a sigreturn trampoline? */
static int is_sigreturn(struct KBacktraceIterator *kbt)
{
        return kbt->task->mm &&
               (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
                               (ulong)&__vdso_rt_sigreturn));
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator *kbt,
                                      struct rt_sigframe *kframe)
{
        BacktraceIterator *b = &kbt->it;

        if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
            b->sp % sizeof(long) == 0) {
                int retval;
                pagefault_disable();
                retval = __copy_from_user_inatomic(
                        kframe, (void __user __force *)b->sp,
                        sizeof(*kframe));
                pagefault_enable();
                if (retval != 0 ||
                    (unsigned int)(kframe->info.si_signo) >= _NSIG)
                        return NULL;
                if (kbt->verbose) {
                        pr_err("  <received signal %d>\n",
                               kframe->info.si_signo);
                }
                return (struct pt_regs *)&kframe->uc.uc_mcontext;
        }
        return NULL;
}

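/*
 * If the current frame is a valid fault-handler or signal-handler
 * frame, recover the interrupted context's registers from it and
 * restart the backtrace there.  Returns non-zero if a new context
 * was found and the iterator reinitialized.
 */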
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
        struct pt_regs *p;
        struct rt_sigframe kframe;

        p = valid_fault_handler(kbt);
        if (p == NULL)
                p = valid_sigframe(kbt, &kframe);
        if (p == NULL)
                return 0;
        backtrace_init(&kbt->it, read_memory_func, kbt,
                       p->pc, p->lr, p->sp, p->regs[52]);
        kbt->new_context = 1;
        return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
        struct KBacktraceIterator *kbt)
{
        for (;;) {
                do {
                        if (!is_sigreturn(kbt))
                                return KBT_ONGOING;
                } while (backtrace_next(&kbt->it));

                if (!KBacktraceIterator_restart(kbt))
                        return KBT_DONE;
        }
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help the debug,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
        int cpu = raw_smp_processor_id();
        unsigned long ksp0 = get_current_ksp0();
        unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
        unsigned long sp = stack_pointer;

        if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
                pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
        } else if (sp < ksp0_base + sizeof(struct thread_info)) {
                pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
                       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
                       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
        }
}

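/*
 * Set up the iterator to walk the stack of task "t", or of the current
 * task if "t" is NULL.  If "regs" is NULL we fall back to the pc/lr/sp
 * saved at the task's last context switch, which only works for a
 * stopped task; a running task (including the current one) needs
 * explicit registers, so in that case we mark the trace KBT_RUNNING
 * and give up.
 */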
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
                             struct task_struct *t, struct pt_regs *regs)
{
        unsigned long pc, lr, sp, r52;
        int is_current;

        /*
         * Set up callback information.  We grab the kernel stack base
         * so we will allow reads of that address range.
         */
        is_current = (t == NULL || t == current);
        kbt->is_current = is_current;
        if (is_current)
                t = validate_current();
        kbt->task = t;
        kbt->verbose = 0;       /* override in caller if desired */
        kbt->profile = 0;       /* override in caller if desired */
        kbt->end = KBT_ONGOING;
        kbt->new_context = 1;
        if (is_current)
                validate_stack(regs);

        if (regs == NULL) {
                if (is_current || t->state == TASK_RUNNING) {
                        /* Can't do this; we need registers */
                        kbt->end = KBT_RUNNING;
                        return;
                }
                pc = get_switch_to_pc();
                lr = t->thread.pc;
                sp = t->thread.ksp;
                r52 = 0;
        } else {
                pc = regs->pc;
                lr = regs->lr;
                sp = regs->sp;
                r52 = regs->regs[52];
        }

        backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
        kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

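/* Return non-zero if the iterator has no more frames to offer. */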
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
        return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

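/*
 * Advance to the next frame, following sigreturn and fault-handler
 * frames into the contexts they interrupted.  If the new frame is
 * identical to the old one we flag the trace KBT_LOOP, so callers
 * don't spin forever on a corrupted stack.
 */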
void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
        unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
        kbt->new_context = 0;
        if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
                kbt->end = KBT_DONE;
                return;
        }
        kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
        if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
                /* Trapped in a loop; give up. */
                kbt->end = KBT_LOOP;
        }
}
EXPORT_SYMBOL(KBacktraceIterator_next);

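/*
 * Describe "address" into "buf": for kernel pcs, use kallsyms to
 * produce "name+offset/size [module]"; for user pcs, name the file
 * (or "anon") backing the enclosing vma, which requires the caller
 * to hold the mmap_sem.
 */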
static void describe_addr(struct KBacktraceIterator *kbt,
                          unsigned long address,
                          int have_mmap_sem, char *buf, size_t bufsize)
{
        struct vm_area_struct *vma;
        size_t namelen, remaining;
        unsigned long size, offset, adjust;
        char *p, *modname;
        const char *name;
        int rc;

        /*
         * Look one byte back for every caller frame (i.e. those that
         * aren't a new context) so we look up symbol data for the
         * call itself, not the following instruction, which may be on
         * a different line (or in a different function).
         */
        adjust = !kbt->new_context;
        address -= adjust;

        if (address >= PAGE_OFFSET) {
                /* Handle kernel symbols. */
                BUG_ON(bufsize < KSYM_NAME_LEN);
                name = kallsyms_lookup(address, &size, &offset,
                                       &modname, buf);
                if (name == NULL) {
                        buf[0] = '\0';
                        return;
                }
                namelen = strlen(buf);
                remaining = (bufsize - 1) - namelen;
                p = buf + namelen;
                rc = snprintf(p, remaining, "+%#lx/%#lx ",
                              offset + adjust, size);
                if (modname && rc < remaining)
                        snprintf(p + rc, remaining - rc, "[%s] ", modname);
                buf[bufsize-1] = '\0';
                return;
        }

        /* If we don't have the mmap_sem, we can't show any more info. */
        buf[0] = '\0';
        if (!have_mmap_sem)
                return;

        /* Find vma info. */
        vma = find_vma(kbt->task->mm, address);
        if (vma == NULL || address < vma->vm_start) {
                snprintf(buf, bufsize, "[unmapped address] ");
                return;
        }

        if (vma->vm_file) {
                p = file_path(vma->vm_file, buf, bufsize);
                if (IS_ERR(p))
                        p = "?";
                name = kbasename(p);
        } else {
                name = "anon";
        }

        /* Generate a string description of the vma info. */
        namelen = strlen(name);
        remaining = (bufsize - 1) - namelen;
        memmove(buf, name, namelen);
        snprintf(buf + namelen, remaining, "[%lx+%lx] ",
                 vma->vm_start, vma->vm_end - vma->vm_start);
}

/*
 * Avoid possible crash recursion during backtrace.  If it happens, it
 * makes it easy to lose the actual root cause of the failure, so we
 * put a simple guard on all the backtrace loops.
 */
static bool start_backtrace(void)
{
        if (current_thread_info()->in_backtrace) {
                pr_err("Backtrace requested while in backtrace!\n");
                return false;
        }
        current_thread_info()->in_backtrace = true;
        return true;
}

static void end_backtrace(void)
{
        current_thread_info()->in_backtrace = false;
}

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt)
{
        int i;
        int have_mmap_sem = 0;

        if (!start_backtrace())
                return;
        kbt->verbose = 1;
        i = 0;
        for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
                char namebuf[KSYM_NAME_LEN+100];
                unsigned long address = kbt->it.pc;

                /*
                 * Try to acquire the mmap_sem as we pass into userspace.
                 * If we're in an interrupt context, don't even try, since
                 * it's not safe to call e.g. d_path() from an interrupt,
                 * since it uses spin locks without disabling interrupts.
                 * Note we test "kbt->task == current", not "kbt->is_current",
                 * since we're checking that "current" will work in d_path().
                 */
                if (kbt->task == current && address < PAGE_OFFSET &&
                    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
                        have_mmap_sem =
                                down_read_trylock(&kbt->task->mm->mmap_sem);
                }

                describe_addr(kbt, address, have_mmap_sem,
                              namebuf, sizeof(namebuf));

                pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
                       i++, address, namebuf, (unsigned long)(kbt->it.sp));

                if (i >= 100) {
                        pr_err("Stack dump truncated (%d frames)\n", i);
                        break;
                }
        }
        if (kbt->end == KBT_LOOP)
                pr_err("Stack dump stopped; next frame identical to this one\n");
        if (have_mmap_sem)
                up_read(&kbt->task->mm->mmap_sem);
        end_backtrace();
}
EXPORT_SYMBOL(tile_show_stack);

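/* Synthesize a minimal pt_regs holding the four values the backtracer needs. */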
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
                                       ulong pc, ulong lr, ulong sp, ulong r52)
{
        memset(regs, 0, sizeof(struct pt_regs));
        regs->pc = pc;
        regs->lr = lr;
        regs->sp = sp;
        regs->regs[52] = r52;
        return regs;
}

/* Deprecated function currently only used by kernel_double_fault(). */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
        struct KBacktraceIterator kbt;
        struct pt_regs regs;

        regs_to_pt_regs(&regs, pc, lr, sp, r52);
        KBacktraceIterator_init(&kbt, NULL, &regs);
        tile_show_stack(&kbt);
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
                                      ulong lr, ulong sp, ulong r52)
{
        struct pt_regs regs;
        KBacktraceIterator_init(kbt, NULL,
                                regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/*
 * Called from sched_show_task() with task != NULL, or dump_stack()
 * with task == NULL.  The esp argument is always NULL.
 */
void show_stack(struct task_struct *task, unsigned long *esp)
{
        struct KBacktraceIterator kbt;
        if (task == NULL || task == current) {
                KBacktraceIterator_init_current(&kbt);
                KBacktraceIterator_next(&kbt);  /* don't show first frame */
        } else {
                KBacktraceIterator_init(&kbt, task, NULL);
        }
        tile_show_stack(&kbt);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

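/*
 * Common worker for the save_stack_trace_*() entry points: walk the
 * requested context and record up to trace->max_entries pc values,
 * skipping the first trace->skip frames and stopping at the kernel/user
 * boundary unless a user-space trace was requested.
 */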
static void save_stack_trace_common(struct task_struct *task,
                                    struct pt_regs *regs,
                                    bool user,
                                    struct stack_trace *trace)
{
        struct KBacktraceIterator kbt;
        int skip = trace->skip;
        int i = 0;

        if (!start_backtrace())
                goto done;
        if (regs != NULL) {
                KBacktraceIterator_init(&kbt, NULL, regs);
        } else if (task == NULL || task == current) {
                KBacktraceIterator_init_current(&kbt);
                skip++;  /* don't show KBacktraceIterator_init_current */
        } else {
                KBacktraceIterator_init(&kbt, task, NULL);
        }
        for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
                if (skip) {
                        --skip;
                        continue;
                }
                if (i >= trace->max_entries ||
                    (!user && kbt.it.pc < PAGE_OFFSET))
                        break;
                trace->entries[i++] = kbt.it.pc;
        }
        end_backtrace();
done:
        if (i < trace->max_entries)
                trace->entries[i++] = ULONG_MAX;
        trace->nr_entries = i;
}

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
        save_stack_trace_common(task, NULL, false, trace);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
        save_stack_trace_common(NULL, NULL, false, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

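/* Capture a kernel trace starting from an explicitly supplied register set. */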
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
        save_stack_trace_common(NULL, regs, false, trace);
}

void save_stack_trace_user(struct stack_trace *trace)
{
        /* Trace user stack if we are not a kernel thread. */
        if (current->mm)
                save_stack_trace_common(NULL, task_pt_regs(current),
                                        true, trace);
        else if (trace->nr_entries < trace->max_entries)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
}
#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);