v3.1
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/ucontext.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0  /* Backtrace still ongoing */
#define KBT_DONE	1  /* Backtrace cleanly completed */
#define KBT_RUNNING	2  /* Can't run backtrace on a running task */
#define KBT_LOOP	3  /* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

#ifdef CONFIG_64BIT
	/* Find the real l1_pgtable by looking in the l0_pgtable. */
	pte = l1_pgtable[HV_L0_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}
	page = pfn_to_page(pfn);
	BUG_ON(PageHighMem(page));  /* No HIGHMEM on 64-bit. */
	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
#endif
	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!valid_address(kbt, address)) {
		return 0;	/* invalid user-space address */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
	const char *fault = NULL;  /* happy compiler */
	char fault_buf[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {     /* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (EX1_PL(p->ex1) == USER_PL &&
	    p->pc < PAGE_OFFSET &&
	    p->sp < PAGE_OFFSET) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else if (kbt->verbose) {
		pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
		       p->pc, p->sp, p->ex1);
		p = NULL;
	}
	/*
	 * Note: the "odd fault" case above can leave p NULL when
	 * kbt->verbose is set, so check it before dereferencing.
	 */
	if (p == NULL || !kbt->profile ||
	    (INT_MASK(p->faultnum) & QUEUED_INTERRUPTS) == 0)
		return p;
	return NULL;
}

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(unsigned long pc)
{
	return (pc == VDSO_BASE);
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
{
	BacktraceIterator *b = &kbt->it;

	if (b->pc == VDSO_BASE) {
		struct rt_sigframe *frame;
		unsigned long sigframe_top =
			b->sp + sizeof(struct rt_sigframe) - 1;
		if (!valid_address(kbt, b->sp) ||
		    !valid_address(kbt, sigframe_top)) {
			if (kbt->verbose)
				pr_err("  (odd signal: sp %#lx?)\n",
				       (unsigned long)(b->sp));
			return NULL;
		}
		frame = (struct rt_sigframe *)b->sp;
		if (kbt->verbose) {
			pr_err("  <received signal %d>\n",
			       frame->info.si_signo);
		}
		return (struct pt_regs *)&frame->uc.uc_mcontext;
	}
	return NULL;
}

static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}

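/* Restart the backtrace at the frame interrupted by a fault or signal, if any. */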
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help the debug,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 - THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}

	else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr);
	}
}

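/*
 * Typical caller pattern (as used by tile_show_stack() and
 * save_stack_trace_tsk() below):
 *
 *	struct KBacktraceIterator kbt;
 *	KBacktraceIterator_init(&kbt, task, regs);
 *	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt))
 *		... examine kbt.it.pc and kbt.it.sp ...
 */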
void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range, and if we're
	 * asking about the current process we grab the page table
	 * so we can check user accesses before trying to read them.
	 * We flush the TLB to avoid any weird skew issues.
	 */
	is_current = (t == NULL);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->pgtable = NULL;
	kbt->verbose = 0;   /* override in caller if desired */
	kbt->profile = 0;   /* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 0;
	if (is_current) {
		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
			/*
			 * Not just an optimization: this also allows
			 * this to work at all before va/pa mappings
			 * are set up.
			 */
			kbt->pgtable = swapper_pg_dir;
		} else {
			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
			if (!PageHighMem(page))
				kbt->pgtable = __va(pgdir_pa);
			else
				pr_err("page table not in LOWMEM"
				       " (%#llx)\n", pgdir_pa);
		}
		local_flush_tlb_all();
		validate_stack(regs);
	}

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

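/* Return nonzero once the backtrace has no more frames to walk. */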
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

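/* Step to the next frame, splicing through fault and signal contexts. */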
void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;

	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() spit out a space in front of us
		 * and it will mess up our KERN_ERR.
		 */
		pr_err("\n");
		pr_err("Starting stack dump of tid %d, pid %d (%s)"
		       " on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       smp_processor_id(), get_cycles());
	}
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char *modname;
		const char *name;
		unsigned long address = kbt->it.pc;
		unsigned long offset, size;
		char namebuf[KSYM_NAME_LEN+100];

		if (address >= PAGE_OFFSET)
			name = kallsyms_lookup(address, &size, &offset,
					       &modname, namebuf);
		else
			name = NULL;

		if (!name)
			namebuf[0] = '\0';
		else {
			size_t namelen = strlen(namebuf);
			size_t remaining = (sizeof(namebuf) - 1) - namelen;
			char *p = namebuf + namelen;
			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
					  offset, size);
			if (modname && rc < remaining)
				snprintf(p + rc, remaining - rc,
					 "[%s] ", modname);
			namebuf[sizeof(namebuf)-1] = '\0';
		}

		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated"
			       " (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (headers)
		pr_err("Stack dump complete\n");
}
EXPORT_SYMBOL(tile_show_stack);


/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;
	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

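/* Build a minimal pt_regs from the four values the backtracer needs. */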
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;
	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);
v3.15
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/pfn.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>
#include <asm/switch_to.h>
#include <asm/sigframe.h>
#include <asm/stack.h>
#include <asm/vdso.h>
#include <arch/abi.h>
#include <arch/interrupts.h>

#define KBT_ONGOING	0  /* Backtrace still ongoing */
#define KBT_DONE	1  /* Backtrace cleanly completed */
#define KBT_RUNNING	2  /* Can't run backtrace on a running task */
#define KBT_LOOP	3  /* Backtrace entered a loop */

/* Is address on the specified kernel stack? */
static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
{
	ulong kstack_base = (ulong) kbt->task->stack;
	if (kstack_base == 0)  /* corrupt task pointer; just follow stack... */
		return sp >= PAGE_OFFSET && sp < (unsigned long)high_memory;
	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
}

/* Callback for backtracer; basically a glorified memcpy */
static bool read_memory_func(void *result, unsigned long address,
			     unsigned int size, void *vkbt)
{
	int retval;
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (address == 0)
		return 0;
	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* We only tolerate kernel-space reads of this task's stack */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!kbt->is_current) {
		return 0;	/* can't read from other user address spaces */
	}
	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return (retval == 0);
}

/* Return a pt_regs pointer for a valid fault handler frame */
static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
{
	const char *fault = NULL;  /* happy compiler */
	char fault_buf[64];
	unsigned long sp = kbt->it.sp;
	struct pt_regs *p;

	if (sp % sizeof(long) != 0)
		return NULL;
	if (!in_kernel_stack(kbt, sp))
		return NULL;
	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
		return NULL;
	p = (struct pt_regs *)(sp + C_ABI_SAVE_AREA_SIZE);
	if (p->faultnum == INT_SWINT_1 || p->faultnum == INT_SWINT_1_SIGRETURN)
		fault = "syscall";
	else {
		if (kbt->verbose) {     /* else we aren't going to use it */
			snprintf(fault_buf, sizeof(fault_buf),
				 "interrupt %ld", p->faultnum);
			fault = fault_buf;
		}
	}
	if (EX1_PL(p->ex1) == KERNEL_PL &&
	    __kernel_text_address(p->pc) &&
	    in_kernel_stack(kbt, p->sp) &&
	    p->sp >= sp) {
		if (kbt->verbose)
			pr_err("  <%s while in kernel mode>\n", fault);
	} else if (user_mode(p) &&
		   p->sp < PAGE_OFFSET && p->sp != 0) {
		if (kbt->verbose)
			pr_err("  <%s while in user mode>\n", fault);
	} else if (kbt->verbose) {
		pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
		       p->pc, p->sp, p->ex1);
		p = NULL;
	}
	/*
	 * Note: the "odd fault" case above can leave p NULL when
	 * kbt->verbose is set, so check it before dereferencing.
	 */
	if (p == NULL || !kbt->profile ||
	    ((1ULL << p->faultnum) & QUEUED_INTERRUPTS) == 0)
		return p;
	return NULL;
}

/* Is the pc pointing to a sigreturn trampoline? */
static int is_sigreturn(unsigned long pc)
{
	return current->mm && (pc == VDSO_SYM(&__vdso_rt_sigreturn));
}

/* Return a pt_regs pointer for a valid signal handler frame */
static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
				      struct rt_sigframe* kframe)
{
	BacktraceIterator *b = &kbt->it;

	if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET &&
	    b->sp % sizeof(long) == 0) {
		int retval;
		pagefault_disable();
		retval = __copy_from_user_inatomic(
			kframe, (void __user __force *)b->sp,
			sizeof(*kframe));
		pagefault_enable();
		if (retval != 0 ||
		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
			return NULL;
		if (kbt->verbose) {
			pr_err("  <received signal %d>\n",
			       kframe->info.si_signo);
		}
		return (struct pt_regs *)&kframe->uc.uc_mcontext;
	}
	return NULL;
}

static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
{
	return is_sigreturn(kbt->it.pc);
}

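/* Restart the backtrace at the frame interrupted by a fault or signal, if any. */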
static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
{
	struct pt_regs *p;
	struct rt_sigframe kframe;

	p = valid_fault_handler(kbt);
	if (p == NULL)
		p = valid_sigframe(kbt, &kframe);
	if (p == NULL)
		return 0;
	backtrace_init(&kbt->it, read_memory_func, kbt,
		       p->pc, p->lr, p->sp, p->regs[52]);
	kbt->new_context = 1;
	return 1;
}

/* Find a frame that isn't a sigreturn, if there is one. */
static int KBacktraceIterator_next_item_inclusive(
	struct KBacktraceIterator *kbt)
{
	for (;;) {
		do {
			if (!KBacktraceIterator_is_sigreturn(kbt))
				return KBT_ONGOING;
		} while (backtrace_next(&kbt->it));

		if (!KBacktraceIterator_restart(kbt))
			return KBT_DONE;
	}
}

/*
 * If the current sp is on a page different than what we recorded
 * as the top-of-kernel-stack last time we context switched, we have
 * probably blown the stack, and nothing is going to work out well.
 * If we can at least get out a warning, that may help the debug,
 * though we probably won't be able to backtrace into the code that
 * actually did the recursive damage.
 */
static void validate_stack(struct pt_regs *regs)
{
	int cpu = raw_smp_processor_id();
	unsigned long ksp0 = get_current_ksp0();
	unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
	unsigned long sp = stack_pointer;

	if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	}

	else if (sp < ksp0_base + sizeof(struct thread_info)) {
		pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
		       "  sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
		       cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
	}
}

void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
			     struct task_struct *t, struct pt_regs *regs)
{
	unsigned long pc, lr, sp, r52;
	int is_current;

	/*
	 * Set up callback information.  We grab the kernel stack base
	 * so we will allow reads of that address range.
	 */
	is_current = (t == NULL || t == current);
	kbt->is_current = is_current;
	if (is_current)
		t = validate_current();
	kbt->task = t;
	kbt->verbose = 0;   /* override in caller if desired */
	kbt->profile = 0;   /* override in caller if desired */
	kbt->end = KBT_ONGOING;
	kbt->new_context = 1;
	if (is_current)
		validate_stack(regs);

	if (regs == NULL) {
		if (is_current || t->state == TASK_RUNNING) {
			/* Can't do this; we need registers */
			kbt->end = KBT_RUNNING;
			return;
		}
		pc = get_switch_to_pc();
		lr = t->thread.pc;
		sp = t->thread.ksp;
		r52 = 0;
	} else {
		pc = regs->pc;
		lr = regs->lr;
		sp = regs->sp;
		r52 = regs->regs[52];
	}

	backtrace_init(&kbt->it, read_memory_func, kbt, pc, lr, sp, r52);
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
}
EXPORT_SYMBOL(KBacktraceIterator_init);

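/* Return nonzero once the backtrace has no more frames to walk. */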
int KBacktraceIterator_end(struct KBacktraceIterator *kbt)
{
	return kbt->end != KBT_ONGOING;
}
EXPORT_SYMBOL(KBacktraceIterator_end);

void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
{
	unsigned long old_pc = kbt->it.pc, old_sp = kbt->it.sp;
	kbt->new_context = 0;
	if (!backtrace_next(&kbt->it) && !KBacktraceIterator_restart(kbt)) {
		kbt->end = KBT_DONE;
		return;
	}
	kbt->end = KBacktraceIterator_next_item_inclusive(kbt);
	if (old_pc == kbt->it.pc && old_sp == kbt->it.sp) {
		/* Trapped in a loop; give up. */
		kbt->end = KBT_LOOP;
	}
}
EXPORT_SYMBOL(KBacktraceIterator_next);

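/*
 * Describe the pc for one frame: kernel pcs get a kallsyms lookup;
 * user pcs get the name and bounds of the mapped file (if we hold
 * the mmap_sem).  The description is written into buf.
 */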
static void describe_addr(struct KBacktraceIterator *kbt,
			  unsigned long address,
			  int have_mmap_sem, char *buf, size_t bufsize)
{
	struct vm_area_struct *vma;
	size_t namelen, remaining;
	unsigned long size, offset, adjust;
	char *p, *modname;
	const char *name;
	int rc;

	/*
	 * Look one byte back for every caller frame (i.e. those that
	 * aren't a new context) so we look up symbol data for the
	 * call itself, not the following instruction, which may be on
	 * a different line (or in a different function).
	 */
	adjust = !kbt->new_context;
	address -= adjust;

	if (address >= PAGE_OFFSET) {
		/* Handle kernel symbols. */
		BUG_ON(bufsize < KSYM_NAME_LEN);
		name = kallsyms_lookup(address, &size, &offset,
				       &modname, buf);
		if (name == NULL) {
			buf[0] = '\0';
			return;
		}
		namelen = strlen(buf);
		remaining = (bufsize - 1) - namelen;
		p = buf + namelen;
		rc = snprintf(p, remaining, "+%#lx/%#lx ",
			      offset + adjust, size);
		if (modname && rc < remaining)
			snprintf(p + rc, remaining - rc, "[%s] ", modname);
		buf[bufsize-1] = '\0';
		return;
	}

	/* If we don't have the mmap_sem, we can't show any more info. */
	buf[0] = '\0';
	if (!have_mmap_sem)
		return;

	/* Find vma info. */
	vma = find_vma(kbt->task->mm, address);
	if (vma == NULL || address < vma->vm_start) {
		snprintf(buf, bufsize, "[unmapped address] ");
		return;
	}

	if (vma->vm_file) {
		p = d_path(&vma->vm_file->f_path, buf, bufsize);
		if (IS_ERR(p))
			p = "?";
		name = kbasename(p);
	} else {
		name = "anon";
	}

	/* Generate a string description of the vma info. */
	namelen = strlen(name);
	remaining = (bufsize - 1) - namelen;
	memmove(buf, name, namelen);
	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
		 vma->vm_start, vma->vm_end - vma->vm_start);
}

/*
 * Avoid possible crash recursion during backtrace.  If it happens, it
 * makes it easy to lose the actual root cause of the failure, so we
 * put a simple guard on all the backtrace loops.
 */
static bool start_backtrace(void)
{
	if (current->thread.in_backtrace) {
		pr_err("Backtrace requested while in backtrace!\n");
		return false;
	}
	current->thread.in_backtrace = true;
	return true;
}

static void end_backtrace(void)
{
	current->thread.in_backtrace = false;
}

/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;
	int have_mmap_sem = 0;

	if (!start_backtrace())
		return;
	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() spit out a space in front of us
		 * and it will mess up our KERN_ERR.
		 */
		pr_err("\n");
		pr_err("Starting stack dump of tid %d, pid %d (%s)"
		       " on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       raw_smp_processor_id(), get_cycles());
	}
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char namebuf[KSYM_NAME_LEN+100];
		unsigned long address = kbt->it.pc;

		/* Try to acquire the mmap_sem as we pass into userspace. */
		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
			have_mmap_sem =
				down_read_trylock(&kbt->task->mm->mmap_sem);

		describe_addr(kbt, address, have_mmap_sem,
			      namebuf, sizeof(namebuf));

		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		if (i >= 100) {
			pr_err("Stack dump truncated"
			       " (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (headers)
		pr_err("Stack dump complete\n");
	if (have_mmap_sem)
		up_read(&kbt->task->mm->mmap_sem);
	end_backtrace();
}
EXPORT_SYMBOL(tile_show_stack);


/* This is called from show_regs() and _dump_stack() */
void dump_stack_regs(struct pt_regs *regs)
{
	struct KBacktraceIterator kbt;
	KBacktraceIterator_init(&kbt, NULL, regs);
	tile_show_stack(&kbt, 1);
}
EXPORT_SYMBOL(dump_stack_regs);

/* Build a minimal pt_regs from the four values the backtracer needs. */
static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
				       ulong pc, ulong lr, ulong sp, ulong r52)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->pc = pc;
	regs->lr = lr;
	regs->sp = sp;
	regs->regs[52] = r52;
	return regs;
}

/* This is called from dump_stack() and just converts to pt_regs */
void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called from KBacktraceIterator_init_current() */
void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
				      ulong lr, ulong sp, ulong r52)
{
	struct pt_regs regs;
	KBacktraceIterator_init(kbt, NULL,
				regs_to_pt_regs(&regs, pc, lr, sp, r52));
}

/* This is called only from kernel/sched/core.c, with esp == NULL */
void show_stack(struct task_struct *task, unsigned long *esp)
{
	struct KBacktraceIterator kbt;
	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	tile_show_stack(&kbt, 0);
}

#ifdef CONFIG_STACKTRACE

/* Support generic Linux stack API too */

void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
{
	struct KBacktraceIterator kbt;
	int skip = trace->skip;
	int i = 0;

	if (!start_backtrace())
		goto done;
	if (task == NULL || task == current)
		KBacktraceIterator_init_current(&kbt);
	else
		KBacktraceIterator_init(&kbt, task, NULL);
	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
		if (skip) {
			--skip;
			continue;
		}
		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
			break;
		trace->entries[i++] = kbt.it.pc;
	}
	end_backtrace();
done:
	trace->nr_entries = i;
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

#endif

/* In entry.S */
EXPORT_SYMBOL(KBacktraceIterator_init_current);