v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack dumping functions
 *
 *  Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>

/*
 * For dump_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
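/*
 * Note on the frame layout assumed below: per the s390 ABI, the first
 * word of each stack frame (struct stack_frame's back_chain) points to
 * the caller's frame, and the frame's register save area (gprs[]) holds
 * the saved %r6-%r15, so gprs[8] is the saved %r14, i.e. the return
 * address that gets reported to the callback.
 */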
static unsigned long
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
	     unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		if (func(data, sf->gprs[8], 0))
			return sp;
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			if (func(data, sf->gprs[8], 1))
				return sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		if (!user_mode(regs)) {
			if (func(data, regs->psw.addr, 1))
				return sp;
		}
		low = sp;
		sp = regs->gprs[15];
	}
}

void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
		unsigned long sp)
{
	unsigned long frame_size;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
			  S390_lowcore.panic_stack + frame_size);
#endif
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			  S390_lowcore.async_stack + frame_size);
	task = task ?: current;
	__dump_trace(func, data, sp,
		     (unsigned long)task_stack_page(task),
		     (unsigned long)task_stack_page(task) + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);
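/*
 * Illustrative sketch, not taken from this file: a minimal dump_trace_func_t
 * callback that merely counts the frames it is shown.  The names
 * count_frames and nr_frames are invented for the example; returning
 * non-zero from the callback stops the walk, as __dump_trace above shows.
 */
static int count_frames(void *data, unsigned long address, int reliable)
{
	unsigned int *nr_frames = data;

	(*nr_frames)++;		/* one more frame visited */
	return 0;		/* keep walking */
}

/*
 * Possible use, assuming a caller running on the current kernel stack:
 *
 *	unsigned int nr_frames = 0;
 *
 *	dump_trace(count_frames, &nr_frames, NULL, current_stack_pointer());
 */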

static int show_address(void *data, unsigned long address, int reliable)
{
	if (reliable)
		printk(" [<%016lx>] %pSR \n", address, (void *)address);
	else
		printk("([<%016lx>] %pSR)\n", address, (void *)address);
	return 0;
}

void show_stack(struct task_struct *task, unsigned long *stack)
{
	unsigned long sp = (unsigned long) stack;

	if (!sp)
		sp = task ? task->thread.ksp : current_stack_pointer();
	printk("Call Trace:\n");
	dump_trace(show_address, NULL, task, sp);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
}

void show_registers(struct pt_regs *regs)
{
	struct psw_bits *psw = &psw_bits(regs->psw);
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	if (!user_mode(regs))
		pr_cont(" (%pSR)", (void *)regs->psw.addr);
	pr_cont("\n");
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
	       psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
	pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
	printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
	       regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
	pr_cont("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	pr_cont("SMP ");
#endif
	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC");
	pr_cont("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}
v4.6
 
/*
 * Stack dumping functions
 *
 *  Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>

/*
 * For dump_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
	     unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		/* Follow the backchain. */
		while (1) {
			if (func(data, sf->gprs[8]))
				return sp;
			low = sp;
			sp = sf->back_chain;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		if (!user_mode(regs)) {
			if (func(data, regs->psw.addr))
				return sp;
		}
		low = sp;
		sp = regs->gprs[15];
	}
}

void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
		unsigned long sp)
{
	unsigned long frame_size;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.panic_stack + frame_size - 4096,
			  S390_lowcore.panic_stack + frame_size);
#endif
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			  S390_lowcore.async_stack + frame_size);
	if (task)
		__dump_trace(func, data, sp,
			     (unsigned long)task_stack_page(task),
			     (unsigned long)task_stack_page(task) + THREAD_SIZE);
	else
		__dump_trace(func, data, sp,
			     S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);

static int show_address(void *data, unsigned long address)
{
	printk("([<%016lx>] %pSR)\n", address, (void *)address);
	return 0;
}

static void show_trace(struct task_struct *task, unsigned long sp)
{
	if (!sp)
		sp = task ? task->thread.ksp : current_stack_pointer();
	printk("Call Trace:\n");
	dump_trace(show_address, NULL, task, sp);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

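/*
 * Unlike the v4.17 variant above, show_stack() here first hex-dumps up to
 * 20 longwords of raw stack contents, four per line, stopping early once
 * the pointer reaches a THREAD_SIZE-aligned boundary (the end of the
 * kernel stack), before handing off to show_trace() for the call chain.
 */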
void show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack;
	int i;

	stack = sp;
	if (!stack) {
		if (!task)
			stack = (unsigned long *)current_stack_pointer();
		else
			stack = (unsigned long *)task->thread.ksp;
	}
	for (i = 0; i < 20; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s       ", i == 0 ? "" : "\n");
		printk("%016lx ", *stack++);
	}
	printk("\n");
	show_trace(task, (unsigned long)sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
}

void show_registers(struct pt_regs *regs)
{
	struct psw_bits *psw = &psw_bits(regs->psw);
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	if (!user_mode(regs))
		printk(" (%pSR)", (void *)regs->psw.addr);
	printk("\n");
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", psw->r, psw->t, psw->i, psw->e,
	       psw->key, psw->m, psw->w, psw->p, psw->as, psw->cc, psw->pm);
	printk(" RI:%x EA:%x", psw->ri, psw->eaba);
	printk("\n%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_trace(NULL, regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
	       regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
	if (debug_pagealloc_enabled())
		printk("DEBUG_PAGEALLOC");
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}