// SPDX-License-Identifier: GPL-2.0
/*
 * Stack dumping functions
 *
 *  Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>

/*
 * For dump_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
	     unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		if (func(data, sf->gprs[8], 0))
			return sp;
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			if (func(data, sf->gprs[8], 1))
				return sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		if (!user_mode(regs)) {
			if (func(data, regs->psw.addr, 1))
				return sp;
		}
		low = sp;
		sp = regs->gprs[15];
	}
}

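/*
 * Walk the panic stack (if CONFIG_CHECK_STACK), the async interrupt stack
 * and the kernel stack of @task in that order, calling @func for every
 * code address found. A NULL @task means the current task.
 */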
void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
		unsigned long sp)
{
	unsigned long frame_size;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
#ifdef CONFIG_CHECK_STACK
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
			  S390_lowcore.panic_stack + frame_size);
#endif
	sp = __dump_trace(func, data, sp,
			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			  S390_lowcore.async_stack + frame_size);
	task = task ?: current;
	__dump_trace(func, data, sp,
		     (unsigned long)task_stack_page(task),
		     (unsigned long)task_stack_page(task) + THREAD_SIZE);
}
EXPORT_SYMBOL_GPL(dump_trace);

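/*
 * dump_trace() callback: print one backtrace entry, putting unreliable
 * addresses in parentheses.
 */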
static int show_address(void *data, unsigned long address, int reliable)
{
	if (reliable)
		printk(" [<%016lx>] %pSR \n", address, (void *)address);
	else
		printk("([<%016lx>] %pSR)\n", address, (void *)address);
	return 0;
}

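/*
 * Print a call trace for @task. If @stack is NULL, start at the task's
 * saved kernel stack pointer, or at the current stack pointer when no
 * task is given.
 */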
void show_stack(struct task_struct *task, unsigned long *stack)
{
	unsigned long sp = (unsigned long) stack;

	if (!sp)
		sp = task ? task->thread.ksp : current_stack_pointer();
	printk("Call Trace:\n");
	dump_trace(show_address, NULL, task, sp);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

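/* Print the last breaking-event address recorded in regs->args[0]. */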
static void show_last_breaking_event(struct pt_regs *regs)
{
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
}

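/*
 * Print the PSW, its decoded bit fields and all general purpose
 * registers, followed by the code at the PSW address (show_code()).
 */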
void show_registers(struct pt_regs *regs)
{
	struct psw_bits *psw = &psw_bits(regs->psw);
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	if (!user_mode(regs))
		pr_cont(" (%pSR)", (void *)regs->psw.addr);
	pr_cont("\n");
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
	       psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
	pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
	printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}

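/*
 * Print task info and registers; for a kernel-mode pt_regs also print
 * a stack backtrace, then the last breaking-event address.
 */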
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

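/*
 * Oops handler: print the oops banner, modules and register state under
 * die_lock, taint the kernel and terminate the current task; panic
 * instead when in interrupt context or if panic_on_oops is set.
 */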
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
	       regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
	pr_cont("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	pr_cont("SMP ");
#endif
	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC");
	pr_cont("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}