v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack dumping functions
 *
 *  Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>
#include <asm/unwind.h>

const char *stack_type_name(enum stack_type type)
{
	switch (type) {
	case STACK_TYPE_TASK:
		return "task";
	case STACK_TYPE_IRQ:
		return "irq";
	case STACK_TYPE_NODAT:
		return "nodat";
	case STACK_TYPE_RESTART:
		return "restart";
	default:
		return "unknown";
	}
}
EXPORT_SYMBOL_GPL(stack_type_name);
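
/*
 * Check whether @sp lies within [low, high). If it does, record the
 * stack type and its bounds in @info.
 */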
static inline bool in_stack(unsigned long sp, struct stack_info *info,
			    enum stack_type type, unsigned long low,
			    unsigned long high)
{
	if (sp < low || sp >= high)
		return false;
	info->type = type;
	info->begin = low;
	info->end = high;
	return true;
}

static bool in_task_stack(unsigned long sp, struct task_struct *task,
			  struct stack_info *info)
{
	unsigned long stack;

	stack = (unsigned long) task_stack_page(task);
	return in_stack(sp, info, STACK_TYPE_TASK, stack, stack + THREAD_SIZE);
}
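
/*
 * The per-CPU stack helpers below all follow the same pattern: the lowcore
 * field holds the initial stack pointer, which sits one full frame
 * (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)) below the top of the
 * stack; adding frame_size back yields the upper bound, and the stack
 * spans THREAD_SIZE bytes below that.
 */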
static bool in_irq_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long frame_size, top;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	top = S390_lowcore.async_stack + frame_size;
	return in_stack(sp, info, STACK_TYPE_IRQ, top - THREAD_SIZE, top);
}

static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long frame_size, top;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	top = S390_lowcore.nodat_stack + frame_size;
	return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
}

static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long frame_size, top;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	top = S390_lowcore.mcck_stack + frame_size;
	return in_stack(sp, info, STACK_TYPE_MCCK, top - THREAD_SIZE, top);
}

static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long frame_size, top;

	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	top = S390_lowcore.restart_stack + frame_size;
	return in_stack(sp, info, STACK_TYPE_RESTART, top - THREAD_SIZE, top);
}
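
/*
 * Classify @sp: check the task stack first, then (for the current task
 * only) the per-CPU irq, nodat, restart and machine check stacks. On
 * success the stack bounds are stored in @info and 0 is returned;
 * otherwise the type is set to STACK_TYPE_UNKNOWN and -EINVAL is
 * returned. One bit per stack type in @visit_mask ensures each stack is
 * entered at most once while unwinding.
 */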
int get_stack_info(unsigned long sp, struct task_struct *task,
		   struct stack_info *info, unsigned long *visit_mask)
{
	if (!sp)
		goto unknown;

	/* Sanity check: ABI requires SP to be aligned to 8 bytes. */
	if (sp & 0x7)
		goto unknown;

	/* Check per-task stack */
	if (in_task_stack(sp, task, info))
		goto recursion_check;

	if (task != current)
		goto unknown;

	/* Check per-cpu stacks */
	if (!in_irq_stack(sp, info) &&
	    !in_nodat_stack(sp, info) &&
	    !in_restart_stack(sp, info) &&
	    !in_mcck_stack(sp, info))
		goto unknown;

recursion_check:
	/*
	 * Make sure we don't iterate through any given stack more than once.
	 * If it comes up a second time then there's something wrong going on:
	 * just break out and report an unknown stack type.
	 */
	if (*visit_mask & (1UL << info->type))
		goto unknown;
	*visit_mask |= 1UL << info->type;
	return 0;
unknown:
	info->type = STACK_TYPE_UNKNOWN;
	return -EINVAL;
}
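
/*
 * Print a call trace for @task, walking the frames with the unwinder.
 * Reliable frames are printed as " [<address>] symbol", unreliable ones
 * in parentheses.
 */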
void show_stack(struct task_struct *task, unsigned long *stack,
		       const char *loglvl)
{
	struct unwind_state state;

	printk("%sCall Trace:\n", loglvl);
	unwind_for_each_frame(&state, task, NULL, (unsigned long) stack)
		printk(state.reliable ? "%s [<%016lx>] %pSR \n" :
					"%s([<%016lx>] %pSR)\n",
		       loglvl, state.ip, (void *) state.ip);
	debug_show_held_locks(task ? : current);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] %pSR\n", regs->last_break, (void *)regs->last_break);
}

void show_registers(struct pt_regs *regs)
{
	struct psw_bits *psw = &psw_bits(regs->psw);
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %px %px", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	if (!user_mode(regs))
		pr_cont(" (%pSR)", (void *)regs->psw.addr);
	pr_cont("\n");
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", psw->per, psw->dat, psw->io, psw->ext,
	       psw->key, psw->mcheck, psw->wait, psw->pstate, psw->as, psw->cc, psw->pm);
	pr_cont(" RI:%x EA:%x\n", psw->ri, psw->eaba);
	printk("%s GPRS: %016lx %016lx %016lx %016lx\n", mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           %016lx %016lx %016lx %016lx\n",
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *) regs->gprs[15], KERN_DEFAULT);
	show_last_breaking_event(regs);
}
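
/*
 * Oops handler: serialize on die_lock, print the banner, configuration
 * flags and register state, taint the kernel, and either panic (in
 * interrupt context or with panic_on_oops set) or kill the current task.
 */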
static DEFINE_SPINLOCK(die_lock);

void __noreturn die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
	       regs->int_code >> 17, ++die_counter);
#ifdef CONFIG_PREEMPT
	pr_cont("PREEMPT ");
#elif defined(CONFIG_PREEMPT_RT)
	pr_cont("PREEMPT_RT ");
#endif
	pr_cont("SMP ");
	if (debug_pagealloc_enabled())
		pr_cont("DEBUG_PAGEALLOC");
	pr_cont("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	make_task_dead(SIGSEGV);
}
v3.15
 
/*
 * Stack dumping functions
 *
 *  Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *   - the panic stack which is used if the kernel stack has overflowed
 *   - the asynchronous interrupt stack (cpu related)
 *   - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		addr = sf->gprs[8] & PSW_ADDR_INSN;
		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			addr = sf->gprs[8] & PSW_ADDR_INSN;
			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		addr = regs->psw.addr & PSW_ADDR_INSN;
		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
		low = sp;
		sp = regs->gprs[15];
	}
}

static void show_trace(struct task_struct *task, unsigned long *stack)
{
	const unsigned long frame_size =
		STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp,
			  S390_lowcore.panic_stack + frame_size - 4096,
			  S390_lowcore.panic_stack + frame_size);
#endif
	sp = __show_trace(sp,
			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			  S390_lowcore.async_stack + frame_size);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
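
/*
 * Dump up to kstack_depth_to_print longwords of raw stack contents
 * (stopping at a THREAD_SIZE boundary), then print the call trace.
 */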
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long *__r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s       ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
#endif
}
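
/*
 * Extract a bit field from the PSW mask: (~bits + 1) & bits isolates the
 * lowest set bit of @bits, so the division shifts the masked field down
 * to bit position zero.
 */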
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	if (!user_mode(regs))
		printk(" (%pSR)", (void *)regs->psw.addr);
	printk("\n");
	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}