/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/stacktrace.h>


int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;

void printk_address(unsigned long address, int reliable)
{
	printk(" [<%p>] %s%pB\n", (void *)address,
	       reliable ? "" : "? ", (void *)address);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{
	struct task_struct *task = tinfo->task;
	unsigned long ret_addr;
	int index = task->curr_ret_stack;

	if (addr != (unsigned long)return_to_handler)
		return;

	if (!task->ret_stack || index < *graph)
		return;

	index -= *graph;
	ret_addr = task->ret_stack[index].ret;

	ops->address(data, ret_addr, 1);

	(*graph)++;
}
#else
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{ }
#endif

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

static inline int valid_stack_ptr(struct thread_info *tinfo,
				  void *p, unsigned int size, void *end)
{
	void *t = tinfo;
	if (end) {
		if (p < end && p >= (end - THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}
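
/*
 * Worked example (illustrative addresses, assuming a THREAD_SIZE of
 * 8 KiB): with no exception-stack 'end' and tinfo at 0xffff880012340000,
 * a candidate pointer p is accepted only if
 *
 *	0xffff880012340000 < p < 0xffff880012342000 - size
 *
 * i.e. the whole 'size'-byte read must land strictly inside the task's
 * stack pages.  With 'end' set, the same window check is done against
 * the THREAD_SIZE-sized exception stack that ends at 'end'.
 */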

unsigned long
print_context_stack(struct thread_info *tinfo,
		    unsigned long *stack, unsigned long bp,
		    const struct stacktrace_ops *ops, void *data,
		    unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
			if ((unsigned long)stack == bp + sizeof(long)) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long)frame;
			} else {
				ops->address(data, addr, 0);
			}
			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
		}
		stack++;
	}
	return bp;
}
EXPORT_SYMBOL_GPL(print_context_stack);

unsigned long
print_context_stack_bp(struct thread_info *tinfo,
		       unsigned long *stack, unsigned long bp,
		       const struct stacktrace_ops *ops, void *data,
		       unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;
	unsigned long *ret_addr = &frame->return_address;

	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
		unsigned long addr = *ret_addr;

		if (!__kernel_text_address(addr))
			break;

		ops->address(data, addr, 1);
		frame = frame->next_frame;
		ret_addr = &frame->return_address;
		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
	}

	return (unsigned long)frame;
}
EXPORT_SYMBOL_GPL(print_context_stack_bp);
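
/*
 * Illustration only, not part of this file: a minimal userspace sketch
 * of the strict frame-pointer walk done by print_context_stack_bp().
 * Assumes x86 built with -fno-omit-frame-pointer, so that every frame
 * starts with the saved frame pointer followed by the return address.
 * A real walker would also bounds-check each frame before dereferencing
 * it, as valid_stack_ptr() does above.
 *
 *	#include <stdio.h>
 *
 *	struct stack_frame {
 *		struct stack_frame *next_frame;	// saved caller frame ptr
 *		unsigned long return_address;
 *	};
 *
 *	static void __attribute__((noinline)) backtrace_fp(void)
 *	{
 *		struct stack_frame *frame = __builtin_frame_address(0);
 *		int depth;
 *
 *		for (depth = 0; frame && depth < 16; depth++) {
 *			printf("[<%016lx>]\n", frame->return_address);
 *			frame = frame->next_frame;	// follow the chain
 *		}
 *	}
 */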

static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	/* 'data' is the log level prefix; never use it as a format string. */
	printk("%s", (char *)data);
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.stack		= print_trace_stack,
	.address	= print_trace_address,
	.walk_stack	= print_context_stack,
};

void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *stack, unsigned long bp, char *log_lvl)
{
	printk("%sCall Trace:\n", log_lvl);
	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_log_lvl(task, NULL, sp, 0, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long bp;
	unsigned long stack;

	bp = stack_frame(current, NULL);
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &stack, bp);
}
EXPORT_SYMBOL(dump_stack);
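
/*
 * Since dump_stack() is exported, any module can call it as a cheap
 * debugging aid.  A minimal, hypothetical module sketch:
 *
 *	#include <linux/kernel.h>
 *	#include <linux/module.h>
 *
 *	static int __init demo_init(void)
 *	{
 *		pr_info("demo: who is loading us?\n");
 *		dump_stack();	// Pid/comm banner plus "Call Trace:" dump
 *		return 0;
 *	}
 *	module_init(demo_init);
 *	MODULE_LICENSE("GPL");
 */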

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
EXPORT_SYMBOL_GPL(oops_begin);

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
#ifdef CONFIG_X86_32
	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
#endif
	return 0;
}

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (!user_mode_vm(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

static int __init kstack_setup(char *s)
{
	if (!s)
		return -EINVAL;
	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	code_bytes = simple_strtoul(s, NULL, 0);
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);
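
/*
 * Both hooks above define kernel command-line parameters.  Illustrative
 * usage on the boot line:
 *
 *	kstack=24 code_bytes=128
 *
 * "kstack=N" prints N words from the kernel stack in oops reports;
 * "code_bytes=N" widens the "Code:" object-code dump to N bytes
 * (clamped to 8192).
 */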
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static unsigned int code_bytes = 64;
static int die_counter;

bool in_task_stack(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

bool in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type	= STACK_TYPE_ENTRY;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}
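
/*
 * Note the bounds trick in in_entry_stack(): because 'ss' has type
 * struct entry_stack *, 'ss + 1' points exactly sizeof(struct
 * entry_stack) bytes past 'ss', i.e. one byte past the end of the
 * stack.  A userspace sketch of the same idiom (illustrative type):
 *
 *	struct entry_stack { unsigned long words[64]; };  // 512 bytes
 *
 *	static int in_range(void *p, struct entry_stack *ss)
 *	{
 *		void *begin = ss;
 *		void *end = ss + 1;	// begin + sizeof(*ss)
 *
 *		return p >= begin && p < end;
 *	}
 */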

static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

void show_iret_regs(struct pt_regs *regs)
{
	printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip);
	printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
	       regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, 0);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs);
	}
}

void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial);

		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}
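
/*
 * Putting the loops above together, a printed trace looks roughly like
 * this (symbols, offsets and the stack-type name are illustrative):
 *
 *	Call Trace:
 *	 <IRQ>
 *	 handler_fn+0x23/0x80
 *	 ? stale_text_addr+0x10/0x10
 *	 __handle_irq_event_percpu+0x44/0x1a0
 *	 </IRQ>
 *
 * Unprefixed entries were confirmed by the unwinder; '?' entries are
 * leftover text addresses found by the scan, printed as hints only.
 */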

void show_stack(struct task_struct *task, unsigned long *sp)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
}

void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
EXPORT_SYMBOL_GPL(oops_begin);
NOKPROBE_SYMBOL(oops_begin);

void __noreturn rewind_stack_do_exit(int signr);

void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 */
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

int __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT"         : "",
	       IS_ENABLED(CONFIG_SMP)     ? " SMP"             : "",
	       debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
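
	/*
	 * The header printed above looks like, e.g. (illustrative):
	 *
	 *	general protection fault: 0000 [#1] PREEMPT SMP PTI
	 *
	 * i.e. reason string, error code, die counter, then the
	 * config/feature tags selected by the IS_ENABLED() checks.
	 */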

	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);
#ifdef CONFIG_X86_32
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: %pS SS:ESP: %04x:%08lx\n",
	       (void *)regs->ip, ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP: %pS RSP: %016lx\n", (void *)regs->ip, regs->sp);
#endif
	return 0;
}
NOKPROBE_SYMBOL(__die);

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

static int __init code_bytes_setup(char *s)
{
	ssize_t ret;
	unsigned long val;

	if (!s)
		return -EINVAL;

	ret = kstrtoul(s, 0, &val);
	if (ret)
		return ret;

	code_bytes = val;
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);

void show_regs(struct pt_regs *regs)
{
	bool all = true;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	if (IS_ENABLED(CONFIG_X86_32))
		all = !user_mode(regs);

	__show_regs(regs, all);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
			    probe_kernel_address(ip, c)) {
				pr_cont(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				pr_cont("<%02x> ", c);
			else
				pr_cont("%02x ", c);
		}
	}
	pr_cont("\n");
}
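
/*
 * The "Code:" dump printed above marks the byte at the faulting
 * instruction pointer with angle brackets.  An illustrative line
 * (the bytes are made up):
 *
 *	Code: 55 48 89 e5 48 8b 07 <0f> 0b 5d c3
 *
 * Here <0f> is the byte at regs->ip; up to code_bytes bytes around it
 * are shown, starting code_bytes * 43 / 64 bytes before the fault.
 */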