/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/stacktrace.h>


int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;

void printk_address(unsigned long address, int reliable)
{
	printk(" [<%p>] %s%pB\n", (void *) address,
	       reliable ? "" : "? ", (void *) address);
}

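/*
 * With function graph tracing, return addresses on the stack may have been
 * replaced with the address of return_to_handler.  Look up the original
 * return address on task->ret_stack and report that (as reliable) instead.
 */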
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{
	struct task_struct *task;
	unsigned long ret_addr;
	int index;

	if (addr != (unsigned long)return_to_handler)
		return;

	task = tinfo->task;
	index = task->curr_ret_stack;

	if (!task->ret_stack || index < *graph)
		return;

	index -= *graph;
	ret_addr = task->ret_stack[index].ret;

	ops->address(data, ret_addr, 1);

	(*graph)++;
}
#else
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
			const struct stacktrace_ops *ops,
			struct thread_info *tinfo, int *graph)
{ }
#endif

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

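/*
 * Check that @p points to @size readable bytes on the current stack: either
 * the thread stack starting at @tinfo or, when @end is given, the
 * THREAD_SIZE region that ends at @end (an interrupt/exception stack).
 */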
static inline int valid_stack_ptr(struct thread_info *tinfo,
			void *p, unsigned int size, void *end)
{
	void *t = tinfo;
	if (end) {
		if (p < end && p >= (end-THREAD_SIZE))
			return 1;
		else
			return 0;
	}
	return p > t && p < t + THREAD_SIZE - size;
}

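/*
 * Scan every word on the stack and print any value that lands in kernel
 * text.  A word that sits where the frame-pointer chain says a return
 * address should be (just above a saved frame pointer) is reported as
 * reliable; everything else is printed as a '?'-prefixed guess.
 */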
unsigned long
print_context_stack(struct thread_info *tinfo,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data,
		unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
		unsigned long addr;

		addr = *stack;
		if (__kernel_text_address(addr)) {
			if ((unsigned long) stack == bp + sizeof(long)) {
				ops->address(data, addr, 1);
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			} else {
				ops->address(data, addr, 0);
			}
			print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
		}
		stack++;
	}
	return bp;
}
EXPORT_SYMBOL_GPL(print_context_stack);

unsigned long
print_context_stack_bp(struct thread_info *tinfo,
		       unsigned long *stack, unsigned long bp,
		       const struct stacktrace_ops *ops, void *data,
		       unsigned long *end, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;
	unsigned long *ret_addr = &frame->return_address;

	while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
		unsigned long addr = *ret_addr;

		if (!__kernel_text_address(addr))
			break;

		ops->address(data, addr, 1);
		frame = frame->next_frame;
		ret_addr = &frame->return_address;
		print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
	}

	return (unsigned long)frame;
}
EXPORT_SYMBOL_GPL(print_context_stack_bp);

static int print_trace_stack(void *data, char *name)
{
	printk("%s <%s> ", (char *)data, name);
	return 0;
}

/*
 * Print one address/symbol entry per line.
 */
static void print_trace_address(void *data, unsigned long addr, int reliable)
{
	touch_nmi_watchdog();
	printk("%s", (char *)data);
	printk_address(addr, reliable);
}

static const struct stacktrace_ops print_trace_ops = {
	.stack			= print_trace_stack,
	.address		= print_trace_address,
	.walk_stack		= print_context_stack,
};

void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp, char *log_lvl)
{
	printk("%sCall Trace:\n", log_lvl);
	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp)
{
	show_trace_log_lvl(task, regs, stack, bp, "");
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	show_stack_log_lvl(task, NULL, sp, 0, "");
}

/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
	unsigned long bp;
	unsigned long stack;

	bp = stack_frame(current, NULL);
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	show_trace(NULL, NULL, &stack, bp);
}
EXPORT_SYMBOL(dump_stack);

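/*
 * Oopses from different CPUs are serialized on die_lock so their output
 * doesn't interleave; die_nest_count lets the owning CPU re-enter for a
 * nested oops without deadlocking on its own lock.
 */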
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned __kprobes long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
EXPORT_SYMBOL_GPL(oops_begin);

void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}

int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_DEFAULT
	       "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_regs(regs);
#ifdef CONFIG_X86_32
	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
#endif
	return 0;
}

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (!user_mode_vm(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

static int __init kstack_setup(char *s)
{
	ssize_t ret;
	unsigned long val;

	if (!s)
		return -EINVAL;

	ret = kstrtoul(s, 0, &val);
	if (ret)
		return ret;
	kstack_depth_to_print = val;
	return 0;
}
early_param("kstack", kstack_setup);

static int __init code_bytes_setup(char *s)
{
	ssize_t ret;
	unsigned long val;

	if (!s)
		return -EINVAL;

	ret = kstrtoul(s, 0, &val);
	if (ret)
		return ret;

	code_bytes = val;
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
#include <linux/kasan.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
static int die_counter;

static struct pt_regs exec_summary_regs;

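/*
 * If @stack points into @task's thread stack, fill in @info with the
 * stack's type and bounds.
 */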
bool in_task_stack(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

bool in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type	= STACK_TYPE_ENTRY;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

static void printk_stack_address(unsigned long address, int reliable,
				 const char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

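/*
 * Copy the instruction bytes for the "Code:" dump.  Use the non-faulting
 * accessors so that a bogus RIP can't trigger a recursive fault, and
 * refuse user-mode RIP values that point into kernel memory.
 */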
static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
		     unsigned int nbytes)
{
	if (!user_mode(regs))
		return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);

	/*
	 * Make sure userspace isn't trying to trick us into dumping kernel
	 * memory by pointing the userspace instruction pointer at it.
	 */
	if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
		return -EINVAL;

	return copy_from_user_nmi(buf, (void __user *)src, nbytes);
}

/*
 * There are a couple of reasons for the 2/3rd prologue, courtesy of Linus:
 *
 * In case where we don't have the exact kernel image (which, if we did, we can
 * simply disassemble and navigate to the RIP), the purpose of the bigger
 * prologue is to have more context and to be able to correlate the code from
 * the different toolchains better.
 *
 * In addition, it helps in recreating the register allocation of the failing
 * kernel and thus make sense of the register dump.
 *
 * What is more, the additional complication of a variable length insn arch like
 * x86 warrants having longer byte sequence before rIP so that the disassembler
 * can "sync" up properly and find instruction boundaries when decoding the
 * opcode bytes.
 *
 * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
 * guesstimate in an attempt to achieve all of the above.
 */
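/*
 * Note: PROLOGUE_SIZE + 1 + EPILOGUE_SIZE = 42 + 1 + 21 = 64 bytes, i.e. the
 * 64-byte OPCODE_BUFSIZE mentioned above, split 2/3 before and 1/3 after the
 * faulting instruction byte.
 */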
void show_opcodes(struct pt_regs *regs, const char *loglvl)
{
#define PROLOGUE_SIZE 42
#define EPILOGUE_SIZE 21
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
	u8 opcodes[OPCODE_BUFSIZE];
	unsigned long prologue = regs->ip - PROLOGUE_SIZE;

	if (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
		printk("%sCode: Bad RIP value.\n", loglvl);
	} else {
		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
		       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
		       opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
	}
}

void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes(regs, loglvl);
}

void show_iret_regs(struct pt_regs *regs, const char *log_lvl)
{
	show_ip(regs, log_lvl);
	printk("%sRSP: %04x:%016lx EFLAGS: %08lx", log_lvl, (int)regs->ss,
	       regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial, const char *log_lvl)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, SHOW_REGS_SHORT, log_lvl);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs, log_lvl);
	}
}

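/*
 * The main backtrace routine: walk each kernel stack in turn, scanning every
 * word for text addresses while the unwinder decides which of them are real
 * return addresses; the rest are printed as '?'-prefixed hints.
 */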
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, const char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);

		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}

void show_stack(struct task_struct *task, unsigned long *sp,
		const char *loglvl)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, loglvl);
}

void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

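/*
 * Provided by the entry assembly: rewinds the stack pointer to the top of
 * the task stack before calling do_exit(), so the task can be killed even
 * from an IST stack or a nearly-overflowed one.
 */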
void __noreturn rewind_stack_do_exit(int signr);

void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	/* Executive summary in case the oops scrolled away */
	__show_regs(&exec_summary_regs, SHOW_REGS_ALL, KERN_DEFAULT);

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 * Before we rewind the stack, we have to tell KASAN that we're going to
	 * reuse the task stack and that existing poisons are invalid.
	 */
	kasan_unpoison_task_stack(current);
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

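/*
 * Print the oops banner: reason string, error code, die counter and the
 * relevant config/feature tags (PREEMPT, SMP, KASAN, PTI, ...).
 */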
static void __die_header(const char *str, struct pt_regs *regs, long err)
{
	const char *pr = "";

	/* Save the regs of the first oops for the executive summary later. */
	if (!die_counter)
		exec_summary_regs = *regs;

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       pr,
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
}
NOKPROBE_SYMBOL(__die_header);

static int __die_body(const char *str, struct pt_regs *regs, long err)
{
	show_regs(regs);
	print_modules();

	if (notify_die(DIE_OOPS, str, regs, err,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(__die_body);

int __die(const char *str, struct pt_regs *regs, long err)
{
	__die_header(str, regs, err);
	return __die_body(str, regs, err);
}
NOKPROBE_SYMBOL(__die);

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

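/*
 * die() variant for general protection faults: also hands the faulting
 * address to KASAN so wild/non-canonical accesses get a proper report.
 */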
void die_addr(const char *str, struct pt_regs *regs, long err, long gp_addr)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	__die_header(str, regs, err);
	if (gp_addr)
		kasan_non_canonical_hook(gp_addr);
	if (__die_body(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

void show_regs(struct pt_regs *regs)
{
	enum show_regs_mode print_kernel_regs;

	show_regs_print_info(KERN_DEFAULT);

	print_kernel_regs = user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL;
	__show_regs(regs, print_kernel_regs, KERN_DEFAULT);

	/*
	 * When in-kernel, we also print out the stack at the time of the fault..
	 */
	if (!user_mode(regs))
		show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}