Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Stack trace management functions
4 *
5 * Copyright IBM Corp. 2006
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
7 */
8
9#include <linux/stacktrace.h>
10#include <asm/stacktrace.h>
11#include <asm/unwind.h>
12#include <asm/kprobes.h>
13
14void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
15 struct task_struct *task, struct pt_regs *regs)
16{
17 struct unwind_state state;
18 unsigned long addr;
19
20 unwind_for_each_frame(&state, task, regs, 0) {
21 addr = unwind_get_return_address(&state);
22 if (!addr || !consume_entry(cookie, addr))
23 break;
24 }
25}
26
/*
 * Reliable stack walk: feed return addresses of @task's stack to
 * @consume_entry, but return -EINVAL as soon as anything suggests the
 * trace could be incomplete or wrong; return 0 only for a trace that
 * passed every check all the way down.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		/* Only frames on the regular task stack are accepted. */
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		/*
		 * NOTE(review): a non-NULL state.regs presumably marks an
		 * exception/interrupt frame on the stack — treated as
		 * unreliable here; confirm against the s390 unwinder.
		 */
		if (state.regs)
			return -EINVAL;

		/* A frame whose return address can't be resolved taints
		 * the whole trace. */
		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		/* Consumer refusing an entry also means "not reliable". */
		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Stack trace management functions
4 *
5 * Copyright IBM Corp. 2006
6 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
7 */
8
9#include <linux/sched.h>
10#include <linux/sched/debug.h>
11#include <linux/stacktrace.h>
12#include <linux/kallsyms.h>
13#include <linux/export.h>
14
15static int __save_address(void *data, unsigned long address, int nosched)
16{
17 struct stack_trace *trace = data;
18
19 if (nosched && in_sched_functions(address))
20 return 0;
21 if (trace->skip > 0) {
22 trace->skip--;
23 return 0;
24 }
25 if (trace->nr_entries < trace->max_entries) {
26 trace->entries[trace->nr_entries++] = address;
27 return 0;
28 }
29 return 1;
30}
31
/* dump_trace() callback: record all addresses, scheduler code included. */
static int save_address(void *data, unsigned long address, int reliable)
{
	return __save_address(data, address, /* nosched */ 0);
}
36
/* dump_trace() callback: like save_address(), but skip scheduler code. */
static int save_address_nosched(void *data, unsigned long address, int reliable)
{
	return __save_address(data, address, /* nosched */ 1);
}
41
42void save_stack_trace(struct stack_trace *trace)
43{
44 unsigned long sp;
45
46 sp = current_stack_pointer();
47 dump_trace(save_address, trace, NULL, sp);
48 if (trace->nr_entries < trace->max_entries)
49 trace->entries[trace->nr_entries++] = ULONG_MAX;
50}
51EXPORT_SYMBOL_GPL(save_stack_trace);
52
53void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
54{
55 unsigned long sp;
56
57 sp = tsk->thread.ksp;
58 if (tsk == current)
59 sp = current_stack_pointer();
60 dump_trace(save_address_nosched, trace, tsk, sp);
61 if (trace->nr_entries < trace->max_entries)
62 trace->entries[trace->nr_entries++] = ULONG_MAX;
63}
64EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
65
66void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
67{
68 unsigned long sp;
69
70 sp = kernel_stack_pointer(regs);
71 dump_trace(save_address, trace, NULL, sp);
72 if (trace->nr_entries < trace->max_entries)
73 trace->entries[trace->nr_entries++] = ULONG_MAX;
74}
75EXPORT_SYMBOL_GPL(save_stack_trace_regs);