Loading...
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>

14void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
15 struct task_struct *task, struct pt_regs *regs)
16{
17 struct unwind_state state;
18 unsigned long addr;
19
20 unwind_for_each_frame(&state, task, regs, 0) {
21 addr = unwind_get_return_address(&state);
22 if (!addr || !consume_entry(cookie, addr, false))
23 break;
24 }
25}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		/* Only frames on the regular task stack are trusted. */
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		/*
		 * A frame carrying a pt_regs snapshot (presumably left by an
		 * exception or interrupt) makes the trace unreliable.
		 */
		if (state.regs)
			return -EINVAL;

		/* A frame with no resolvable return address is corrupt. */
		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable: the trampoline hides the real return
		 * address of such frames.
		 */
		if (state.ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		/* Consumer vetoed the walk: report the trace as unusable. */
		if (!consume_entry(cookie, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}
/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

13static int __save_address(void *data, unsigned long address, int nosched)
14{
15 struct stack_trace *trace = data;
16
17 if (nosched && in_sched_functions(address))
18 return 0;
19 if (trace->skip > 0) {
20 trace->skip--;
21 return 0;
22 }
23 if (trace->nr_entries < trace->max_entries) {
24 trace->entries[trace->nr_entries++] = address;
25 return 0;
26 }
27 return 1;
28}
29
/* dump_trace() callback: record every address, scheduler code included. */
static int save_address(void *data, unsigned long address, int reliable)
{
	return __save_address(data, address, 0);
}

/* dump_trace() callback: record addresses, skipping scheduler functions. */
static int save_address_nosched(void *data, unsigned long address, int reliable)
{
	return __save_address(data, address, 1);
}

40void save_stack_trace(struct stack_trace *trace)
41{
42 unsigned long sp;
43
44 sp = current_stack_pointer();
45 dump_trace(save_address, trace, NULL, sp);
46 if (trace->nr_entries < trace->max_entries)
47 trace->entries[trace->nr_entries++] = ULONG_MAX;
48}
49EXPORT_SYMBOL_GPL(save_stack_trace);
50
51void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
52{
53 unsigned long sp;
54
55 sp = tsk->thread.ksp;
56 if (tsk == current)
57 sp = current_stack_pointer();
58 dump_trace(save_address_nosched, trace, tsk, sp);
59 if (trace->nr_entries < trace->max_entries)
60 trace->entries[trace->nr_entries++] = ULONG_MAX;
61}
62EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
63
64void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
65{
66 unsigned long sp;
67
68 sp = kernel_stack_pointer(regs);
69 dump_trace(save_address, trace, NULL, sp);
70 if (trace->nr_entries < trace->max_entries)
71 trace->entries[trace->nr_entries++] = ULONG_MAX;
72}
73EXPORT_SYMBOL_GPL(save_stack_trace_regs);