// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 */

#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

/*
 * Walk the kernel stack of @task (or the state described by @regs, if given)
 * and hand each return address to @consume_entry until it returns false.
 */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stacktraces with krethook functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}

/*
 * Store one instruction address, either into the perf callchain @entry
 * (when @perf is true) or via the generic @consume_entry callback.
 * Returns false as soon as the consumer wants no further entries.
 */
static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
			    struct perf_callchain_entry_ctx *entry, bool perf,
			    unsigned long ip)
{
#ifdef CONFIG_PERF_EVENTS
	if (perf) {
		if (perf_callchain_store(entry, ip))
			return false;
		return true;
	}
#endif
	return consume_entry(cookie, ip);
}

static inline bool ip_invalid(unsigned long ip)
{
	/*
	 * Perform some basic checks to determine whether an instruction
	 * address taken from an unreliable source is invalid.
	 */
	if (ip & 1)
		return true;
	if (ip < mmap_min_addr)
		return true;
	if (ip >= current->mm->context.asce_limit)
		return true;
	return false;
}

static inline bool ip_within_vdso(unsigned long ip)
{
	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
}

/*
 * Walk a user space stack by following the s390 ABI back chain, with
 * special handling for the non-standard VDSO stack frame layout.
 */
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf)
{
	struct stack_frame_vdso_wrapper __user *sf_vdso;
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!current->mm)
		return;
	ip = instruction_pointer(regs);
	if (!store_ip(consume_entry, cookie, entry, perf, ip))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		/*
		 * VDSO entry code has a non-standard stack frame layout.
		 * See VDSO user wrapper code for details.
		 */
		if (!sp && ip_within_vdso(ip)) {
			sf_vdso = (void __user *)sf;
			if (__get_user(ip, &sf_vdso->return_address))
				break;
			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
			sf = (void __user *)sp;
			if (__get_user(sp, &sf->back_chain))
				break;
		} else {
			sf = (void __user *)sp;
			if (__get_user(ip, &sf->gprs[8]))
				break;
		}
		/* Sanity check: ABI requires SP to be 8 byte aligned. */
		if (sp & 0x7)
			break;
		if (ip_invalid(ip)) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (!first)
				break;
			ip = regs->gprs[14];
			if (ip_invalid(ip))
				break;
		}
		if (!store_ip(consume_entry, cookie, entry, perf, ip))
			break;
		first = false;
	}
	pagefault_enable();
}

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}
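As a rough illustration of the callback contract above (not part of the file): a stack_trace_consume_fn returns false to stop the walk, which is how callers bound the number of collected entries. The names example_trace, example_consume_entry and example_walk_current below are hypothetical, the sketch assumes the usual kernel environment (<linux/kernel.h>, <linux/sched.h>), and real callers normally go through the generic helpers in kernel/stacktrace.c rather than calling arch_stack_walk() directly.

struct example_trace {
	unsigned long entries[16];
	unsigned int nr;
};

/* Hypothetical consumer: record addresses until the buffer is full. */
static bool example_consume_entry(void *cookie, unsigned long addr)
{
	struct example_trace *t = cookie;

	if (t->nr >= ARRAY_SIZE(t->entries))
		return false;	/* stop the walk */
	t->entries[t->nr++] = addr;
	return true;		/* keep walking */
}

/* Hypothetical caller: walk the current task's kernel stack. */
static void example_walk_current(void)
{
	struct example_trace t = { .nr = 0 };

	arch_stack_walk(example_consume_entry, &t, current, NULL);
}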
/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

/*
 * Callback for dump_trace(): store one address in the stack_trace buffer,
 * optionally skipping scheduler functions and honouring trace->skip.
 * Returns 1 once the buffer is full so that the walk stops.
 */
static int __save_address(void *data, unsigned long address, int nosched)
{
	struct stack_trace *trace = data;

	if (nosched && in_sched_functions(address))
		return 0;
	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}
	if (trace->nr_entries < trace->max_entries) {
		trace->entries[trace->nr_entries++] = address;
		return 0;
	}
	return 1;
}

static int save_address(void *data, unsigned long address)
{
	return __save_address(data, address, 0);
}

static int save_address_nosched(void *data, unsigned long address)
{
	return __save_address(data, address, 1);
}

void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();
	dump_trace(save_address, trace, NULL, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	sp = tsk->thread.ksp;
	if (tsk == current)
		sp = current_stack_pointer();
	dump_trace(save_address_nosched, trace, tsk, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	unsigned long sp;

	sp = kernel_stack_pointer(regs);
	dump_trace(save_address, trace, NULL, sp);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
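As a rough usage sketch for the older struct stack_trace interface in this revision (not part of the file, and assuming the struct fields declared in <linux/stacktrace.h> of that era): the caller supplies the entries array, save_stack_trace() fills it, and, as seen above, appends ULONG_MAX as a terminator when space is left. example_capture() is a hypothetical name.

static void example_capture(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,	/* number of leading entries to drop */
	};

	save_stack_trace(&trace);
	/* entries[0 .. trace.nr_entries - 1] now hold kernel return addresses */
}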