/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs && !consume_entry(cookie, regs->ip, false))
		return;

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr, false))
			break;
	}
}
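
/*
 * Illustrative sketch (not part of this file): a minimal
 * stack_trace_consume_fn that collects return addresses into a fixed
 * buffer, assuming the three-argument callback signature used above.
 * All example_* names are hypothetical.
 */
struct example_trace {
	unsigned long entries[32];
	unsigned int nr;
};

static bool example_consume(void *cookie, unsigned long addr, bool reliable)
{
	struct example_trace *t = cookie;

	if (t->nr >= ARRAY_SIZE(t->entries))
		return false;	/* returning false stops the walk */
	t->entries[t->nr++] = addr;
	return true;
}

/* Usage: walk the current task's own kernel stack. */
static void example_walk_current(void)
{
	struct example_trace t = { .nr = 0 };

	arch_stack_walk(example_consume, &t, current, NULL);
}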

/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL);
	     !unwind_done(&state) && !unwind_error(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/* Success path for user tasks */
			if (user_mode(regs))
				return 0;

			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (IS_ENABLED(CONFIG_FRAME_POINTER))
				return -EINVAL;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr)
			return -EINVAL;

		if (!consume_entry(cookie, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;

	/* Success path for non-user tasks, i.e. kthreads and idle tasks */
	if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
		return -EINVAL;

	return 0;
}
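
/*
 * Illustrative sketch (not part of this file): a live-patching style
 * caller treats any non-zero return as "do not trust this trace",
 * reusing the hypothetical helpers sketched after arch_stack_walk().
 */
static int example_walk_reliable(struct task_struct *task)
{
	struct example_trace t = { .nr = 0 };

	/* For a non-current task, the caller must keep it inactive. */
	return arch_stack_walk_reliable(example_consume, &t, task);
}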

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user *next_fp;
	unsigned long ret_addr;
};

static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	const void __user *fp = (const void __user *)regs->bp;

	if (!consume_entry(cookie, regs->ip, false))
		return;

	while (1) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (!frame.ret_addr)
			break;
		if (!consume_entry(cookie, frame.ret_addr, false))
			break;
		fp = frame.next_fp;
	}
}
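
/*
 * Illustrative sketch (not part of this file): sampling the user stack of
 * the current task, e.g. from a profiling hook, reusing the hypothetical
 * helpers sketched after arch_stack_walk(). task_pt_regs() is only
 * meaningful here when the task has user context (current->mm != NULL).
 */
static void example_walk_user(void)
{
	struct example_trace t = { .nr = 0 };

	if (current->mm)
		arch_stack_walk_user(example_consume, &t, task_pt_regs(current));
}

/*
 * What follows appears to be an earlier revision of the same file, from
 * before the generic arch_stack_walk() interface: it implements the older
 * save_stack_trace() API family directly on top of the unwinder.
 */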

/*
 * Stack trace management functions
 *
 * Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

static int save_stack_address(struct stack_trace *trace, unsigned long addr,
			      bool nosched)
{
	if (nosched && in_sched_functions(addr))
		return 0;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}

static void noinline __save_stack_trace(struct stack_trace *trace,
			       struct task_struct *task, struct pt_regs *regs,
			       bool nosched)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs)
		save_stack_address(trace, regs->ip, nosched);

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || save_stack_address(trace, addr, nosched))
			break;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	trace->skip++;
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
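
/*
 * Illustrative sketch (not part of this file): the old API writes into a
 * caller-supplied entries[] array and uses ->skip to drop the innermost
 * frames. All example_* names are hypothetical.
 */
static void example_save_current(void)
{
	unsigned long entries[32];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};

	save_stack_trace(&trace);
	/*
	 * trace.nr_entries addresses are now in entries[], terminated
	 * with ULONG_MAX if there was room left in the buffer.
	 */
}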

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		trace->skip++;
	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
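
/*
 * Illustrative sketch (not part of this file): tracing another task.
 * save_stack_trace_tsk() passes nosched=true, so scheduler-internal
 * frames are filtered out of the resulting trace.
 */
static void example_save_task(struct task_struct *tsk)
{
	unsigned long entries[32];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
	};

	save_stack_trace_tsk(tsk, &trace);
}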

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

#define STACKTRACE_DUMP_ONCE(task) ({				\
	static bool __section(.data.unlikely) __dumped;		\
								\
	if (!__dumped) {					\
		__dumped = true;				\
		WARN_ON(1);					\
		show_stack(task, NULL);				\
	}							\
})

static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
			    struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (!user_mode(regs))
				return -EINVAL;

			/*
			 * The last frame contains the user mode syscall
			 * pt_regs. Skip it and finish the unwind.
			 */
			unwind_next_frame(&state);
			if (!unwind_done(&state)) {
				STACKTRACE_DUMP_ONCE(task);
				return -EINVAL;
			}
			break;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr) {
			STACKTRACE_DUMP_ONCE(task);
			return -EINVAL;
		}

		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state)) {
		STACKTRACE_DUMP_ONCE(task);
		return -EINVAL;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
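
/*
 * Illustrative sketch (not part of this file): a livepatch-style caller
 * only proceeds when the whole trace fit in the buffer and every frame
 * was reliable, i.e. the return value is 0. All example_* names are
 * hypothetical.
 */
static int example_task_is_patchable(struct task_struct *tsk)
{
	unsigned long entries[64];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
	};

	return save_stack_trace_tsk_reliable(tsk, &trace);
}
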
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user *next_fp;
	unsigned long ret_addr;
};

static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
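
/*
 * Illustrative sketch (not part of this file): sampling the user-space
 * stack of the current task, e.g. from a profiling hook. The helper
 * itself already skips kernel threads (no current->mm) and appends the
 * ULONG_MAX terminator when there is room.
 */
static void example_save_user(void)
{
	unsigned long entries[32];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
	};

	save_stack_trace_user(&trace);
}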