// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
}
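
/*
 * For illustration (a hypothetical caller, not part of this file):
 *
 *	noinline void dump_from_here(void)
 *	{
 *		struct kunwind_state state;
 *
 *		kunwind_init_from_caller(&state);
 *		// state.common.{fp,pc} now describe dump_from_here()'s caller
 *	}
 *
 * The noinline is what makes __builtin_frame_address(1) well-defined here;
 * in this file, arch_stack_walk() plays that role for kunwind_stack_walk().
 */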

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
}

static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(&state->common);
	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}
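
/*
 * Note: when in-kernel pointer authentication is in use, the LR values found
 * in frame records embed a PAC in their upper bits. kunwind_next() strips
 * this with ptrauth_strip_kernel_insn_pac() so that the PC it reports (and
 * matches against the trampolines in kunwind_recover_return_address()) is a
 * canonical kernel address.
 */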

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)				\
	({						\
		((task == current) && !preemptible())	\
			? stackinfo_get_##name()	\
			: stackinfo_get_unknown();	\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)				\
	({						\
		((task == current) && in_nmi())		\
			? stackinfo_get_sdei_##name()	\
			: stackinfo_get_unknown();	\
	})

#define STACKINFO_EFI					\
	({						\
		((task == current) && current_in_efi())	\
			? stackinfo_get_efi()		\
			: stackinfo_get_unknown();	\
	})
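
/*
 * For example, STACKINFO_CPU(irq) expands (within kunwind_stack_walk(),
 * where @task is in scope) to a statement expression equivalent to:
 *
 *	((task == current) && !preemptible()) ? stackinfo_get_irq()
 *					      : stackinfo_get_unknown();
 *
 * i.e. a stack which is not currently accessible is simply recorded as
 * "unknown", so the unwinder will refuse to walk onto it.
 */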

static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
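
/*
 * Illustrative use of arch_stack_walk() (hypothetical consumer, not part of
 * this file): capture up to buf->max PCs into a buffer, returning false from
 * the callback to stop the walk early.
 *
 *	struct trace_buf {
 *		unsigned long *pcs;
 *		unsigned int nr, max;
 *	};
 *
 *	static bool capture_pc(void *cookie, unsigned long pc)
 *	{
 *		struct trace_buf *buf = cookie;
 *
 *		buf->pcs[buf->nr++] = pc;
 *		return buf->nr < buf->max;
 *	}
 *
 *	...
 *	arch_stack_walk(capture_pc, &buf, current, NULL);
 *
 * The generic stack_trace_save() helpers drive arch_stack_walk() in
 * essentially this way; dump_backtrace_entry() below is another consumer.
 */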

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *	sub	sp, sp, #0x10
 *	stp	x29, x30, [sp]
 *	mov	x29, sp
 *
 * A simple function epilogue looks like this:
 *	mov	sp, x29
 *	ldp	x29, x30, [sp]
 *	add	sp, sp, #0x10
 */
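
/*
 * With this prologue, x29 points at a frame record of two adjacent 64-bit
 * words: the caller's x29 (the previous frame record) at [x29], and the
 * saved x30 (the return address) at [x29, #8]. unwind_frame() below follows
 * exactly this chain.
 */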

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	struct stack_info info;

	if (fp & 0xf)
		return -EINVAL;

	if (!tsk)
		tsk = current;

	if (!on_accessible_stack(tsk, fp, &info))
		return -EINVAL;

	if (test_bit(info.type, frame->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == frame->prev_type) {
		if (fp <= frame->prev_fp)
			return -EINVAL;
	} else {
		set_bit(frame->prev_type, frame->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_frame() invocation.
	 */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
	frame->prev_fp = fp;
	frame->prev_type = info.type;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
		(frame->pc == (unsigned long)return_to_handler)) {
		struct ftrace_ret_stack *ret_stack;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it with the original value.
		 */
		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
		if (WARN_ON_ONCE(!ret_stack))
			return -EINVAL;
		frame->pc = ret_stack->ret;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	/*
	 * Frames created upon entry from EL0 have NULL FP and PC values, so
	 * don't bother reporting these. Frames created by __noreturn functions
	 * might have a valid FP even if PC is bogus, so only terminate where
	 * both are NULL.
	 */
	if (!frame->fp && !frame->pc)
		return -EINVAL;

	return 0;
}
NOKPROBE_SYMBOL(unwind_frame);

void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(tsk, frame);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(walk_stackframe);
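
/*
 * A minimal fn() for walk_stackframe() (illustrative only): print each PC
 * and continue; returning non-zero instead would stop the walk.
 *
 *	static int print_pc(struct stackframe *frame, void *data)
 *	{
 *		pr_info("%pS\n", (void *)frame->pc);
 *		return 0;
 *	}
 *
 * save_trace() below is the real consumer used by the stacktrace API.
 */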

#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};

static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	return trace->nr_entries >= trace->max_entries;
}

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = 0;

	start_backtrace(&frame, regs->regs[29], regs->pc);
	walk_stackframe(current, &frame, save_trace, &data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
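
/*
 * Illustrative use of this API (hypothetical caller, not part of this
 * file): record up to 16 entries and print them, e.g.
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *	};
 *
 *	save_stack_trace(&trace);
 *	print_stack_trace(&trace, 0);
 */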

static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	if (!try_get_task_stack(tsk))
		return;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
		start_backtrace(&frame, thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	} else {
		/* Skip this function and its caller */
		data.skip += 2;
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)__save_stack_trace);
	}

	walk_stackframe(tsk, &frame, save_trace, &data);

	put_task_stack(tsk);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif