// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Kernel unwind state
 *
 * @common:	Common unwind state.
 * @task:	The task being unwound.
 * @kr_cur:	When KRETPROBES is selected, holds the kretprobe instance
 *		associated with the most recently encountered replacement lr
 *		value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
}

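/*
 * Recover the original return address where the function graph tracer or a
 * kretprobe has replaced it in a frame record with the address of a return
 * trampoline.
 */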
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
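		/*
		 * The function graph tracer has modified the return
		 * address (LR) in this stack frame to hook the function
		 * return, so replace it with the original value.
		 */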
		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
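	/*
	 * A kretprobe has replaced the return address with the address of
	 * its trampoline, so look up the original return address it saved.
	 */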
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(&state->common);
	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

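/*
 * Core unwind loop: recover the real starting return address, then feed each
 * unwound state to @consume_state until it declines an entry or kunwind_next()
 * reports an error or reaches the final frame.
 */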
static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

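/*
 * Walk the kernel stacks reachable from the given starting point. Unwinding
 * another task's regs is not supported, since the regs must be on a stack
 * currently owned by the calling task.
 */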
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

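/*
 * Adapt the generic stack_trace_consume_fn interface, which only sees a PC
 * value, to the richer kunwind consumer interface.
 */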
struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}

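/*
 * Example usage (a minimal sketch, not part of this file): a caller collects
 * a backtrace by passing arch_stack_walk() a stack_trace_consume_fn which
 * records each unwound PC into a buffer of its choosing:
 *
 *	struct trace_buf { unsigned long pcs[16]; unsigned int nr; };
 *
 *	static bool save_entry(void *cookie, unsigned long pc)
 *	{
 *		struct trace_buf *buf = cookie;
 *
 *		if (buf->nr >= ARRAY_SIZE(buf->pcs))
 *			return false;
 *		buf->pcs[buf->nr++] = pc;
 *		return true;
 *	}
 *
 *	arch_stack_walk(save_entry, &buf, current, NULL);
 */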
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}