// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *	sub	sp, sp, #0x10
 *	stp	x29, x30, [sp]
 *	mov	x29, sp
 *
 * A simple function epilogue looks like this:
 *	mov	sp, x29
 *	ldp	x29, x30, [sp]
 *	add	sp, sp, #0x10
 */
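
/*
 * Each frame record is therefore a pair of words at the address x29 points
 * to: the saved x29 links to the caller's record and the saved x30 holds the
 * return address into the caller. Purely as an illustration (this struct is
 * not defined anywhere; see the fp/fp+8 loads in unwind_frame() below), the
 * in-memory layout can be pictured as:
 *
 *	struct frame_record {
 *		unsigned long fp;	// caller's x29: next record in the chain
 *		unsigned long lr;	// saved x30: return address in the caller
 *	};
 */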

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	struct stack_info info;

	if (fp & 0xf)
		return -EINVAL;

	if (!tsk)
		tsk = current;

	if (!on_accessible_stack(tsk, fp, &info))
		return -EINVAL;

	if (test_bit(info.type, frame->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == frame->prev_type) {
		if (fp <= frame->prev_fp)
			return -EINVAL;
	} else {
		set_bit(frame->prev_type, frame->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_frame() invocation.
	 */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
	frame->prev_fp = fp;
	frame->prev_type = info.type;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
	    (frame->pc == (unsigned long)return_to_handler)) {
		struct ftrace_ret_stack *ret_stack;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame to hook
		 * a function return, so replace it with the original value.
		 */
		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
		if (WARN_ON_ONCE(!ret_stack))
			return -EINVAL;
		frame->pc = ret_stack->ret;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	/*
	 * Frames created upon entry from EL0 have NULL FP and PC values, so
	 * don't bother reporting these. Frames created by __noreturn functions
	 * might have a valid FP even if PC is bogus, so only terminate where
	 * both are NULL.
	 */
	if (!frame->fp && !frame->pc)
		return -EINVAL;

	return 0;
}
NOKPROBE_SYMBOL(unwind_frame);

void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

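		/* A non-zero return from the callback terminates the walk early. */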
		if (fn(frame, data))
			break;
		ret = unwind_frame(tsk, frame);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(walk_stackframe);
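
/*
 * Purely illustrative sketch (not part of this file; print_entry() and
 * print_current_stack() are hypothetical names): a caller initialises a
 * stackframe with start_backtrace(), then walks it with a callback which
 * returns non-zero to stop early.
 *
 *	static int print_entry(struct stackframe *frame, void *data)
 *	{
 *		pr_info("%pS\n", (void *)frame->pc);
 *		return 0;	// keep walking
 *	}
 *
 *	static noinline void print_current_stack(void)
 *	{
 *		struct stackframe frame;
 *
 *		start_backtrace(&frame,
 *				(unsigned long)__builtin_frame_address(0),
 *				(unsigned long)print_current_stack);
 *		walk_stackframe(current, &frame, print_entry, NULL);
 *	}
 */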

#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};

static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

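	/* Returning non-zero tells walk_stackframe() to stop: the buffer is full. */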
	return trace->nr_entries >= trace->max_entries;
}

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = 0;

	start_backtrace(&frame, regs->regs[29], regs->pc);
	walk_stackframe(current, &frame, save_trace, &data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	if (!try_get_task_stack(tsk))
		return;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
		start_backtrace(&frame, thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	} else {
		/* We don't want this function nor the caller */
		data.skip += 2;
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)__save_stack_trace);
	}

	walk_stackframe(tsk, &frame, save_trace, &data);

	put_task_stack(tsk);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
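
/*
 * Illustrative only (the buffer name and size are hypothetical): a typical
 * caller captures into a fixed-size buffer, then prints it with the generic
 * stack_trace_print() helper.
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *	};
 *
 *	save_stack_trace(&trace);
 *	stack_trace_print(entries, trace.nr_entries, 0);
 */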
#endif
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void unwind_init_from_regs(struct unwind_state *state,
						  struct pt_regs *regs)
{
	unwind_init_common(state, current);

	state->fp = regs->regs[29];
	state->pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
	unwind_init_common(state, current);

	state->fp = (unsigned long)__builtin_frame_address(1);
	state->pc = (unsigned long)__builtin_return_address(0);
}
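
/*
 * Illustration only (trace_from_here() is a hypothetical caller): because
 * __builtin_frame_address(1) is resolved within whichever function this
 * helper is inlined into, that function must itself be noinline, e.g.
 *
 *	static noinline void trace_from_here(void)
 *	{
 *		struct unwind_state state = { ... };
 *
 *		unwind_init_from_caller(&state);
 *		...
 *	}
 *
 * Compare arch_stack_walk() below, which is marked noinline for this reason.
 */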

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void unwind_init_from_task(struct unwind_state *state,
						  struct task_struct *task)
{
	unwind_init_common(state, task);

	state->fp = thread_saved_fp(task);
	state->pc = thread_saved_pc(task);
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct unwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(state);
	if (err)
		return err;

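	/* Strip any pointer-authentication bits so the PC is a canonical address. */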
	state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
	    (state->pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame to hook
		 * a function return, so replace it with the original value.
		 */
		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
						(void *)state->fp);
		if (WARN_ON_ONCE(state->pc == orig_pc))
			return -EINVAL;
		state->pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->pc))
		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif

	return 0;
}
NOKPROBE_SYMBOL(unwind_next);

static void notrace unwind(struct unwind_state *state,
			   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(unwind);

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
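	/* Keep this frame: stop the compiler tail-calling dump_backtrace(). */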
	barrier();
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

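/*
 * The EFI runtime stack is only accessible when unwinding the current task
 * while it is executing EFI runtime services.
 */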
#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
				      void *cookie, struct task_struct *task,
				      struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct unwind_state state = {
		.stacks = stacks,
		.nr_stacks = ARRAY_SIZE(stacks),
	};

	if (regs) {
		if (task != current)
			return;
		unwind_init_from_regs(&state, regs);
	} else if (task == current) {
		unwind_init_from_caller(&state);
	} else {
		unwind_init_from_task(&state, task);
	}

	unwind(&state, consume_entry, cookie);
}
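
/*
 * Illustration only (count_entries() and its caller are hypothetical): the
 * generic stacktrace core normally drives arch_stack_walk() via helpers such
 * as stack_trace_save(), but a direct caller looks like dump_backtrace()
 * above, e.g. to measure stack depth:
 *
 *	static bool count_entries(void *arg, unsigned long where)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return true;	// keep walking
 *	}
 *
 *	unsigned int depth = 0;
 *
 *	arch_stack_walk(count_entries, &depth, current, NULL);
 */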