v6.8 (arch/arm64/kernel/stacktrace.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};
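
/*
 * Initialise the unwind state common to all kernel unwinds, and record
 * which task is being unwound.
 */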
static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
}
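
/*
 * If the PC found in a frame record has been rewritten by the function
 * graph tracer or a kretprobe trampoline, recover the original return
 * address so the trace reports the real caller.
 */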
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(&state->common);
	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);
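
/*
 * Core unwind loop: hand each unwind state to the consumer until it
 * declines further frames or the unwind cannot continue.
 */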
static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})
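
/*
 * Build the table of stacks the unwinder may traverse for this task,
 * pick a starting fp/pc based on what the caller provided, then walk.
 */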
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}
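
/*
 * Adapter between the generic stack_trace_consume_fn callback and the
 * kunwind consumer interface: only the PC of each frame is reported.
 */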
struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}
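
/*
 * arch_stack_walk() is the arm64 backend for the generic stacktrace API
 * (e.g. stack_trace_save()): it reports each return address to
 * consume_entry until the callback returns false.
 */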
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
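
/* Print one backtrace entry at the requested log level. */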
static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}
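
A usage sketch, not part of the file above: a debugging hook can capture and print the current task's kernel stack through the generic stacktrace API, which on arm64 lands in arch_stack_walk(). The function name and buffer size below are illustrative; stack_trace_save() and stack_trace_print() are the real generic interfaces.

#include <linux/stacktrace.h>

static void example_dump_current_stack(void)
{
	unsigned long entries[16];
	unsigned int nr;

	/* Record up to 16 return addresses from this point, skipping none. */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* Print each saved entry via printk("%pS"). */
	stack_trace_print(entries, nr, 0);
}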
v5.9 (arch/arm64/kernel/stacktrace.c)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *	sub	sp, sp, #0x10
 *	stp	x29, x30, [sp]
 *	mov	x29, sp
 *
 * A simple function epilogue looks like this:
 *	mov	sp, x29
 *	ldp	x29, x30, [sp]
 *	add	sp, sp, #0x10
 */
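
/*
 * A frame record is thus the {x29, x30} pair stored at the location
 * x29 points to; unwind_frame() below follows that chain one step.
 */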
/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	struct stack_info info;

	if (fp & 0xf)
		return -EINVAL;

	if (!tsk)
		tsk = current;

	if (!on_accessible_stack(tsk, fp, &info))
		return -EINVAL;

	if (test_bit(info.type, frame->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == frame->prev_type) {
		if (fp <= frame->prev_fp)
			return -EINVAL;
	} else {
		set_bit(frame->prev_type, frame->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_frame() invocation.
	 */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
	frame->prev_fp = fp;
	frame->prev_type = info.type;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
		(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
		struct ftrace_ret_stack *ret_stack;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it with the original value.
		 */
		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
		if (WARN_ON_ONCE(!ret_stack))
			return -EINVAL;
		frame->pc = ret_stack->ret;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	frame->pc = ptrauth_strip_insn_pac(frame->pc);

	/*
	 * Frames created upon entry from EL0 have NULL FP and PC values, so
	 * don't bother reporting these. Frames created by __noreturn functions
	 * might have a valid FP even if PC is bogus, so only terminate where
	 * both are NULL.
	 */
	if (!frame->fp && !frame->pc)
		return -EINVAL;

	return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
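
/*
 * Walk the stack from the given starting frame, calling fn for each
 * frame until fn returns non-zero or the unwind fails.
 */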
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
		     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(tsk, frame);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(walk_stackframe);

#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};
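
/*
 * walk_stackframe() callback: filter out scheduler functions and skipped
 * entries, then store each PC; returns non-zero once the buffer is full,
 * which terminates the walk.
 */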
static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	return trace->nr_entries >= trace->max_entries;
}
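
/* Capture a stack trace starting from the fp/pc in the given pt_regs. */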
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = 0;

	start_backtrace(&frame, regs->regs[29], regs->pc);
	walk_stackframe(current, &frame, save_trace, &data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
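
/*
 * Common helper for save_stack_trace() and save_stack_trace_tsk():
 * start from the saved context of a blocked task, or from this call
 * site for the current task.
 */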
static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	if (!try_get_task_stack(tsk))
		return;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
		start_backtrace(&frame, thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	} else {
		/* We don't want this function or its caller */
		data.skip += 2;
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)__save_stack_trace);
	}

	walk_stackframe(tsk, &frame, save_trace, &data);

	put_task_stack(tsk);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}

EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
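
For contrast, a sketch of driving the legacy interface implemented above. struct stack_trace, save_stack_trace() and the %pS printk format are the real v5.9 APIs; the function name and buffer size are illustrative.

#include <linux/printk.h>
#include <linux/stacktrace.h>

static void example_legacy_dump(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 0,
	};
	unsigned int i;

	/* save_stack_trace() fills entries[] via walk_stackframe(). */
	save_stack_trace(&trace);

	for (i = 0; i < trace.nr_entries; i++)
		pr_info("%pS\n", (void *)entries[i]);
}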