Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008 ARM Limited
4 * Copyright (C) 2014 Regents of the University of California
5 */
6
7#include <linux/export.h>
8#include <linux/kallsyms.h>
9#include <linux/sched.h>
10#include <linux/sched/debug.h>
11#include <linux/sched/task_stack.h>
12#include <linux/stacktrace.h>
13#include <linux/ftrace.h>
14
15#ifdef CONFIG_FRAME_POINTER
16
/*
 * Two-word frame record that GCC saves at the top of each stack frame when
 * built with frame pointers: the caller's frame pointer and the return
 * address.  The unwinder reads it at (fp - sizeof(struct stackframe)).
 */
struct stackframe {
	unsigned long fp;	/* caller's frame pointer (s0) */
	unsigned long ra;	/* return address into the caller */
};
21
22void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
23 bool (*fn)(unsigned long, void *), void *arg)
24{
25 unsigned long fp, sp, pc;
26
27 if (regs) {
28 fp = frame_pointer(regs);
29 sp = user_stack_pointer(regs);
30 pc = instruction_pointer(regs);
31 } else if (task == NULL || task == current) {
32 const register unsigned long current_sp __asm__ ("sp");
33 fp = (unsigned long)__builtin_frame_address(0);
34 sp = current_sp;
35 pc = (unsigned long)walk_stackframe;
36 } else {
37 /* task blocked in __switch_to */
38 fp = task->thread.s[0];
39 sp = task->thread.sp;
40 pc = task->thread.ra;
41 }
42
43 for (;;) {
44 unsigned long low, high;
45 struct stackframe *frame;
46
47 if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
48 break;
49
50 /* Validate frame pointer */
51 low = sp + sizeof(struct stackframe);
52 high = ALIGN(sp, THREAD_SIZE);
53 if (unlikely(fp < low || fp > high || fp & 0x7))
54 break;
55 /* Unwind stack frame */
56 frame = (struct stackframe *)fp - 1;
57 sp = fp;
58 fp = frame->fp;
59 pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
60 (unsigned long *)(fp - 8));
61 }
62}
63
64#else /* !CONFIG_FRAME_POINTER */
65
66static void notrace walk_stackframe(struct task_struct *task,
67 struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
68{
69 unsigned long sp, pc;
70 unsigned long *ksp;
71
72 if (regs) {
73 sp = user_stack_pointer(regs);
74 pc = instruction_pointer(regs);
75 } else if (task == NULL || task == current) {
76 const register unsigned long current_sp __asm__ ("sp");
77 sp = current_sp;
78 pc = (unsigned long)walk_stackframe;
79 } else {
80 /* task blocked in __switch_to */
81 sp = task->thread.sp;
82 pc = task->thread.ra;
83 }
84
85 if (unlikely(sp & 0x7))
86 return;
87
88 ksp = (unsigned long *)sp;
89 while (!kstack_end(ksp)) {
90 if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
91 break;
92 pc = (*ksp++) - 0x4;
93 }
94}
95
96#endif /* CONFIG_FRAME_POINTER */
97
98
/*
 * walk_stackframe() callback: print one frame's symbolized address.
 * Always returns false so the walk continues to the bottom of the stack.
 */
static bool print_trace_address(unsigned long pc, void *arg)
{
	print_ip_sym(pc);
	return false;
}
104
/*
 * Print "Call Trace:" followed by @task's backtrace.  @sp is accepted for
 * API compatibility but unused; the walk derives its own starting point.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	pr_cont("Call Trace:\n");
	walk_stackframe(task, NULL, print_trace_address, NULL);
}
110
111
112static bool save_wchan(unsigned long pc, void *arg)
113{
114 if (!in_sched_functions(pc)) {
115 unsigned long *p = arg;
116 *p = pc;
117 return true;
118 }
119 return false;
120}
121
/*
 * Return the address where blocked task @task is sleeping, or 0 when the
 * task is running / is current (its stack is changing under us) or no
 * non-scheduler frame was found.
 */
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	/* Only walk stacks of tasks that are stably blocked. */
	if (likely(task && task != current && task->state != TASK_RUNNING))
		walk_stackframe(task, NULL, save_wchan, &pc);
	return pc;
}
130
131
132#ifdef CONFIG_STACKTRACE
133
134static bool __save_trace(unsigned long pc, void *arg, bool nosched)
135{
136 struct stack_trace *trace = arg;
137
138 if (unlikely(nosched && in_sched_functions(pc)))
139 return false;
140 if (unlikely(trace->skip > 0)) {
141 trace->skip--;
142 return false;
143 }
144
145 trace->entries[trace->nr_entries++] = pc;
146 return (trace->nr_entries >= trace->max_entries);
147}
148
/* walk_stackframe() adapter: record every frame (nosched == false). */
static bool save_trace(unsigned long pc, void *arg)
{
	return __save_trace(pc, arg, false);
}
153
154/*
155 * Save stack-backtrace addresses into a stack_trace buffer.
156 */
/*
 * Save stack-backtrace addresses of @tsk into a stack_trace buffer.
 * @tsk may be NULL, meaning the current task.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	walk_stackframe(tsk, NULL, save_trace, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
162
/* Save the current task's backtrace into @trace. */
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
168
169#endif /* CONFIG_STACKTRACE */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008 ARM Limited
4 * Copyright (C) 2014 Regents of the University of California
5 */
6
7#include <linux/export.h>
8#include <linux/kallsyms.h>
9#include <linux/sched.h>
10#include <linux/sched/debug.h>
11#include <linux/sched/task_stack.h>
12#include <linux/stacktrace.h>
13#include <linux/ftrace.h>
14
15#include <asm/stacktrace.h>
16
17#ifdef CONFIG_FRAME_POINTER
18
19extern asmlinkage void handle_exception(void);
20extern unsigned long ret_from_exception_end;
21
22static inline int fp_is_valid(unsigned long fp, unsigned long sp)
23{
24 unsigned long low, high;
25
26 low = sp + sizeof(struct stackframe);
27 high = ALIGN(sp, THREAD_SIZE);
28
29 return !(fp < low || fp > high || fp & 0x07);
30}
31
/*
 * Walk the kernel stack using the frame-pointer chain, feeding each return
 * address to @fn.  Stops when @fn returns false, the pc leaves kernel text,
 * or the next frame pointer fails validation.  Handles two special cases:
 * frames where ra was never spilled (leaf at the trap point) and crossing
 * an exception frame back into the interrupted context.
 *
 * @task: task to walk; NULL or current means the running task.
 * @regs: if non-NULL, start from this trapped register state.
 * @fn:   consumer; return false to terminate the walk.
 * @arg:  opaque cookie passed through to @fn.
 */
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long fp, sp, pc;
	int graph_idx = 0;
	int level = 0;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_stack_pointer;
		pc = (unsigned long)walk_stackframe;
		/* level = -1 hides walk_stackframe's own frame from @fn */
		level = -1;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		struct stackframe *frame;

		/* level++ >= 0 skips reporting exactly the first frame when level started at -1 */
		if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
			break;

		if (unlikely(!fp_is_valid(fp, sp)))
			break;

		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
			/* We hit function where ra is not saved on the stack */
			fp = frame->ra;
			pc = regs->ra;
		} else {
			fp = frame->fp;
			/* Map a graph-return trampoline back to the real caller. */
			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
						   &frame->ra);
			if (pc >= (unsigned long)handle_exception &&
			    pc < (unsigned long)&ret_from_exception_end) {
				/*
				 * Crossed an exception frame: report the
				 * trampoline itself, then continue from the
				 * pt_regs that sit at sp.
				 */
				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
					break;

				pc = ((struct pt_regs *)sp)->epc;
				fp = ((struct pt_regs *)sp)->s0;
			}
		}

	}
}
87
88#else /* !CONFIG_FRAME_POINTER */
89
90void notrace walk_stackframe(struct task_struct *task,
91 struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
92{
93 unsigned long sp, pc;
94 unsigned long *ksp;
95
96 if (regs) {
97 sp = user_stack_pointer(regs);
98 pc = instruction_pointer(regs);
99 } else if (task == NULL || task == current) {
100 sp = current_stack_pointer;
101 pc = (unsigned long)walk_stackframe;
102 } else {
103 /* task blocked in __switch_to */
104 sp = task->thread.sp;
105 pc = task->thread.ra;
106 }
107
108 if (unlikely(sp & 0x7))
109 return;
110
111 ksp = (unsigned long *)sp;
112 while (!kstack_end(ksp)) {
113 if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
114 break;
115 pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
116 }
117}
118
119#endif /* CONFIG_FRAME_POINTER */
120
121static bool print_trace_address(void *arg, unsigned long pc)
122{
123 const char *loglvl = arg;
124
125 print_ip_sym(loglvl, pc);
126 return true;
127}
128
/*
 * Print a backtrace of @task (or of the trapped context in @regs) at log
 * level @loglvl.  NOTE(review): marked noinline, presumably to keep this
 * call as its own stack frame — confirm against unwinder expectations.
 */
noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
			     const char *loglvl)
{
	walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}
134
/*
 * Print "Call Trace:" and @task's backtrace at @loglvl.  @sp is accepted
 * for API compatibility but unused; the walk derives its own start point.
 */
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	pr_cont("%sCall Trace:\n", loglvl);
	dump_backtrace(NULL, task, loglvl);
}
140
141static bool save_wchan(void *arg, unsigned long pc)
142{
143 if (!in_sched_functions(pc)) {
144 unsigned long *p = arg;
145 *p = pc;
146 return false;
147 }
148 return true;
149}
150
/*
 * Arch hook behind /proc wchan: return the first non-scheduler return
 * address on @task's stack, or 0 when the stack cannot be pinned or no
 * such frame exists.
 */
unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	/* Pin the stack so it cannot be freed while we walk it. */
	if (!try_get_task_stack(task))
		return 0;
	walk_stackframe(task, NULL, save_wchan, &pc);
	put_task_stack(task);
	return pc;
}
161
/*
 * Generic stacktrace entry point: feed each kernel return address of
 * @task (or of the trapped context in @regs) to @consume_entry with
 * @cookie.  noinstr: runs in contexts where instrumentation is unsafe.
 */
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
				      struct task_struct *task, struct pt_regs *regs)
{
	walk_stackframe(task, regs, consume_entry, cookie);
}
167
168/*
169 * Get the return address for a single stackframe and return a pointer to the
170 * next frame tail.
171 */
172static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
173 void *cookie, unsigned long fp,
174 unsigned long reg_ra)
175{
176 struct stackframe buftail;
177 unsigned long ra = 0;
178 unsigned long __user *user_frame_tail =
179 (unsigned long __user *)(fp - sizeof(struct stackframe));
180
181 /* Check accessibility of one struct frame_tail beyond */
182 if (!access_ok(user_frame_tail, sizeof(buftail)))
183 return 0;
184 if (__copy_from_user_inatomic(&buftail, user_frame_tail,
185 sizeof(buftail)))
186 return 0;
187
188 ra = reg_ra ? : buftail.ra;
189
190 fp = buftail.fp;
191 if (!ra || !consume_entry(cookie, ra))
192 return 0;
193
194 return fp;
195}
196
197void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
198 const struct pt_regs *regs)
199{
200 unsigned long fp = 0;
201
202 fp = regs->s0;
203 if (!consume_entry(cookie, regs->epc))
204 return;
205
206 fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
207 while (fp && !(fp & 0x7))
208 fp = unwind_user_frame(consume_entry, cookie, fp, 0);
209}