Loading...
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Stack trace utility functions etc.
5 *
6 * Copyright 2008 Christoph Hellwig, IBM Corp.
7 * Copyright 2018 SUSE Linux GmbH
8 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
9 */
10
11#include <linux/delay.h>
12#include <linux/export.h>
13#include <linux/kallsyms.h>
14#include <linux/module.h>
15#include <linux/nmi.h>
16#include <linux/sched.h>
17#include <linux/sched/debug.h>
18#include <linux/sched/task_stack.h>
19#include <linux/stacktrace.h>
20#include <asm/ptrace.h>
21#include <asm/processor.h>
22#include <linux/ftrace.h>
23#include <asm/kprobes.h>
24
25#include <asm/paca.h>
26
27void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
28 struct task_struct *task, struct pt_regs *regs)
29{
30 unsigned long sp;
31
32 if (regs && !consume_entry(cookie, regs->nip))
33 return;
34
35 if (regs)
36 sp = regs->gpr[1];
37 else if (task == current)
38 sp = current_stack_frame();
39 else
40 sp = task->thread.ksp;
41
42 for (;;) {
43 unsigned long *stack = (unsigned long *) sp;
44 unsigned long newsp, ip;
45
46 if (!validate_sp(sp, task))
47 return;
48
49 newsp = stack[0];
50 ip = stack[STACK_FRAME_LR_SAVE];
51
52 if (!consume_entry(cookie, ip))
53 return;
54
55 sp = newsp;
56 }
57}
58
59/*
60 * This function returns an error if it detects any unreliable features of the
61 * stack. Otherwise it guarantees that the stack trace is reliable.
62 *
63 * If the task is not 'current', the caller *must* ensure the task is inactive.
64 */
65int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
66 void *cookie, struct task_struct *task)
67{
68 unsigned long sp;
69 unsigned long newsp;
70 unsigned long stack_page = (unsigned long)task_stack_page(task);
71 unsigned long stack_end;
72 int graph_idx = 0;
73 bool firstframe;
74
75 stack_end = stack_page + THREAD_SIZE;
76 if (!is_idle_task(task)) {
77 /*
78 * For user tasks, this is the SP value loaded on
79 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
80 * system_call_common().
81 *
82 * Likewise for non-swapper kernel threads,
83 * this also happens to be the top of the stack
84 * as setup by copy_thread().
85 *
86 * Note that stack backlinks are not properly setup by
87 * copy_thread() and thus, a forked task() will have
88 * an unreliable stack trace until it's been
89 * _switch()'ed to for the first time.
90 */
91 stack_end -= STACK_USER_INT_FRAME_SIZE;
92 } else {
93 /*
94 * idle tasks have a custom stack layout,
95 * c.f. cpu_idle_thread_init().
96 */
97 stack_end -= STACK_FRAME_MIN_SIZE;
98 }
99
100 if (task == current)
101 sp = current_stack_frame();
102 else
103 sp = task->thread.ksp;
104
105 if (sp < stack_page + sizeof(struct thread_struct) ||
106 sp > stack_end - STACK_FRAME_MIN_SIZE) {
107 return -EINVAL;
108 }
109
110 for (firstframe = true; sp != stack_end;
111 firstframe = false, sp = newsp) {
112 unsigned long *stack = (unsigned long *) sp;
113 unsigned long ip;
114
115 /* sanity check: ABI requires SP to be aligned 16 bytes. */
116 if (sp & 0xF)
117 return -EINVAL;
118
119 newsp = stack[0];
120 /* Stack grows downwards; unwinder may only go up. */
121 if (newsp <= sp)
122 return -EINVAL;
123
124 if (newsp != stack_end &&
125 newsp > stack_end - STACK_FRAME_MIN_SIZE) {
126 return -EINVAL; /* invalid backlink, too far up. */
127 }
128
129 /*
130 * We can only trust the bottom frame's backlink, the
131 * rest of the frame may be uninitialized, continue to
132 * the next.
133 */
134 if (firstframe)
135 continue;
136
137 /* Mark stacktraces with exception frames as unreliable. */
138 if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
139 stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
140 return -EINVAL;
141 }
142
143 /* Examine the saved LR: it must point into kernel code. */
144 ip = stack[STACK_FRAME_LR_SAVE];
145 if (!__kernel_text_address(ip))
146 return -EINVAL;
147
148 /*
149 * FIXME: IMHO these tests do not belong in
150 * arch-dependent code, they are generic.
151 */
152 ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
153#ifdef CONFIG_KPROBES
154 /*
155 * Mark stacktraces with kretprobed functions on them
156 * as unreliable.
157 */
158 if (ip == (unsigned long)__kretprobe_trampoline)
159 return -EINVAL;
160#endif
161
162 if (!consume_entry(cookie, ip))
163 return -EINVAL;
164 }
165 return 0;
166}
167
168#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
169static void handle_backtrace_ipi(struct pt_regs *regs)
170{
171 nmi_cpu_backtrace(regs);
172}
173
174static void raise_backtrace_ipi(cpumask_t *mask)
175{
176 struct paca_struct *p;
177 unsigned int cpu;
178 u64 delay_us;
179
180 for_each_cpu(cpu, mask) {
181 if (cpu == smp_processor_id()) {
182 handle_backtrace_ipi(NULL);
183 continue;
184 }
185
186 delay_us = 5 * USEC_PER_SEC;
187
188 if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
189 // Now wait up to 5s for the other CPU to do its backtrace
190 while (cpumask_test_cpu(cpu, mask) && delay_us) {
191 udelay(1);
192 delay_us--;
193 }
194
195 // Other CPU cleared itself from the mask
196 if (delay_us)
197 continue;
198 }
199
200 p = paca_ptrs[cpu];
201
202 cpumask_clear_cpu(cpu, mask);
203
204 pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
205 if (!virt_addr_valid(p)) {
206 pr_warn("paca pointer appears corrupt? (%px)\n", p);
207 continue;
208 }
209
210 pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
211 p->irq_soft_mask, p->in_mce, p->in_nmi);
212
213 if (virt_addr_valid(p->__current))
214 pr_cont(" current: %d (%s)\n", p->__current->pid,
215 p->__current->comm);
216 else
217 pr_cont(" current pointer corrupt? (%px)\n", p->__current);
218
219 pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
220 show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
221 }
222}
223
224void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
225{
226 nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
227}
228#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
1// SPDX-License-Identifier: GPL-2.0
2
3/*
4 * Stack trace utility functions etc.
5 *
6 * Copyright 2008 Christoph Hellwig, IBM Corp.
7 * Copyright 2018 SUSE Linux GmbH
8 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
9 */
10
11#include <linux/export.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/nmi.h>
15#include <linux/sched.h>
16#include <linux/sched/debug.h>
17#include <linux/sched/task_stack.h>
18#include <linux/stacktrace.h>
19#include <asm/ptrace.h>
20#include <asm/processor.h>
21#include <linux/ftrace.h>
22#include <asm/kprobes.h>
23
24#include <asm/paca.h>
25
26/*
27 * Save stack-backtrace addresses into a stack_trace buffer.
28 */
29static void save_context_stack(struct stack_trace *trace, unsigned long sp,
30 struct task_struct *tsk, int savesched)
31{
32 for (;;) {
33 unsigned long *stack = (unsigned long *) sp;
34 unsigned long newsp, ip;
35
36 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
37 return;
38
39 newsp = stack[0];
40 ip = stack[STACK_FRAME_LR_SAVE];
41
42 if (savesched || !in_sched_functions(ip)) {
43 if (!trace->skip)
44 trace->entries[trace->nr_entries++] = ip;
45 else
46 trace->skip--;
47 }
48
49 if (trace->nr_entries >= trace->max_entries)
50 return;
51
52 sp = newsp;
53 }
54}
55
56void save_stack_trace(struct stack_trace *trace)
57{
58 unsigned long sp;
59
60 sp = current_stack_frame();
61
62 save_context_stack(trace, sp, current, 1);
63}
64EXPORT_SYMBOL_GPL(save_stack_trace);
65
66void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
67{
68 unsigned long sp;
69
70 if (!try_get_task_stack(tsk))
71 return;
72
73 if (tsk == current)
74 sp = current_stack_frame();
75 else
76 sp = tsk->thread.ksp;
77
78 save_context_stack(trace, sp, tsk, 0);
79
80 put_task_stack(tsk);
81}
82EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
83
84void
85save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
86{
87 save_context_stack(trace, regs->gpr[1], current, 0);
88}
89EXPORT_SYMBOL_GPL(save_stack_trace_regs);
90
91#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
92/*
93 * This function returns an error if it detects any unreliable features of the
94 * stack. Otherwise it guarantees that the stack trace is reliable.
95 *
96 * If the task is not 'current', the caller *must* ensure the task is inactive.
97 */
98static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
99 struct stack_trace *trace)
100{
101 unsigned long sp;
102 unsigned long newsp;
103 unsigned long stack_page = (unsigned long)task_stack_page(tsk);
104 unsigned long stack_end;
105 int graph_idx = 0;
106 bool firstframe;
107
108 stack_end = stack_page + THREAD_SIZE;
109 if (!is_idle_task(tsk)) {
110 /*
111 * For user tasks, this is the SP value loaded on
112 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
113 * system_call_common()/EXCEPTION_PROLOG_COMMON().
114 *
115 * Likewise for non-swapper kernel threads,
116 * this also happens to be the top of the stack
117 * as setup by copy_thread().
118 *
119 * Note that stack backlinks are not properly setup by
120 * copy_thread() and thus, a forked task() will have
121 * an unreliable stack trace until it's been
122 * _switch()'ed to for the first time.
123 */
124 stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
125 } else {
126 /*
127 * idle tasks have a custom stack layout,
128 * c.f. cpu_idle_thread_init().
129 */
130 stack_end -= STACK_FRAME_OVERHEAD;
131 }
132
133 if (tsk == current)
134 sp = current_stack_frame();
135 else
136 sp = tsk->thread.ksp;
137
138 if (sp < stack_page + sizeof(struct thread_struct) ||
139 sp > stack_end - STACK_FRAME_MIN_SIZE) {
140 return -EINVAL;
141 }
142
143 for (firstframe = true; sp != stack_end;
144 firstframe = false, sp = newsp) {
145 unsigned long *stack = (unsigned long *) sp;
146 unsigned long ip;
147
148 /* sanity check: ABI requires SP to be aligned 16 bytes. */
149 if (sp & 0xF)
150 return -EINVAL;
151
152 newsp = stack[0];
153 /* Stack grows downwards; unwinder may only go up. */
154 if (newsp <= sp)
155 return -EINVAL;
156
157 if (newsp != stack_end &&
158 newsp > stack_end - STACK_FRAME_MIN_SIZE) {
159 return -EINVAL; /* invalid backlink, too far up. */
160 }
161
162 /*
163 * We can only trust the bottom frame's backlink, the
164 * rest of the frame may be uninitialized, continue to
165 * the next.
166 */
167 if (firstframe)
168 continue;
169
170 /* Mark stacktraces with exception frames as unreliable. */
171 if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
172 stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
173 return -EINVAL;
174 }
175
176 /* Examine the saved LR: it must point into kernel code. */
177 ip = stack[STACK_FRAME_LR_SAVE];
178 if (!__kernel_text_address(ip))
179 return -EINVAL;
180
181 /*
182 * FIXME: IMHO these tests do not belong in
183 * arch-dependent code, they are generic.
184 */
185 ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack);
186#ifdef CONFIG_KPROBES
187 /*
188 * Mark stacktraces with kretprobed functions on them
189 * as unreliable.
190 */
191 if (ip == (unsigned long)kretprobe_trampoline)
192 return -EINVAL;
193#endif
194
195 if (trace->nr_entries >= trace->max_entries)
196 return -E2BIG;
197 if (!trace->skip)
198 trace->entries[trace->nr_entries++] = ip;
199 else
200 trace->skip--;
201 }
202 return 0;
203}
204
205int save_stack_trace_tsk_reliable(struct task_struct *tsk,
206 struct stack_trace *trace)
207{
208 int ret;
209
210 /*
211 * If the task doesn't have a stack (e.g., a zombie), the stack is
212 * "reliably" empty.
213 */
214 if (!try_get_task_stack(tsk))
215 return 0;
216
217 ret = __save_stack_trace_tsk_reliable(tsk, trace);
218
219 put_task_stack(tsk);
220
221 return ret;
222}
223#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
224
225#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
226static void handle_backtrace_ipi(struct pt_regs *regs)
227{
228 nmi_cpu_backtrace(regs);
229}
230
231static void raise_backtrace_ipi(cpumask_t *mask)
232{
233 unsigned int cpu;
234
235 for_each_cpu(cpu, mask) {
236 if (cpu == smp_processor_id())
237 handle_backtrace_ipi(NULL);
238 else
239 smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
240 }
241
242 for_each_cpu(cpu, mask) {
243 struct paca_struct *p = paca_ptrs[cpu];
244
245 cpumask_clear_cpu(cpu, mask);
246
247 pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
248 if (!virt_addr_valid(p)) {
249 pr_warn("paca pointer appears corrupt? (%px)\n", p);
250 continue;
251 }
252
253 pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
254 p->irq_soft_mask, p->in_mce, p->in_nmi);
255
256 if (virt_addr_valid(p->__current))
257 pr_cont(" current: %d (%s)\n", p->__current->pid,
258 p->__current->comm);
259 else
260 pr_cont(" current pointer corrupt? (%px)\n", p->__current);
261
262 pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
263 show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
264 }
265}
266
267void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
268{
269 nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
270}
271#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */