/*
 * Stack trace utility
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_context_stack(struct stack_trace *trace, unsigned long sp,
			struct task_struct *tsk, int savesched)
{
	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (savesched || !in_sched_functions(ip)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = ip;
			else
				trace->skip--;
		}

		if (trace->nr_entries >= trace->max_entries)
			return;

		sp = newsp;
	}
}
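
/*
 * Illustrative sketch (not part of the original file): callers of the
 * save_stack_trace*() functions below supply a preallocated entry buffer
 * via struct stack_trace. Assuming the generic <linux/stacktrace.h>
 * fields of this era (entries, max_entries, nr_entries, skip), usage
 * looks roughly like:
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 1,	// drop the caller's own frame
 *	};
 *
 *	save_stack_trace(&trace);
 *	// trace.nr_entries return addresses now sit in entries[]
 */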

void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();

	save_context_stack(trace, sp, current, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	save_context_stack(trace, sp, tsk, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
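
/*
 * Note: the save_stack_trace*() family above is the older stacktrace
 * interface; the CONFIG_ARCH_STACKWALK-based arch_stack_walk()
 * implementation that follows supersedes it.
 */
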
// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>
#include <linux/rethook.h>

#include <asm/paca.h>
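
/*
 * Both stack walkers below rely on the PowerPC ABI stack frame layout:
 * every frame stores a back chain pointer to the caller's frame at
 * offset 0, and the saved link register lives STACK_FRAME_LR_SAVE
 * longs into the frame (SP + 16 on 64-bit, SP + 4 on 32-bit).
 * Chasing stack[0] therefore walks from the newest frame toward the
 * base of the stack.
 */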
void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
					   struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp;

	if (regs && !consume_entry(cookie, regs->nip))
		return;

	if (regs)
		sp = regs->gpr[1];
	else if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, task))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (!consume_entry(cookie, ip))
			return;

		sp = newsp;
	}
}
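
/*
 * A minimal sketch of a consumer, assuming only the generic
 * stack_trace_consume_fn contract from <linux/stacktrace.h> (return
 * true to continue the walk, false to stop); the names below are
 * hypothetical:
 *
 *	static bool print_frame(void *cookie, unsigned long addr)
 *	{
 *		pr_info("%pS\n", (void *)addr);
 *		return true;	// keep walking
 *	}
 *
 *	arch_stack_walk(print_frame, NULL, current, NULL);
 *
 * The generic helpers in kernel/stacktrace.c wrap arch_stack_walk()
 * with consumers in exactly this style.
 */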

/*
 * This function returns an error if it detects any unreliable features of the
 * stack. Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
						   void *cookie, struct task_struct *task)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;

	// See copy_thread() for details.
	if (task->flags & PF_KTHREAD)
		stack_end -= STACK_FRAME_MIN_SIZE;
	else
		stack_end -= STACK_USER_INT_FRAME_SIZE;

	if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be aligned to 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized, so continue
		 * to the next frame.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code; they are generic.
		 */
		ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);

		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
#ifdef CONFIG_RETHOOK
		if (ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip))
			return -EINVAL;
	}
	return 0;
}
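
/*
 * Callers do not normally invoke arch_stack_walk_reliable() directly;
 * they reach it through the generic stack_trace_save_tsk_reliable()
 * wrapper in kernel/stacktrace.c (used e.g. by livepatch to check that
 * no to-be-patched function is live on a task's stack). A minimal
 * sketch, with a hypothetical buffer size:
 *
 *	unsigned long entries[64];
 *	int nr;
 *
 *	nr = stack_trace_save_tsk_reliable(task, entries,
 *					   ARRAY_SIZE(entries));
 *	if (nr < 0)
 *		return nr;	// unreliable: caller must fall back
 */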

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	struct paca_struct *p;
	unsigned int cpu;
	u64 delay_us;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id()) {
			handle_backtrace_ipi(NULL);
			continue;
		}

		delay_us = 5 * USEC_PER_SEC;

		if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
			// Now wait up to 5s for the other CPU to do its backtrace
			while (cpumask_test_cpu(cpu, mask) && delay_us) {
				udelay(1);
				delay_us--;
			}

			// Other CPU cleared itself from the mask
			if (delay_us)
				continue;
		}

		p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}
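
/*
 * Hooked into the generic NMI backtrace machinery in lib/nmi_backtrace.c:
 * nmi_trigger_cpumask_backtrace() handles mask setup and serialization,
 * then calls back into raise_backtrace_ipi() above. Typical entry points
 * are trigger_all_cpu_backtrace() and the sysrq-l handler.
 */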
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */