v5.9
// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_context_stack(struct stack_trace *trace, unsigned long sp,
			struct task_struct *tsk, int savesched)
{
	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (savesched || !in_sched_functions(ip)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = ip;
			else
				trace->skip--;
		}

		if (trace->nr_entries >= trace->max_entries)
			return;

		sp = newsp;
	}
}
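
The walk above relies on the powerpc ELF ABI back chain: word 0 of each frame points at the caller's frame, and the saved-LR slot (STACK_FRAME_LR_SAVE, word 2 on 64-bit) holds a return address. For illustration only, a minimal user-space sketch of the same walk (assumes a powerpc64 target; LR_SAVE_WORD and show_backtrace() are illustrative names, and the innermost frame's LR slot may hold a stale value, which is why the kernel's reliable unwinder skips the first frame):

#include <stdio.h>

#define LR_SAVE_WORD 2	/* 64-bit ELF ABI: saved LR lives at frame + 16 */

static void show_backtrace(void)
{
	/* stack pointer (r1) of this function's own frame */
	unsigned long sp = (unsigned long)__builtin_frame_address(0);

	while (sp) {
		unsigned long *frame = (unsigned long *)sp;
		unsigned long newsp = frame[0];	/* back chain */

		if (!newsp)	/* the outermost frame stores a NULL back chain */
			break;

		printf("sp=%#lx lr=%#lx\n", sp, frame[LR_SAVE_WORD]);
		sp = newsp;	/* stack grows down, so the walk goes up */
	}
}

int main(void)
{
	show_backtrace();
	return 0;
}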

void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_frame();

	save_context_stack(trace, sp, current, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		sp = current_stack_frame();
	else
		sp = tsk->thread.ksp;

	save_context_stack(trace, sp, tsk, 0);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
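
These exports are the older, pre-ARCH_STACKWALK interface: callers hand in a struct stack_trace whose entries/max_entries/skip fields the functions fill in. A minimal sketch of a v5.9-era caller (dump_current_stack() and MAX_DEPTH are illustrative names, not kernel symbols):

#include <linux/module.h>
#include <linux/stacktrace.h>

#define MAX_DEPTH 16

static void dump_current_stack(void)
{
	unsigned long entries[MAX_DEPTH];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= MAX_DEPTH,
		.skip		= 1,	/* drop dump_current_stack() itself */
	};
	unsigned int i;

	save_stack_trace(&trace);

	for (i = 0; i < trace.nr_entries; i++)
		pr_info("[%u] %pS\n", i, (void *)entries[i]);
}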

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
					   struct stack_trace *trace)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(tsk)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as set up by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread() and thus, a forked task will have
		 * an unreliable stack trace until it's been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * Idle tasks have a custom stack layout,
		 * cf. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (tsk == current)
		sp = current_stack_frame();
	else
		sp = tsk->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be aligned to 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized, so continue to
		 * the next.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (trace->nr_entries >= trace->max_entries)
			return -E2BIG;
		if (!trace->skip)
			trace->entries[trace->nr_entries++] = ip;
		else
			trace->skip--;
	}
	return 0;
}

int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_tsk_reliable(tsk, trace);

	put_task_stack(tsk);

	return ret;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
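
Consumers such as livepatch reach this code through the generic stack_trace_save_tsk_reliable() wrapper, which returns the number of saved entries or a negative errno. A hedged sketch of such a caller (task_stack_is_reliable() is an illustrative name):

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

static bool task_stack_is_reliable(struct task_struct *tsk)
{
	unsigned long entries[32];
	int nr;

	nr = stack_trace_save_tsk_reliable(tsk, entries, ARRAY_SIZE(entries));

	/* A negative errno (-EINVAL, -E2BIG) means "do not trust this stack". */
	return nr >= 0;
}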

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
	}

	for_each_cpu(cpu, mask) {
		struct paca_struct *p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
v6.9.4
// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
					   struct task_struct *task, struct pt_regs *regs)
{
	unsigned long sp;

	if (regs && !consume_entry(cookie, regs->nip))
		return;

	if (regs)
		sp = regs->gpr[1];
	else if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, task))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (!consume_entry(cookie, ip))
			return;

		sp = newsp;
	}
}
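
By v6.9.4 the old struct stack_trace interface is gone: under CONFIG_ARCH_STACKWALK the generic layer drives arch_stack_walk() through a consume_entry callback (typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr)), and returning false from the callback stops the walk. A minimal sketch of a consumer (print_entry() and show_current_stack() are illustrative names; real callers normally go through stack_trace_save() rather than calling the arch hook directly):

#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

/* A minimal consumer: print each return address until we hit a depth cap. */
static bool print_entry(void *cookie, unsigned long addr)
{
	unsigned int *remaining = cookie;

	pr_info("  %pS\n", (void *)addr);
	return --(*remaining) != 0;	/* returning false stops the walk */
}

static void show_current_stack(void)
{
	unsigned int remaining = 16;

	arch_stack_walk(print_entry, &remaining, current, NULL);
}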

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
						   void *cookie, struct task_struct *task)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;

	// See copy_thread() for details.
	if (task->flags & PF_KTHREAD)
		stack_end -= STACK_FRAME_MIN_SIZE;
	else
		stack_end -= STACK_USER_INT_FRAME_SIZE;

	if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Sanity check: the ABI requires SP to be aligned to 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink; the
		 * rest of the frame may be uninitialized, so continue to
		 * the next.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * FIXME: IMHO these tests do not belong in
		 * arch-dependent code, they are generic.
		 */
		ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)__kretprobe_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip))
			return -EINVAL;
	}
	return 0;
}
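
With CONFIG_ARCH_STACKWALK, the generic code in kernel/stacktrace.c builds stack_trace_save_tsk_reliable() on top of this function by adapting the callback to an array. Roughly, as a simplified sketch (trace_cookie, store_entry() and save_reliable() are illustrative names; the real generic code also handles try_get_task_stack() and entry skipping):

#include <linux/sched.h>
#include <linux/stacktrace.h>

struct trace_cookie {
	unsigned long	*store;
	unsigned int	size;
	unsigned int	len;
};

static bool store_entry(void *cookie, unsigned long addr)
{
	struct trace_cookie *c = cookie;

	if (c->len >= c->size)
		return false;	/* buffer full: stop the walk */
	c->store[c->len++] = addr;
	return true;
}

static int save_reliable(struct task_struct *tsk, unsigned long *store,
			 unsigned int size)
{
	struct trace_cookie c = { .store = store, .size = size };
	int ret = arch_stack_walk_reliable(store_entry, &c, tsk);

	/* negative errno if unreliable, otherwise the number of entries */
	return ret ? ret : (int)c.len;
}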

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	struct paca_struct *p;
	unsigned int cpu;
	u64 delay_us;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id()) {
			handle_backtrace_ipi(NULL);
			continue;
		}

		delay_us = 5 * USEC_PER_SEC;

		if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
			// Now wait up to 5s for the other CPU to do its backtrace
			while (cpumask_test_cpu(cpu, mask) && delay_us) {
				udelay(1);
				delay_us--;
			}

			// Other CPU cleared itself from the mask
			if (delay_us)
				continue;
		}

		p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace_ipi);
}
#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
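
arch_trigger_cpumask_backtrace() is the arch hook behind the generic NMI-backtrace helpers in <linux/nmi.h>. A hedged usage sketch (nmi_dump_all() is an illustrative name):

#include <linux/nmi.h>
#include <linux/printk.h>

static void nmi_dump_all(void)
{
	/*
	 * Ask every CPU to dump its stack via the IPI machinery above;
	 * returns false if the platform has no NMI backtrace support.
	 */
	if (!trigger_all_cpu_backtrace())
		pr_info("NMI backtrace not supported on this platform\n");
}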