// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 */

#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

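/*
 * Walk the kernel stack of @task (or the context described by @regs) with
 * the s390 unwinder and hand each return address to @consume_entry until
 * the callback returns false or no further frames can be unwound.
 */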
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

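/*
 * Like arch_stack_walk(), but accept the trace only if it is reliable:
 * bail out with -EINVAL as soon as the walk leaves the task stack, hits a
 * frame with saved registers, cannot resolve a return address, finds a
 * rethook trampoline, or the unwinder reports an error. Callers such as
 * live patching rely on a zero return meaning the trace is complete.
 */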
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stacktraces with krethook functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}

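/*
 * Walk the user space stack by following the ABI back chain: each
 * struct stack_frame_user holds the caller's stack pointer in back_chain
 * and the return address (r14) in gprs[8]. Page faults are disabled, so
 * an inaccessible frame simply ends the trace. Compat (31-bit) tasks are
 * not supported.
 */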
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!consume_entry(cookie, instruction_pointer(regs)))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		if (__get_user(ip, &sf->gprs[8]))
			break;
		if (ip & 0x1) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (first && !(regs->gprs[14] & 0x1))
				ip = regs->gprs[14];
			else
				break;
		}
		if (!consume_entry(cookie, ip))
			break;
		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
		if (!sp || sp & 0x7)
			break;
		sf = (void __user *)sp;
		first = false;
	}
	pagefault_enable();
}
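
/*
 * The walkers above are normally driven by the generic stacktrace layer
 * (kernel/stacktrace.c with CONFIG_ARCH_STACKWALK) through stack_trace_save()
 * and friends rather than being called directly. A minimal sketch of a
 * consume_entry callback, using hypothetical names and shown for
 * illustration only; returning false from the callback stops the walk:
 *
 *	struct trace_buf {
 *		unsigned long *entries;
 *		unsigned int len, max;
 *	};
 *
 *	static bool store_addr(void *cookie, unsigned long addr)
 *	{
 *		struct trace_buf *buf = cookie;
 *
 *		buf->entries[buf->len++] = addr;
 *		return buf->len < buf->max;
 *	}
 *
 *	struct trace_buf buf = { .entries = array, .len = 0, .max = n };
 *
 *	arch_stack_walk(store_addr, &buf, current, NULL);
 */

/*
 * What follows is an earlier revision of this file, built on the legacy
 * struct stack_trace interface (save_stack_trace() and
 * save_stack_trace_tsk()) that predates the arch_stack_walk() API above.
 */
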
/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

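/*
 * Walk a single stack area in the range @low..@high, following the back
 * chain from frame to frame and recording each saved return address
 * (gprs[8], i.e. r14) in @trace. A zero back chain may indicate a pt_regs
 * frame left by an interrupt or exception; in that case the PSW address is
 * recorded, unless @savesched is zero and it points into a scheduler
 * function, and the walk continues on the stack of the interrupted
 * context. Returns the stack pointer at which the walk stopped.
 */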
static unsigned long save_context_stack(struct stack_trace *trace,
					unsigned long sp,
					unsigned long low,
					unsigned long high,
					int savesched)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while(1) {
		sp &= PSW_ADDR_INSN;
		if (sp < low || sp > high)
			return sp;
		sf = (struct stack_frame *)sp;
		while(1) {
			addr = sf->gprs[8] & PSW_ADDR_INSN;
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
			if (trace->nr_entries >= trace->max_entries)
				return sp;
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *)sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long)(sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *)sp;
		addr = regs->psw.addr & PSW_ADDR_INSN;
		if (savesched || !in_sched_functions(addr)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
		}
		if (trace->nr_entries >= trace->max_entries)
			return sp;
		low = sp;
		sp = regs->gprs[15];
	}
}

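/*
 * Save a stack trace of the current context. The walk starts at the
 * current stack pointer (r15) and tries the panic stack, the async
 * (interrupt) stack and finally the task's thread stack, in that order.
 */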
void save_stack_trace(struct stack_trace *trace)
{
	register unsigned long sp asm ("15");
	unsigned long orig_sp, new_sp;

	orig_sp = sp & PSW_ADDR_INSN;
	new_sp = save_context_stack(trace, orig_sp,
				    S390_lowcore.panic_stack - PAGE_SIZE,
				    S390_lowcore.panic_stack, 1);
	if (new_sp != orig_sp)
		return;
	new_sp = save_context_stack(trace, new_sp,
				    S390_lowcore.async_stack - ASYNC_SIZE,
				    S390_lowcore.async_stack, 1);
	if (new_sp != orig_sp)
		return;
	save_context_stack(trace, new_sp,
			   S390_lowcore.thread_info,
			   S390_lowcore.thread_info + THREAD_SIZE, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

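/*
 * Save a stack trace of the given task, starting from its saved kernel
 * stack pointer (thread.ksp). The trace is terminated with ULONG_MAX if
 * there is room left in the entry array.
 */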
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp, low, high;

	sp = tsk->thread.ksp & PSW_ADDR_INSN;
	low = (unsigned long) task_stack_page(tsk);
	high = (unsigned long) task_pt_regs(tsk);
	save_context_stack(trace, sp, low, high, 0);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);