// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 * Copyright IBM Corp. 2006
 */

#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

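/*
 * arch_stack_walk() is the s390 backend behind the generic kernel
 * stack trace interfaces in <linux/stacktrace.h>: it drives the
 * unwinder over the kernel stack of @task (or over the state in
 * @regs) and hands every return address to @consume_entry until the
 * callback returns false or no frames are left.
 *
 * A minimal usage sketch through the generic wrapper (illustrative
 * only, not part of this file):
 *
 *	unsigned long entries[16];
 *	unsigned int nr;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	stack_trace_print(entries, nr, 0);
 */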
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

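/*
 * Reliable variant of arch_stack_walk(): the walk succeeds only if
 * every frame can be classified with certainty. A frame that is not
 * on the regular task stack, an interrupt frame (state.regs set), a
 * missing return address, the rethook return trampoline, or an
 * unwinder error each make the trace unreliable and abort the walk
 * with -EINVAL. Consumers such as livepatch rely on this to avoid
 * acting on tasks whose stacks cannot be walked reliably.
 */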
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stack traces that contain the rethook return
		 * trampoline as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}

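/*
 * Store one entry of a user stack trace: with @perf set the address
 * goes into the perf callchain buffer, otherwise it is handed to the
 * generic @consume_entry callback. Returns false once the consumer
 * accepts no further entries.
 */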
static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
			    struct perf_callchain_entry_ctx *entry, bool perf,
			    unsigned long ip)
{
#ifdef CONFIG_PERF_EVENTS
	if (perf) {
		if (perf_callchain_store(entry, ip))
			return false;
		return true;
	}
#endif
	return consume_entry(cookie, ip);
}

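/*
 * Rationale for the checks below: s390 instructions are halfword
 * aligned, so an odd address can never be a valid instruction address;
 * addresses below mmap_min_addr or at/above the asce_limit of the
 * process address space cannot be mapped user text either.
 */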
static inline bool ip_invalid(unsigned long ip)
{
	/*
	 * Perform some basic checks to determine whether an instruction
	 * address taken from an unreliable source is invalid.
	 */
	if (ip & 1)
		return true;
	if (ip < mmap_min_addr)
		return true;
	if (ip >= current->mm->context.asce_limit)
		return true;
	return false;
}

static inline bool ip_within_vdso(unsigned long ip)
{
	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
}

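/*
 * Walk a user space stack by following the s390 ELF ABI back chain:
 * each stack frame starts with a pointer to the caller's frame, and
 * the caller's return address sits in the r14 slot of the register
 * save area (gprs[8]). Two irregular cases are handled below: frames
 * set up by the VDSO user wrapper, which keep the return address in a
 * separate wrapper field, and the topmost frame, where r14 may not
 * have been written to the stack yet. Compat (31-bit) tasks are not
 * supported. Page faults are disabled, so the walk is safe from
 * contexts that must not sleep (such as perf sampling); unreadable
 * user memory simply ends the walk.
 */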
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf)
{
	struct stack_frame_vdso_wrapper __user *sf_vdso;
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!current->mm)
		return;
	ip = instruction_pointer(regs);
	if (!store_ip(consume_entry, cookie, entry, perf, ip))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		/*
		 * VDSO entry code has a non-standard stack frame layout.
		 * See VDSO user wrapper code for details.
		 */
		if (!sp && ip_within_vdso(ip)) {
			sf_vdso = (void __user *)sf;
			if (__get_user(ip, &sf_vdso->return_address))
				break;
			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
			sf = (void __user *)sp;
			if (__get_user(sp, &sf->back_chain))
				break;
		} else {
			sf = (void __user *)sp;
			if (__get_user(ip, &sf->gprs[8]))
				break;
		}
		/* Sanity check: the ABI requires SP to be 8-byte aligned. */
		if (sp & 0x7)
			break;
		if (ip_invalid(ip)) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (!first)
				break;
			ip = regs->gprs[14];
			if (ip_invalid(ip))
				break;
		}
		if (!store_ip(consume_entry, cookie, entry, perf, ip))
			break;
		first = false;
	}
	pagefault_enable();
}

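/*
 * Backend for the generic stack_trace_save_user() interface. It is the
 * same walk as the perf variant, except that entries go to a plain
 * consume_entry callback. A minimal usage sketch (illustrative only,
 * not part of this file):
 *
 *	unsigned long buf[32];
 *	unsigned int n;
 *
 *	n = stack_trace_save_user(buf, ARRAY_SIZE(buf));
 */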
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}
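
/*
 * The listing below is an earlier revision of this same file,
 * predating the conversion to the common unwinder interface used
 * above.
 */
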
/*
 * arch/s390/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 * Copyright (C) IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

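/*
 * Walk the kernel stack between @low and @high, following the frame
 * back chain and recording the saved r14 (gprs[8]) of each frame.
 * A zero back chain indicates a potential interrupt frame: a struct
 * pt_regs may follow the last stack frame, in which case the walk
 * continues on the interrupted context's stack via regs->gprs[15].
 * With @savesched == 0, addresses inside scheduler functions are not
 * recorded. Returns the stack pointer at which the walk left the
 * [low, high] range (or stopped because trace->max_entries was hit).
 */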
static unsigned long save_context_stack(struct stack_trace *trace,
					unsigned long sp,
					unsigned long low,
					unsigned long high,
					int savesched)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while (1) {
		sp &= PSW_ADDR_INSN;
		if (sp < low || sp > high)
			return sp;
		sf = (struct stack_frame *)sp;
		while (1) {
			addr = sf->gprs[8] & PSW_ADDR_INSN;
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
			if (trace->nr_entries >= trace->max_entries)
				return sp;
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *)sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long)(sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *)sp;
		addr = regs->psw.addr & PSW_ADDR_INSN;
		if (savesched || !in_sched_functions(addr)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
		}
		if (trace->nr_entries >= trace->max_entries)
			return sp;
		low = sp;
		sp = regs->gprs[15];
	}
}

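/*
 * Save a stack trace of the current context. The three possible
 * stacks are tried in order: panic stack, async (interrupt) stack,
 * process stack. save_context_stack() returns its input stack pointer
 * unchanged when that pointer does not lie within the given stack,
 * which is the signal to try the next one.
 */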
void save_stack_trace(struct stack_trace *trace)
{
	register unsigned long sp asm ("15");
	unsigned long orig_sp, new_sp;

	orig_sp = sp & PSW_ADDR_INSN;
	new_sp = save_context_stack(trace, orig_sp,
				    S390_lowcore.panic_stack - PAGE_SIZE,
				    S390_lowcore.panic_stack, 1);
	if (new_sp != orig_sp)
		return;
	new_sp = save_context_stack(trace, new_sp,
				    S390_lowcore.async_stack - ASYNC_SIZE,
				    S390_lowcore.async_stack, 1);
	if (new_sp != orig_sp)
		return;
	save_context_stack(trace, new_sp,
			   S390_lowcore.thread_info,
			   S390_lowcore.thread_info + THREAD_SIZE, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

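/*
 * Save a stack trace of a sleeping task: walk from the task's saved
 * kernel stack pointer (thread.ksp) up to its pt_regs area, skipping
 * scheduler functions, and terminate the trace with ULONG_MAX if
 * there is room left.
 */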
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp, low, high;

	sp = tsk->thread.ksp & PSW_ADDR_INSN;
	low = (unsigned long) task_stack_page(tsk);
	high = (unsigned long) task_pt_regs(tsk);
	save_context_stack(trace, sp, low, high, 0);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);