// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 *  Copyright IBM Corp. 2006
 */

#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

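/*
 * Usage sketch (illustrative, not part of this file): a minimal
 * stack_trace_consume_fn that copies return addresses into a
 * caller-supplied buffer. struct trace_buf and store_entry() are
 * hypothetical names; only stack_trace_consume_fn and
 * arch_stack_walk() come from the code above. Returning false from
 * the callback stops the walk.
 */
struct trace_buf {
	unsigned long entries[32];
	unsigned int nr;
};

static bool store_entry(void *cookie, unsigned long addr)
{
	struct trace_buf *buf = cookie;

	if (buf->nr >= ARRAY_SIZE(buf->entries))
		return false;	/* buffer full: stop the walk */
	buf->entries[buf->nr++] = addr;
	return true;
}

/*
 * Tracing the current task's own stack would then look like:
 *
 *	struct trace_buf buf = { .nr = 0 };
 *
 *	arch_stack_walk(store_entry, &buf, current, NULL);
 */
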
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		/* Only frames on the task stack are considered reliable. */
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		/* An exception frame makes the trace unreliable. */
		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stack traces that contain the rethook return
		 * trampoline as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption. */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}

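/*
 * Usage sketch (illustrative): unlike arch_stack_walk(), the reliable
 * variant reports failure, so a caller (e.g. a livepatch-style
 * consistency check) must treat a negative return as "do not trust
 * any entries gathered so far". try_reliable_trace() is a
 * hypothetical helper built on struct trace_buf and store_entry()
 * from the sketch above.
 */
static int try_reliable_trace(struct task_struct *task, struct trace_buf *buf)
{
	int ret;

	buf->nr = 0;
	ret = arch_stack_walk_reliable(store_entry, buf, task);
	if (ret < 0)
		buf->nr = 0;	/* a partial trace is unreliable, discard it */
	return ret;
}
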
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!consume_entry(cookie, instruction_pointer(regs)))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		if (__get_user(ip, &sf->gprs[8]))
			break;
		/* Odd addresses are invalid: instructions are halfword aligned. */
		if (ip & 0x1) {
			/*
			 * If the instruction address is invalid and this
			 * is the first stack frame, assume r14 has not
			 * yet been written to the stack. Otherwise stop.
			 */
			if (first && !(regs->gprs[14] & 0x1))
				ip = regs->gprs[14];
			else
				break;
		}
		if (!consume_entry(cookie, ip))
			break;
		/* Sanity check: the ABI requires SP to be aligned to 8 bytes. */
		if (!sp || sp & 0x7)
			break;
		sf = (void __user *)sp;
		first = false;
	}
	pagefault_enable();
}
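
/*
 * Background sketch (illustrative, hedged): arch_stack_walk_user()
 * relies on the s390 ELF ABI user stack frame layout, roughly:
 *
 *	struct stack_frame_user {
 *		unsigned long back_chain;	// 0(%r15): previous frame
 *		unsigned long empty1[5];
 *		unsigned long gprs[10];		// save slots for %r6..%r15
 *	};
 *
 * Under that layout gprs[8] is the slot for %r14, the return address
 * register, which is why the walker reads sf->gprs[8]. The
 * authoritative definition lives in asm/ptrace.h; the offsets above
 * are an assumption for illustration.
 */
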
/*
 * arch/s390/kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 *  Copyright (C) IBM Corp. 2006
 *  Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>

static unsigned long save_context_stack(struct stack_trace *trace,
					unsigned long sp,
					unsigned long low,
					unsigned long high,
					int savesched)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while (1) {
		sp &= PSW_ADDR_INSN;
		if (sp < low || sp > high)
			return sp;
		sf = (struct stack_frame *)sp;
		/* Follow the back chain within the current stack area. */
		while (1) {
			addr = sf->gprs[8] & PSW_ADDR_INSN;
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
			if (trace->nr_entries >= trace->max_entries)
				return sp;
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *)sp;
		}
		/* Zero back chain detected, check for an interrupt frame. */
		sp = (unsigned long)(sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *)sp;
		addr = regs->psw.addr & PSW_ADDR_INSN;
		if (savesched || !in_sched_functions(addr)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = addr;
			else
				trace->skip--;
		}
		if (trace->nr_entries >= trace->max_entries)
			return sp;
		low = sp;
		sp = regs->gprs[15];
	}
}

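/*
 * Layout sketch (illustrative) for this earlier, pre-unwinder version:
 * the inner loop above follows non-zero back chains within one stack
 * area; a zero back chain marks the outermost frame, after which a
 * struct pt_regs saved by the interrupt entry code is assumed to
 * follow, and the walk continues on the interrupted context's stack
 * via regs->gprs[15]:
 *
 *	low                                       high
 *	 | frame | frame | ... | frame | pt_regs |
 *	                          ^ back_chain == 0
 */
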
void save_stack_trace(struct stack_trace *trace)
{
	register unsigned long sp asm ("15");
	unsigned long orig_sp, new_sp;

	orig_sp = sp & PSW_ADDR_INSN;
	/* Try the panic stack, then the async stack, then the process stack. */
	new_sp = save_context_stack(trace, orig_sp,
				    S390_lowcore.panic_stack - PAGE_SIZE,
				    S390_lowcore.panic_stack, 1);
	if (new_sp != orig_sp)
		return;
	new_sp = save_context_stack(trace, new_sp,
				    S390_lowcore.async_stack - ASYNC_SIZE,
				    S390_lowcore.async_stack, 1);
	if (new_sp != orig_sp)
		return;
	save_context_stack(trace, new_sp,
			   S390_lowcore.thread_info,
			   S390_lowcore.thread_info + THREAD_SIZE, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

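/*
 * Usage sketch for this older API (illustrative): the caller owns the
 * entry buffer and passes limits in struct stack_trace. The buffer
 * size and skip count below are example values, not defaults.
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 1,	// drop save_stack_trace() itself
 *	};
 *
 *	save_stack_trace(&trace);
 */
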
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp, low, high;

	sp = tsk->thread.ksp & PSW_ADDR_INSN;
	low = (unsigned long) task_stack_page(tsk);
	high = (unsigned long) task_pt_regs(tsk);
	save_context_stack(trace, sp, low, high, 0);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
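
/*
 * Usage sketch (illustrative): the trailing ULONG_MAX stored above is
 * the historic end-of-trace marker, so a consumer printing the trace
 * stops there:
 *
 *	for (i = 0; i < trace.nr_entries; i++) {
 *		if (trace.entries[i] == ULONG_MAX)
 *			break;
 *		printk("%pS\n", (void *)trace.entries[i]);
 *	}
 */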