/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <asm/sections.h>
#include <asm/ptrace.h>
#include <asm/bitops.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

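/*
 * Return the current return address, or 0 if unwinding is done or the
 * saved address does not point into kernel text.
 */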
unsigned long unwind_get_return_address(struct unwind_state *state)
{
	if (unwind_done(state))
		return 0;
	return __kernel_text_address(state->ip) ? state->ip : 0;
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

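/*
 * Check whether a new stack pointer leaves the stack currently being
 * walked: it must lie above the current frame (stacks grow towards
 * lower addresses) and leave room for at least one stack_frame before
 * the end of the stack.
 */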
static bool outside_of_stack(struct unwind_state *state, unsigned long sp)
{
	return (sp <= state->sp) ||
		(sp > state->stack_info.end - sizeof(struct stack_frame));
}

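/*
 * Switch stack_info over to the stack that 'sp' points to, for example
 * when the unwinder crosses from an interrupt stack to the task stack.
 * Returns false if 'sp' does not point into a known stack.
 */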
static bool update_stack_info(struct unwind_state *state, unsigned long sp)
{
	struct stack_info *info = &state->stack_info;
	unsigned long *mask = &state->stack_mask;

	/* New stack pointer leaves the current stack */
	if (get_stack_info(sp, state->task, info, mask) != 0 ||
	    !on_stack(info, sp, sizeof(struct stack_frame)))
		/* 'sp' does not point to a valid stack */
		return false;
	return true;
}

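/*
 * Check whether 'regs' marks the end of the kernel call chain: either
 * the pt_regs at the bottom of the task stack, or user mode pt_regs at
 * the bottom of an interrupt stack.
 */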
static inline bool is_final_pt_regs(struct unwind_state *state,
				    struct pt_regs *regs)
{
	/* user mode or kernel thread pt_regs at the bottom of task stack */
	if (task_pt_regs(state->task) == regs)
		return true;

	/* user mode pt_regs at the bottom of irq stack */
	return state->stack_info.type == STACK_TYPE_IRQ &&
	       state->stack_info.end - sizeof(struct pt_regs) == (unsigned long)regs &&
	       READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE;
}

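/*
 * Advance the unwind state by one frame: follow the back chain stored
 * in the current stack frame or, if the back chain is zero, look for a
 * pt_regs area saved right behind the standard frame overhead and
 * continue from the register values recorded there.  When state->regs
 * is set, the saved %r14 of that frame is used as the return address
 * but is not marked reliable.
 */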
bool unwind_next_frame(struct unwind_state *state)
{
	struct stack_info *info = &state->stack_info;
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long sp, ip;
	bool reliable;

	regs = state->regs;
	if (unlikely(regs)) {
		sp = state->sp;
		sf = (struct stack_frame *) sp;
		ip = READ_ONCE_NOCHECK(sf->gprs[8]);
		reliable = false;
		regs = NULL;
		/* skip bogus %r14 or if it is the same as regs->psw.addr */
		if (!__kernel_text_address(ip) || state->ip == unwind_recover_ret_addr(state, ip)) {
			state->regs = NULL;
			return unwind_next_frame(state);
		}
	} else {
		sf = (struct stack_frame *) state->sp;
		sp = READ_ONCE_NOCHECK(sf->back_chain);
		if (likely(sp)) {
			/* Non-zero back-chain points to the previous frame */
			if (unlikely(outside_of_stack(state, sp))) {
				if (!update_stack_info(state, sp))
					goto out_err;
			}
			sf = (struct stack_frame *) sp;
			ip = READ_ONCE_NOCHECK(sf->gprs[8]);
			reliable = true;
		} else {
			/* No back-chain, look for a pt_regs structure */
			sp = state->sp + STACK_FRAME_OVERHEAD;
			if (!on_stack(info, sp, sizeof(struct pt_regs)))
				goto out_err;
			regs = (struct pt_regs *) sp;
			if (is_final_pt_regs(state, regs))
				goto out_stop;
			ip = READ_ONCE_NOCHECK(regs->psw.addr);
			sp = READ_ONCE_NOCHECK(regs->gprs[15]);
			if (unlikely(outside_of_stack(state, sp))) {
				if (!update_stack_info(state, sp))
					goto out_err;
			}
			reliable = true;
		}
	}

	/* Sanity check: the ABI requires SP to be aligned to 8 bytes. */
	if (sp & 0x7)
		goto out_err;

	/* Update unwind state */
	state->sp = sp;
	state->regs = regs;
	state->reliable = reliable;
	state->ip = unwind_recover_ret_addr(state, ip);
	return true;

out_err:
	state->error = true;
out_stop:
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);

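/*
 * Initialize the unwind state for 'task', starting either from the
 * given pt_regs or from the current/saved kernel stack pointer, then
 * optionally skip ahead to 'first_frame'.
 */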
void __unwind_start(struct unwind_state *state, struct task_struct *task,
		    struct pt_regs *regs, unsigned long first_frame)
{
	struct stack_info *info = &state->stack_info;
	struct stack_frame *sf;
	unsigned long ip, sp;

	memset(state, 0, sizeof(*state));
	state->task = task;
	state->regs = regs;

	/* Don't even attempt to start from user mode regs: */
	if (regs && user_mode(regs)) {
		info->type = STACK_TYPE_UNKNOWN;
		return;
	}

	/* Get the instruction pointer from pt_regs or the stack frame */
	if (regs) {
		ip = regs->psw.addr;
		sp = regs->gprs[15];
	} else if (task == current) {
		sp = current_frame_address();
	} else {
		sp = task->thread.ksp;
	}

	/* Get current stack pointer and initialize stack info */
	if (!update_stack_info(state, sp)) {
		/* Something is wrong with the stack pointer */
		info->type = STACK_TYPE_UNKNOWN;
		state->error = true;
		return;
	}

	if (!regs) {
		/* Stack frame is within valid stack */
		sf = (struct stack_frame *) sp;
		ip = READ_ONCE_NOCHECK(sf->gprs[8]);
	}

	/* Update unwind state */
	state->sp = sp;
	state->reliable = true;
	state->ip = unwind_recover_ret_addr(state, ip);

	if (!first_frame)
		return;
	/* Skip through the call chain to the specified starting frame */
	while (!unwind_done(state)) {
		if (on_stack(&state->stack_info, first_frame, sizeof(struct stack_frame))) {
			if (state->sp >= first_frame)
				break;
		}
		unwind_next_frame(state);
	}
}
EXPORT_SYMBOL_GPL(__unwind_start);