/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_STACKTRACE_H
#define _ASM_S390_STACKTRACE_H

#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>

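/*
 * Stack frame layout as seen by user space code, i.e. the standard
 * s390 ELF ABI frame (no -mpacked-stack). Used when walking user
 * space stacks.
 */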
struct stack_frame_user {
        unsigned long back_chain;
        unsigned long empty1[5];
        unsigned long gprs[10];
        unsigned long empty2[4];
};

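/*
 * Extended user space stack frame as allocated by the vdso user wrapper
 * code, which additionally saves the return address so the user space
 * stack walker can find it.
 */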
struct stack_frame_vdso_wrapper {
        struct stack_frame_user sf;
        unsigned long return_address;
};

struct perf_callchain_entry_ctx;

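/*
 * Common user space stack walker, shared by arch_stack_walk_user() and
 * the perf callchain code (selected via the perf flag).
 */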
void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
                                 struct perf_callchain_entry_ctx *entry,
                                 const struct pt_regs *regs, bool perf);

enum stack_type {
        STACK_TYPE_UNKNOWN,
        STACK_TYPE_TASK,
        STACK_TYPE_IRQ,
        STACK_TYPE_NODAT,
        STACK_TYPE_RESTART,
        STACK_TYPE_MCCK,
};

struct stack_info {
        enum stack_type type;
        unsigned long begin, end;
};

const char *stack_type_name(enum stack_type type);
int get_stack_info(unsigned long sp, struct task_struct *task,
                   struct stack_info *info, unsigned long *visit_mask);

static inline bool on_stack(struct stack_info *info,
                            unsigned long addr, size_t len)
{
        if (info->type == STACK_TYPE_UNKNOWN)
                return false;
        if (addr + len < addr)
                return false;
        return addr >= info->begin && addr + len <= info->end;
}

/*
 * Stack layout of a C stack frame.
 * The kernel uses the packed stack layout (-mpacked-stack). The slots of
 * the empty[] area below are normally unused; the SIE entry code reuses
 * them to keep SIE related data while a KVM guest is running.
 */
struct stack_frame {
        union {
                unsigned long empty[9];
                struct {
                        unsigned long sie_control_block;
                        unsigned long sie_savearea;
                        unsigned long sie_reason;
                        unsigned long sie_flags;
                        unsigned long sie_control_block_phys;
                        unsigned long sie_guest_asce;
                };
        };
        unsigned long gprs[10];
        unsigned long back_chain;
};

/*
 * Unlike current_stack_pointer, which simply contains the current value
 * of %r15, current_frame_address() returns the address of the function's
 * stack frame, which matches %r15 upon function entry. The two may differ
 * later if the function allocates stack space for local variables or a
 * new stack frame to call other functions.
 */
#define current_frame_address()                                         \
        ((unsigned long)__builtin_frame_address(0) -                    \
         offsetof(struct stack_frame, back_chain))

static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
                                                        struct pt_regs *regs)
{
        if (regs)
                return (unsigned long)kernel_stack_pointer(regs);
        if (task == current)
                return current_frame_address();
        return (unsigned long)task->thread.ksp;
}
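
/*
 * Simplified sketch of how a stack walker is seeded (this assumes
 * get_stack_info() returns 0 if sp points to a known stack):
 *
 *	unsigned long visit_mask = 0;
 *	unsigned long sp = get_stack_pointer(task, regs);
 *	struct stack_info info;
 *
 *	if (get_stack_info(sp, task, &info, &visit_mask))
 *		return;
 *	while (on_stack(&info, sp, sizeof(struct stack_frame)))
 *		sp = ((struct stack_frame *)sp)->back_chain;
 */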

/*
 * To keep this simple, mark registers 2-6 as being changed (volatile)
 * by the called function, even though register 6 is saved/nonvolatile.
 */
#define CALL_FMT_0 "=&d" (r2)
#define CALL_FMT_1 "+&d" (r2)
#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)

#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
#define CALL_CLOBBER_0 CALL_CLOBBER_1

#define CALL_LARGS_0(...)                                               \
        long dummy = 0
#define CALL_LARGS_1(t1, a1)                                            \
        long arg1 = (long)(t1)(a1)
#define CALL_LARGS_2(t1, a1, t2, a2)                                    \
        CALL_LARGS_1(t1, a1);                                           \
        long arg2 = (long)(t2)(a2)
#define CALL_LARGS_3(t1, a1, t2, a2, t3, a3)                            \
        CALL_LARGS_2(t1, a1, t2, a2);                                   \
        long arg3 = (long)(t3)(a3)
#define CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4)                    \
        CALL_LARGS_3(t1, a1, t2, a2, t3, a3);                           \
        long arg4 = (long)(t4)(a4)
#define CALL_LARGS_5(t1, a1, t2, a2, t3, a3, t4, a4, t5, a5)            \
        CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4);                   \
        long arg5 = (long)(t5)(a5)

#define CALL_REGS_0                                                     \
        register long r2 asm("2") = dummy
#define CALL_REGS_1                                                     \
        register long r2 asm("2") = arg1
#define CALL_REGS_2                                                     \
        CALL_REGS_1;                                                    \
        register long r3 asm("3") = arg2
#define CALL_REGS_3                                                     \
        CALL_REGS_2;                                                    \
        register long r4 asm("4") = arg3
#define CALL_REGS_4                                                     \
        CALL_REGS_3;                                                    \
        register long r5 asm("5") = arg4
#define CALL_REGS_5                                                     \
        CALL_REGS_4;                                                    \
        register long r6 asm("6") = arg5

#define CALL_TYPECHECK_0(...)
#define CALL_TYPECHECK_1(t, a, ...)                                     \
        typecheck(t, a)
#define CALL_TYPECHECK_2(t, a, ...)                                     \
        CALL_TYPECHECK_1(__VA_ARGS__);                                  \
        typecheck(t, a)
#define CALL_TYPECHECK_3(t, a, ...)                                     \
        CALL_TYPECHECK_2(__VA_ARGS__);                                  \
        typecheck(t, a)
#define CALL_TYPECHECK_4(t, a, ...)                                     \
        CALL_TYPECHECK_3(__VA_ARGS__);                                  \
        typecheck(t, a)
#define CALL_TYPECHECK_5(t, a, ...)                                     \
        CALL_TYPECHECK_4(__VA_ARGS__);                                  \
        typecheck(t, a)

#define CALL_PARM_0(...) void
#define CALL_PARM_1(t, a, ...) t
#define CALL_PARM_2(t, a, ...) t, CALL_PARM_1(__VA_ARGS__)
#define CALL_PARM_3(t, a, ...) t, CALL_PARM_2(__VA_ARGS__)
#define CALL_PARM_4(t, a, ...) t, CALL_PARM_3(__VA_ARGS__)
#define CALL_PARM_5(t, a, ...) t, CALL_PARM_4(__VA_ARGS__)
#define CALL_PARM_6(t, a, ...) t, CALL_PARM_5(__VA_ARGS__)

/*
 * Use call_on_stack() to call a function after switching to a specified
 * stack. Proper sign and zero extension of function arguments is
 * done. Usage:
 *
 * rc = call_on_stack(nr, stack, rettype, fn, t1, a1, t2, a2, ...)
 *
 * - nr specifies the number of function arguments of fn.
 * - stack specifies the stack to be used.
 * - fn is the function to be called.
 * - rettype is the return type of fn.
 * - t1, a1, ... are pairs, where t1 must match the type of the first
 *   argument of fn, t2 the second, etc. a1 is the corresponding
 *   first function argument (the value, not the parameter name), etc.
 */
#define call_on_stack(nr, stack, rettype, fn, ...)                      \
({                                                                      \
        rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = fn;              \
        unsigned long frame = current_frame_address();                  \
        unsigned long __stack = stack;                                  \
        unsigned long prev;                                             \
        CALL_LARGS_##nr(__VA_ARGS__);                                   \
        CALL_REGS_##nr;                                                 \
                                                                        \
        CALL_TYPECHECK_##nr(__VA_ARGS__);                               \
        asm volatile(                                                   \
                "       lgr     %[_prev],15\n"                          \
                "       lg      15,%[_stack]\n"                         \
                "       stg     %[_frame],%[_bc](15)\n"                 \
                "       brasl   14,%[_fn]\n"                            \
                "       lgr     15,%[_prev]\n"                          \
                : [_prev] "=&d" (prev), CALL_FMT_##nr                   \
                : [_stack] "R" (__stack),                               \
                  [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
                  [_frame] "d" (frame),                                 \
                  [_fn] "X" (__fn) : CALL_CLOBBER_##nr);                \
        (rettype)r2;                                                    \
})
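
/*
 * Example with hypothetical names: call long do_work(int cpu, void *arg)
 * on the stack given by "stack" (which must leave room for an initial
 * stack frame at the address loaded into %r15):
 *
 *	rc = call_on_stack(2, stack, long, do_work, int, cpu, void *, arg);
 */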

/*
 * Use call_nodat() to call a function with DAT disabled.
 * Proper sign and zero extension of function arguments is done.
 * Usage:
 *
 * rc = call_nodat(nr, rettype, fn, t1, a1, t2, a2, ...)
 *
 * - nr specifies the number of function arguments of fn.
 * - fn is the function to be called, where fn is a physical address.
 * - rettype is the return type of fn.
 * - t1, a1, ... are pairs, where t1 must match the type of the first
 *   argument of fn, t2 the second, etc. a1 is the corresponding
 *   first function argument (the value, not the parameter name), etc.
 *
 * fn() is called with the standard C function call ABI, with the
 * exception that no useful stack frame or stack pointer is passed via
 * register 15. Therefore the called function must not use r15 to access
 * the stack.
 */
#define call_nodat(nr, rettype, fn, ...)                                \
({                                                                      \
        rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = (fn);            \
        /* aligned since psw_leave must not cross page boundary */      \
        psw_t __aligned(16) psw_leave;                                  \
        psw_t psw_enter;                                                \
        CALL_LARGS_##nr(__VA_ARGS__);                                   \
        CALL_REGS_##nr;                                                 \
                                                                        \
        CALL_TYPECHECK_##nr(__VA_ARGS__);                               \
        psw_enter.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT;               \
        psw_enter.addr = (unsigned long)__fn;                           \
        asm volatile(                                                   \
                "       epsw    0,1\n"                                  \
                "       risbg   1,0,0,31,32\n"                          \
                "       larl    7,1f\n"                                 \
                "       stg     1,%[psw_leave]\n"                       \
                "       stg     7,8+%[psw_leave]\n"                     \
                "       la      7,%[psw_leave]\n"                       \
                "       lra     7,0(7)\n"                               \
                "       larl    1,0f\n"                                 \
                "       lra     14,0(1)\n"                              \
                "       lpswe   %[psw_enter]\n"                         \
                "0:     lpswe   0(7)\n"                                 \
                "1:\n"                                                  \
                : CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave)           \
                : [psw_enter] "Q" (psw_enter)                           \
                : "7", CALL_CLOBBER_##nr);                              \
        (rettype)r2;                                                    \
})
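
/*
 * Example with hypothetical names, where fn_phys holds the physical
 * address of a function with prototype int fn(unsigned long arg):
 *
 *	rc = call_nodat(1, int, (int (*)(unsigned long))fn_phys,
 *			unsigned long, arg);
 */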

#endif /* _ASM_S390_STACKTRACE_H */