/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_STACKTRACE_H
#define _ASM_S390_STACKTRACE_H

#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>

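/*
 * Layout of the standard 160-byte user space stack frame as defined by the
 * s390x ELF ABI: the back chain at offset 0 and the register save area for
 * r6-r15 in gprs[], followed by the floating point register save area.
 * Used when walking user space stacks.
 */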
struct stack_frame_user {
	unsigned long back_chain;
	unsigned long empty1[5];
	unsigned long gprs[10];
	unsigned long empty2[4];
};

struct stack_frame_vdso_wrapper {
	struct stack_frame_user sf;
	unsigned long return_address;
};

struct perf_callchain_entry_ctx;

void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf);

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_NODAT,
	STACK_TYPE_RESTART,
	STACK_TYPE_MCCK,
};

struct stack_info {
	enum stack_type type;
	unsigned long begin, end;
};

const char *stack_type_name(enum stack_type type);
int get_stack_info(unsigned long sp, struct task_struct *task,
		   struct stack_info *info, unsigned long *visit_mask);

static inline bool on_stack(struct stack_info *info,
			    unsigned long addr, size_t len)
{
	if (info->type == STACK_TYPE_UNKNOWN)
		return false;
	if (addr + len < addr)
		return false;
	return addr >= info->begin && addr + len <= info->end;
}

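/*
 * Example (illustrative sketch, not part of the original header; assumes
 * get_stack_info() returns 0 on success): an unwinder would typically
 * classify the stack first and then validate frame accesses against it:
 *
 *	struct stack_info info;
 *	unsigned long visit_mask = 0;
 *
 *	if (!get_stack_info(sp, task, &info, &visit_mask) &&
 *	    on_stack(&info, sp, sizeof(struct stack_frame)))
 *		... sp points to a frame that lies entirely on a known stack ...
 */
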
/*
 * Stack layout of a C stack frame.
 * Kernel uses the packed stack layout (-mpacked-stack).
 */
struct stack_frame {
	union {
		unsigned long empty[9];
		struct {
			unsigned long sie_control_block;
			unsigned long sie_savearea;
			unsigned long sie_reason;
			unsigned long sie_flags;
			unsigned long sie_control_block_phys;
		};
	};
	unsigned long gprs[10];
	unsigned long back_chain;
};

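/*
 * Example (illustrative sketch, not part of the original header): with the
 * packed stack layout each frame's back_chain slot holds the stack pointer
 * of the caller's frame, so a kernel stack can in principle be walked by
 * following the back chain, given some stack pointer value sp:
 *
 *	struct stack_frame *sf = (struct stack_frame *)sp;
 *
 *	while (sf && sf->back_chain)
 *		sf = (struct stack_frame *)sf->back_chain;
 */
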
/*
 * Unlike current_stack_pointer, which simply contains the current value of
 * %r15, current_frame_address() returns the function's stack frame address,
 * which matches %r15 upon function invocation. It may differ from %r15 later
 * if the function allocates stack space for local variables or a new stack
 * frame to call other functions.
 */
#define current_frame_address() \
	((unsigned long)__builtin_frame_address(0) - \
	 offsetof(struct stack_frame, back_chain))

static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
							struct pt_regs *regs)
{
	if (regs)
		return (unsigned long)kernel_stack_pointer(regs);
	if (task == current)
		return current_frame_address();
	return (unsigned long)task->thread.ksp;
}

/*
 * To keep this simple, mark registers 2-6 as being changed (volatile)
 * by the called function, even though register 6 is saved/nonvolatile.
 */
#define CALL_FMT_0 "=&d" (r2)
#define CALL_FMT_1 "+&d" (r2)
#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)

#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
#define CALL_CLOBBER_0 CALL_CLOBBER_1

#define CALL_LARGS_0(...) \
	long dummy = 0
#define CALL_LARGS_1(t1, a1) \
	long arg1 = (long)(t1)(a1)
#define CALL_LARGS_2(t1, a1, t2, a2) \
	CALL_LARGS_1(t1, a1); \
	long arg2 = (long)(t2)(a2)
#define CALL_LARGS_3(t1, a1, t2, a2, t3, a3) \
	CALL_LARGS_2(t1, a1, t2, a2); \
	long arg3 = (long)(t3)(a3)
#define CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4) \
	CALL_LARGS_3(t1, a1, t2, a2, t3, a3); \
	long arg4 = (long)(t4)(a4)
#define CALL_LARGS_5(t1, a1, t2, a2, t3, a3, t4, a4, t5, a5) \
	CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4); \
	long arg5 = (long)(t5)(a5)

#define CALL_REGS_0 \
	register long r2 asm("2") = dummy
#define CALL_REGS_1 \
	register long r2 asm("2") = arg1
#define CALL_REGS_2 \
	CALL_REGS_1; \
	register long r3 asm("3") = arg2
#define CALL_REGS_3 \
	CALL_REGS_2; \
	register long r4 asm("4") = arg3
#define CALL_REGS_4 \
	CALL_REGS_3; \
	register long r5 asm("5") = arg4
#define CALL_REGS_5 \
	CALL_REGS_4; \
	register long r6 asm("6") = arg5

#define CALL_TYPECHECK_0(...)
#define CALL_TYPECHECK_1(t, a, ...) \
	typecheck(t, a)
#define CALL_TYPECHECK_2(t, a, ...) \
	CALL_TYPECHECK_1(__VA_ARGS__); \
	typecheck(t, a)
#define CALL_TYPECHECK_3(t, a, ...) \
	CALL_TYPECHECK_2(__VA_ARGS__); \
	typecheck(t, a)
#define CALL_TYPECHECK_4(t, a, ...) \
	CALL_TYPECHECK_3(__VA_ARGS__); \
	typecheck(t, a)
#define CALL_TYPECHECK_5(t, a, ...) \
	CALL_TYPECHECK_4(__VA_ARGS__); \
	typecheck(t, a)

#define CALL_PARM_0(...) void
#define CALL_PARM_1(t, a, ...) t
#define CALL_PARM_2(t, a, ...) t, CALL_PARM_1(__VA_ARGS__)
#define CALL_PARM_3(t, a, ...) t, CALL_PARM_2(__VA_ARGS__)
#define CALL_PARM_4(t, a, ...) t, CALL_PARM_3(__VA_ARGS__)
#define CALL_PARM_5(t, a, ...) t, CALL_PARM_4(__VA_ARGS__)
#define CALL_PARM_6(t, a, ...) t, CALL_PARM_5(__VA_ARGS__)

/*
 * Use call_on_stack() to call a function switching to a specified
 * stack. Proper sign and zero extension of function arguments is
 * done. Usage:
 *
 * rc = call_on_stack(nr, stack, rettype, fn, t1, a1, t2, a2, ...)
 *
 * - nr specifies the number of function arguments of fn.
 * - stack specifies the stack to be used.
 * - fn is the function to be called.
 * - rettype is the return type of fn.
 * - t1, a1, ... are pairs, where t1 must match the type of the first
 *   argument of fn, t2 the second, etc. a1 is the corresponding
 *   first function argument (not name), etc.
 */
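/*
 * Example (illustrative sketch, not taken from the kernel sources): calling
 * a hypothetical handler do_foo(struct pt_regs *regs, int nr) on the stack
 * whose address is stored in new_stack:
 *
 *	rc = call_on_stack(2, new_stack, int, do_foo,
 *			   struct pt_regs *, regs, int, nr);
 */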
#define call_on_stack(nr, stack, rettype, fn, ...) \
({ \
	rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = fn; \
	unsigned long frame = current_frame_address(); \
	unsigned long __stack = stack; \
	unsigned long prev; \
	CALL_LARGS_##nr(__VA_ARGS__); \
	CALL_REGS_##nr; \
 \
	CALL_TYPECHECK_##nr(__VA_ARGS__); \
	asm volatile( \
		"	lgr	%[_prev],15\n" \
		"	lg	15,%[_stack]\n" \
		"	stg	%[_frame],%[_bc](15)\n" \
		"	brasl	14,%[_fn]\n" \
		"	lgr	15,%[_prev]\n" \
		: [_prev] "=&d" (prev), CALL_FMT_##nr \
		: [_stack] "R" (__stack), \
		  [_bc] "i" (offsetof(struct stack_frame, back_chain)), \
		  [_frame] "d" (frame), \
		  [_fn] "X" (__fn) : CALL_CLOBBER_##nr); \
	(rettype)r2; \
})

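#define call_on_stack_noreturn(fn, stack) \
({ \
	void (*__fn)(void) = fn; \
 \
	asm volatile( \
		"	la	15,0(%[_stack])\n" \
		"	xc	%[_bc](8,15),%[_bc](15)\n" \
		"	brasl	14,%[_fn]\n" \
		::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
		  [_stack] "a" (stack), [_fn] "X" (__fn)); \
	BUG(); \
})
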
/*
 * Use call_nodat() to call a function with DAT disabled.
 * Proper sign and zero extension of function arguments is done.
 * Usage:
 *
 * rc = call_nodat(nr, rettype, fn, t1, a1, t2, a2, ...)
 *
 * - nr specifies the number of function arguments of fn.
 * - fn is the function to be called, where fn is a physical address.
 * - rettype is the return type of fn.
 * - t1, a1, ... are pairs, where t1 must match the type of the first
 *   argument of fn, t2 the second, etc. a1 is the corresponding
 *   first function argument (not name), etc.
 *
 * fn() is called with the standard C function call ABI, with the exception
 * that no useful stack frame or stack pointer is passed via register 15.
 * Therefore the called function must not use r15 to access the stack.
 */
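/*
 * Example (illustrative sketch, not taken from the kernel sources): calling
 * a hypothetical function at a physical address that takes one unsigned long
 * argument and returns an int:
 *
 *	rc = call_nodat(1, int, (int (*)(unsigned long))__pa(fn_virt),
 *			unsigned long, arg);
 */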
#define call_nodat(nr, rettype, fn, ...) \
({ \
	rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = (fn); \
	/* aligned since psw_leave must not cross page boundary */ \
	psw_t __aligned(16) psw_leave; \
	psw_t psw_enter; \
	CALL_LARGS_##nr(__VA_ARGS__); \
	CALL_REGS_##nr; \
 \
	CALL_TYPECHECK_##nr(__VA_ARGS__); \
	psw_enter.mask = PSW_KERNEL_BITS & ~PSW_MASK_DAT; \
	psw_enter.addr = (unsigned long)__fn; \
	asm volatile( \
		"	epsw	0,1\n" \
		"	risbg	1,0,0,31,32\n" \
		"	larl	7,1f\n" \
		"	stg	1,%[psw_leave]\n" \
		"	stg	7,8+%[psw_leave]\n" \
		"	la	7,%[psw_leave]\n" \
		"	lra	7,0(7)\n" \
		"	larl	1,0f\n" \
		"	lra	14,0(1)\n" \
		"	lpswe	%[psw_enter]\n" \
		"0:	lpswe	0(7)\n" \
		"1:\n" \
		: CALL_FMT_##nr, [psw_leave] "=Q" (psw_leave) \
		: [psw_enter] "Q" (psw_enter) \
		: "7", CALL_CLOBBER_##nr); \
	(rettype)r2; \
})

#endif /* _ASM_S390_STACKTRACE_H */