1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_S390_STACKTRACE_H
3#define _ASM_S390_STACKTRACE_H
4
5#include <linux/uaccess.h>
6#include <linux/ptrace.h>
7#include <asm/switch_to.h>
8
9enum stack_type {
10 STACK_TYPE_UNKNOWN,
11 STACK_TYPE_TASK,
12 STACK_TYPE_IRQ,
13 STACK_TYPE_NODAT,
14 STACK_TYPE_RESTART,
15 STACK_TYPE_MCCK,
16};
17
18struct stack_info {
19 enum stack_type type;
20 unsigned long begin, end;
21};
22
/* Human-readable name for a stack type. */
const char *stack_type_name(enum stack_type type);
/*
 * Classify stack pointer @sp for @task and fill in @info.
 * @visit_mask presumably records stacks already visited so unwinding
 * cannot loop between stacks - confirm against the implementation.
 */
int get_stack_info(unsigned long sp, struct task_struct *task,
		   struct stack_info *info, unsigned long *visit_mask);
26
27static inline bool on_stack(struct stack_info *info,
28 unsigned long addr, size_t len)
29{
30 if (info->type == STACK_TYPE_UNKNOWN)
31 return false;
32 if (addr + len < addr)
33 return false;
34 return addr >= info->begin && addr + len <= info->end;
35}
36
/*
 * Stack layout of a C stack frame.
 * Kernel uses the packed stack layout (-mpacked-stack).
 */
struct stack_frame {
	union {
		unsigned long empty[9];	/* otherwise unused area of the frame */
		struct {
			/*
			 * SIE (guest execution) state kept in the stack
			 * frame. NOTE(review): offsets must match the SIE
			 * entry/exit assembly defined elsewhere - confirm
			 * before rearranging.
			 */
			unsigned long sie_control_block;
			unsigned long sie_savearea;
			unsigned long sie_reason;
			unsigned long sie_flags;
			unsigned long sie_control_block_phys;
		};
	};
	unsigned long gprs[10];		/* saved registers (presumably %r6-%r15) */
	unsigned long back_chain;	/* link to the caller's stack frame */
};
55
/*
 * Unlike current_stack_pointer which simply contains the current value of %r15
 * current_frame_address() returns function stack frame address, which matches
 * %r15 upon function invocation. It may differ from %r15 later if function
 * allocates stack for local variables or new stack frame to call other
 * functions.
 *
 * __builtin_frame_address(0) yields the address of the back_chain slot,
 * hence the offsetof() correction to get the start of the frame.
 */
#define current_frame_address()						\
	((unsigned long)__builtin_frame_address(0) -			\
	 offsetof(struct stack_frame, back_chain))
66
67static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
68 struct pt_regs *regs)
69{
70 if (regs)
71 return (unsigned long)kernel_stack_pointer(regs);
72 if (task == current)
73 return current_frame_address();
74 return (unsigned long)task->thread.ksp;
75}
76
/*
 * To keep this simple mark register 2-6 as being changed (volatile)
 * by the called function, even though register 6 is saved/nonvolatile.
 */
#define CALL_FMT_0 "=&d" (r2)
#define CALL_FMT_1 "+&d" (r2)
#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)

/*
 * Registers clobbered by the asm in call_on_stack(): %r0, %r1, the
 * %r14 return address, condition code and memory are always clobbered;
 * argument registers not used for the given arity are added on top.
 */
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
#define CALL_CLOBBER_0 CALL_CLOBBER_1
94
/*
 * Declare local long copies of up to five call arguments. Casting
 * through the declared type (tN) and then to long performs the proper
 * sign or zero extension before the value is bound to a register.
 */
#define CALL_LARGS_0(...)						\
	long dummy = 0
#define CALL_LARGS_1(t1, a1)						\
	long arg1 = (long)(t1)(a1)
#define CALL_LARGS_2(t1, a1, t2, a2)					\
	CALL_LARGS_1(t1, a1);						\
	long arg2 = (long)(t2)(a2)
#define CALL_LARGS_3(t1, a1, t2, a2, t3, a3)				\
	CALL_LARGS_2(t1, a1, t2, a2);					\
	long arg3 = (long)(t3)(a3)
#define CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4)			\
	CALL_LARGS_3(t1, a1, t2, a2, t3, a3);				\
	long arg4 = (long)(t4)(a4)
#define CALL_LARGS_5(t1, a1, t2, a2, t3, a3, t4, a4, t5, a5)		\
	CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4);			\
	long arg5 = (long)(t5)(a5)
111
/*
 * Bind the local argN copies (from CALL_LARGS_n) to the argument
 * registers %r2-%r6 as explicit register variables, so the asm in
 * call_on_stack() finds the arguments where the callee expects them.
 */
#define CALL_REGS_0							\
	register long r2 asm("2") = dummy
#define CALL_REGS_1							\
	register long r2 asm("2") = arg1
#define CALL_REGS_2							\
	CALL_REGS_1;							\
	register long r3 asm("3") = arg2
#define CALL_REGS_3							\
	CALL_REGS_2;							\
	register long r4 asm("4") = arg3
#define CALL_REGS_4							\
	CALL_REGS_3;							\
	register long r5 asm("5") = arg4
#define CALL_REGS_5							\
	CALL_REGS_4;							\
	register long r6 asm("6") = arg5
128
/*
 * Compile-time check that each (type, argument) pair passed to
 * call_on_stack() matches, via the kernel's typecheck() macro.
 */
#define CALL_TYPECHECK_0(...)
#define CALL_TYPECHECK_1(t, a, ...)					\
	typecheck(t, a)
#define CALL_TYPECHECK_2(t, a, ...)					\
	CALL_TYPECHECK_1(__VA_ARGS__);					\
	typecheck(t, a)
#define CALL_TYPECHECK_3(t, a, ...)					\
	CALL_TYPECHECK_2(__VA_ARGS__);					\
	typecheck(t, a)
#define CALL_TYPECHECK_4(t, a, ...)					\
	CALL_TYPECHECK_3(__VA_ARGS__);					\
	typecheck(t, a)
#define CALL_TYPECHECK_5(t, a, ...)					\
	CALL_TYPECHECK_4(__VA_ARGS__);					\
	typecheck(t, a)
144
/*
 * Build the parameter type list for the function pointer declaration
 * in call_on_stack(): CALL_PARM_n expands to the first n types of the
 * (type, argument) pair list. CALL_PARM_6 appears unused here, since
 * call_on_stack() supports at most five arguments.
 */
#define CALL_PARM_0(...) void
#define CALL_PARM_1(t, a, ...) t
#define CALL_PARM_2(t, a, ...) t, CALL_PARM_1(__VA_ARGS__)
#define CALL_PARM_3(t, a, ...) t, CALL_PARM_2(__VA_ARGS__)
#define CALL_PARM_4(t, a, ...) t, CALL_PARM_3(__VA_ARGS__)
#define CALL_PARM_5(t, a, ...) t, CALL_PARM_4(__VA_ARGS__)
#define CALL_PARM_6(t, a, ...) t, CALL_PARM_5(__VA_ARGS__)
152
/*
 * Use call_on_stack() to call a function switching to a specified
 * stack. Proper sign and zero extension of function arguments is
 * done. Usage:
 *
 * rc = call_on_stack(nr, stack, rettype, fn, t1, a1, t2, a2, ...)
 *
 * - nr specifies the number of function arguments of fn.
 * - stack specifies the stack to be used.
 * - fn is the function to be called.
 * - rettype is the return type of fn.
 * - t1, a1, ... are pairs, where t1 must match the type of the first
 *   argument of fn, t2 the second, etc. a1 is the corresponding
 *   first function argument (not name), etc.
 */
#define call_on_stack(nr, stack, rettype, fn, ...)			\
({									\
	rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = fn;		\
	unsigned long frame = current_frame_address();			\
	unsigned long __stack = stack;					\
	unsigned long prev;						\
	CALL_LARGS_##nr(__VA_ARGS__);					\
	CALL_REGS_##nr;							\
									\
	CALL_TYPECHECK_##nr(__VA_ARGS__);				\
	/*								\
	 * Save %r15, switch to the new stack, store the caller's	\
	 * frame address into the new frame's back chain (so unwinders	\
	 * can cross the stack switch), call fn via %r14, then restore	\
	 * the original stack pointer. fn's result is left in %r2.	\
	 */								\
	asm volatile(							\
		"	lgr	%[_prev],15\n"				\
		"	lg	15,%[_stack]\n"				\
		"	stg	%[_frame],%[_bc](15)\n"			\
		"	brasl	14,%[_fn]\n"				\
		"	lgr	15,%[_prev]\n"				\
		: [_prev] "=&d" (prev), CALL_FMT_##nr			\
		: [_stack] "R" (__stack),				\
		  [_bc] "i" (offsetof(struct stack_frame, back_chain)),	\
		  [_frame] "d" (frame),					\
		  [_fn] "X" (__fn) : CALL_CLOBBER_##nr);		\
	(rettype)r2;							\
})
191
/*
 * Switch to the given stack, clear the back chain of the new frame
 * (the xc) so stack unwinding terminates there, and call fn. fn must
 * never return; BUG() fires if it does.
 */
#define call_on_stack_noreturn(fn, stack)				\
({									\
	void (*__fn)(void) = fn;					\
									\
	asm volatile(							\
		"	la	15,0(%[_stack])\n"			\
		"	xc	%[_bc](8,15),%[_bc](15)\n"		\
		"	brasl	14,%[_fn]\n"				\
		::[_bc] "i" (offsetof(struct stack_frame, back_chain)),	\
		  [_stack] "a" (stack), [_fn] "X" (__fn));		\
	BUG();								\
})
204
205#endif /* _ASM_S390_STACKTRACE_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_S390_STACKTRACE_H
3#define _ASM_S390_STACKTRACE_H
4
5#include <linux/uaccess.h>
6#include <linux/ptrace.h>
7#include <asm/switch_to.h>
8
/*
 * Kinds of kernel stacks a stack pointer may belong to; used by
 * get_stack_info() to classify an address during stack unwinding.
 */
enum stack_type {
	STACK_TYPE_UNKNOWN,	/* address is not on any known stack */
	STACK_TYPE_TASK,	/* per-task kernel stack */
	STACK_TYPE_IRQ,		/* interrupt stack */
	STACK_TYPE_NODAT,	/* stack used with DAT disabled - presumably; confirm */
	STACK_TYPE_RESTART,	/* restart-interrupt stack - presumably; confirm */
};
16
/*
 * Result of get_stack_info(): the type of the stack containing an
 * address and that stack's bounds. on_stack() treats end as an
 * exclusive upper bound.
 */
struct stack_info {
	enum stack_type type;
	unsigned long begin, end;	/* stack address range */
};
21
/* Human-readable name for a stack type. */
const char *stack_type_name(enum stack_type type);
/*
 * Classify stack pointer @sp for @task and fill in @info.
 * @visit_mask presumably records stacks already visited so unwinding
 * cannot loop between stacks - confirm against the implementation.
 */
int get_stack_info(unsigned long sp, struct task_struct *task,
		   struct stack_info *info, unsigned long *visit_mask);
25
26static inline bool on_stack(struct stack_info *info,
27 unsigned long addr, size_t len)
28{
29 if (info->type == STACK_TYPE_UNKNOWN)
30 return false;
31 if (addr + len < addr)
32 return false;
33 return addr >= info->begin && addr + len <= info->end;
34}
35
36static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
37 struct pt_regs *regs)
38{
39 if (regs)
40 return (unsigned long) kernel_stack_pointer(regs);
41 if (task == current)
42 return current_stack_pointer();
43 return (unsigned long) task->thread.ksp;
44}
45
/*
 * Stack layout of a C stack frame.
 * Two variants: the standard s390 layout, and the packed layout used
 * when the kernel is built with -mpacked-stack (__PACK_STACK defined).
 */
#ifndef __PACK_STACK
struct stack_frame {
	unsigned long back_chain;	/* link to the caller's stack frame */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* saved registers (presumably %r6-%r15) */
	unsigned int empty2[8];
};
#else
struct stack_frame {
	unsigned long empty1[5];
	unsigned int empty2[8];
	unsigned long gprs[10];		/* saved registers (presumably %r6-%r15) */
	unsigned long back_chain;	/* back chain sits at the frame end here */
};
#endif
64
/*
 * Unlike current_stack_pointer() which simply returns current value of %r15
 * current_frame_address() returns function stack frame address, which matches
 * %r15 upon function invocation. It may differ from %r15 later if function
 * allocates stack for local variables or new stack frame to call other
 * functions.
 *
 * __builtin_frame_address(0) yields the address of the back_chain slot,
 * hence the offsetof() correction to get the start of the frame.
 */
#define current_frame_address()						\
	((unsigned long)__builtin_frame_address(0) -			\
	 offsetof(struct stack_frame, back_chain))
75
/*
 * Bind up to five call arguments to the s390 argument registers
 * %r2-%r6 as explicit register variables for the CALL_ON_STACK asm.
 *
 * Fix: CALL_ARGS_4 and CALL_ARGS_5 declared a second and third
 * variable named "r4" (bound to registers 5 and 6). That redeclared
 * r4 in the same scope and left the r5/r6 operands referenced by
 * CALL_FMT_4/CALL_FMT_5 undefined, so CALL_ON_STACK could not compile
 * with four or five arguments. They must declare r5 and r6.
 */
#define CALL_ARGS_0()							\
	register unsigned long r2 asm("2")
#define CALL_ARGS_1(arg1)						\
	register unsigned long r2 asm("2") = (unsigned long)(arg1)
#define CALL_ARGS_2(arg1, arg2)						\
	CALL_ARGS_1(arg1);						\
	register unsigned long r3 asm("3") = (unsigned long)(arg2)
#define CALL_ARGS_3(arg1, arg2, arg3)					\
	CALL_ARGS_2(arg1, arg2);					\
	register unsigned long r4 asm("4") = (unsigned long)(arg3)
#define CALL_ARGS_4(arg1, arg2, arg3, arg4)				\
	CALL_ARGS_3(arg1, arg2, arg3);					\
	register unsigned long r5 asm("5") = (unsigned long)(arg4)
#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5)			\
	CALL_ARGS_4(arg1, arg2, arg3, arg4);				\
	register unsigned long r6 asm("6") = (unsigned long)(arg5)
92
/*
 * Constraint lists for CALL_ON_STACK's asm. Note that CALL_FMT_n ends
 * in the middle of the operand lists (trailing ':' or ','), so it can
 * only be expanded at its one use site inside CALL_ON_STACK.
 */
#define CALL_FMT_0 "=&d" (r2) :
#define CALL_FMT_1 "+&d" (r2) :
#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
#define CALL_FMT_5 CALL_FMT_4 "d" (r6),

/*
 * Registers clobbered by the call: %r0, %r1, the %r14 return address,
 * condition code and memory are always clobbered; argument registers
 * not used for the given arity are added on top.
 */
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
#define CALL_CLOBBER_3 CALL_CLOBBER_4, "5"
#define CALL_CLOBBER_2 CALL_CLOBBER_3, "4"
#define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
#define CALL_CLOBBER_0 CALL_CLOBBER_1
106
/*
 * Call fn(args...) on the specified stack: save the current %r15,
 * load the new stack pointer, store the caller's frame address into
 * the new frame's back chain (so unwinders can cross the switch),
 * call fn via %r14, then restore %r15. fn's result is left in %r2.
 */
#define CALL_ON_STACK(fn, stack, nr, args...)				\
({									\
	unsigned long frame = current_frame_address();			\
	CALL_ARGS_##nr(args);						\
	unsigned long prev;						\
									\
	asm volatile(							\
		"	la	%[_prev],0(15)\n"			\
		"	lg	15,%[_stack]\n"				\
		"	stg	%[_frame],%[_bc](15)\n"			\
		"	brasl	14,%[_fn]\n"				\
		"	la	15,0(%[_prev])\n"			\
		: [_prev] "=&a" (prev), CALL_FMT_##nr			\
		  [_stack] "R" (stack),					\
		  [_bc] "i" (offsetof(struct stack_frame, back_chain)),	\
		  [_frame] "d" (frame),					\
		  [_fn] "X" (fn) : CALL_CLOBBER_##nr);			\
	r2;								\
})
126
/*
 * Switch to the given stack, clear the back chain of the new frame
 * (the xc) so stack unwinding terminates there, and call fn. fn must
 * never return; BUG() fires if it does.
 */
#define CALL_ON_STACK_NORETURN(fn, stack)				\
({									\
	asm volatile(							\
		"	la	15,0(%[_stack])\n"			\
		"	xc	%[_bc](8,15),%[_bc](15)\n"		\
		"	brasl	14,%[_fn]\n"				\
		::[_bc] "i" (offsetof(struct stack_frame, back_chain)),	\
		  [_stack] "a" (stack), [_fn] "X" (fn));		\
	BUG();								\
})
137
138#endif /* _ASM_S390_STACKTRACE_H */