/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: common low-level thread information accessors
 *
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds
 */

#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H

#include <linux/types.h>
#include <linux/limits.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
#include <linux/errno.h>

#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
 * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
 * including <asm/current.h> can cause a circular dependency on some platforms.
 */
#include <asm/current.h>
#define current_thread_info() ((struct thread_info *)current)
#endif

#include <linux/bitops.h>

/*
 * For per-arch arch_within_stack_frames() implementations, defined in
 * asm/thread_info.h.
 */
enum {
	BAD_STACK = -1,
	NOT_STACK = 0,
	GOOD_FRAME,
	GOOD_STACK,
};
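
/*
 * Rough illustration of how these values are meant to be read: NOT_STACK
 * means the object is not on the stack at all, GOOD_FRAME/GOOD_STACK mean it
 * lies entirely within a valid frame or stack region, and BAD_STACK is the
 * error case (e.g. an object straddling frame boundaries) that callers such
 * as hardened usercopy should reject.
 */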

#ifdef CONFIG_GENERIC_ENTRY
enum syscall_work_bit {
	SYSCALL_WORK_BIT_SECCOMP,
	SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
	SYSCALL_WORK_BIT_SYSCALL_TRACE,
	SYSCALL_WORK_BIT_SYSCALL_EMU,
	SYSCALL_WORK_BIT_SYSCALL_AUDIT,
	SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
	SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
};

#define SYSCALL_WORK_SECCOMP		BIT(SYSCALL_WORK_BIT_SECCOMP)
#define SYSCALL_WORK_SYSCALL_TRACEPOINT	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
#define SYSCALL_WORK_SYSCALL_TRACE	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
#define SYSCALL_WORK_SYSCALL_EMU	BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
#define SYSCALL_WORK_SYSCALL_AUDIT	BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
#define SYSCALL_WORK_SYSCALL_EXIT_TRAP	BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
#endif
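
/*
 * Illustrative note: each SYSCALL_WORK_* mask is just BIT() of the matching
 * enum bit, e.g. SYSCALL_WORK_BIT_SECCOMP == 0 and SYSCALL_WORK_SECCOMP ==
 * BIT(0) == 0x1. The bit numbers index thread_info::syscall_work for the
 * set/clear/test helpers further down, while the masks are what entry code
 * tests against a snapshot of that word.
 */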

#include <asm/thread_info.h>

#ifndef TIF_NEED_RESCHED_LAZY
#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
#error Inconsistent PREEMPT_LAZY
#endif
#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
#endif
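
/*
 * Clarifying note: without CONFIG_ARCH_HAS_PREEMPT_LAZY the lazy flag simply
 * aliases TIF_NEED_RESCHED, so code setting or testing TIF_NEED_RESCHED_LAZY
 * transparently falls back to the ordinary reschedule flag. The #error above
 * catches the inconsistent case where an architecture selects lazy preemption
 * but does not provide the flag.
 */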

#ifdef __KERNEL__

#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif

static inline long set_restart_fn(struct restart_block *restart,
					long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);
	return -ERESTART_RESTARTBLOCK;
}
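
/*
 * Example usage (sketch with a hypothetical restart handler): a syscall that
 * needs to be transparently restarted after signal delivery stashes its state
 * in current->restart_block and returns the value from set_restart_fn():
 *
 *	struct restart_block *restart = &current->restart_block;
 *
 *	restart->futex.uaddr = uaddr;
 *	restart->futex.val = val;
 *	return set_restart_fn(restart, my_wait_restart);
 *
 * where my_wait_restart() is the hypothetical function re-invoked when the
 * syscall is restarted.
 */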

#ifndef THREAD_ALIGN
#define THREAD_ALIGN	THREAD_SIZE
#endif

#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)
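
/*
 * THREADINFO_GFP is the allocation mask intended for kernel stack /
 * thread_info allocations in the core fork path: __GFP_ZERO ensures a new
 * task starts with all thread flags cleared, and GFP_KERNEL_ACCOUNT charges
 * the memory to the allocating task's memory cgroup.
 */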

/*
 * flag set/clear/test wrappers
 * - pass TIF_xxxx constants to these functions
 */

static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, (unsigned long *)&ti->flags);
}

static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
					 bool value)
{
	if (value)
		set_ti_thread_flag(ti, flag);
	else
		clear_ti_thread_flag(ti, flag);
}

static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, (unsigned long *)&ti->flags);
}

/*
 * This may be used in noinstr code, and needs to be __always_inline to prevent
 * inadvertent instrumentation.
 */
static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
{
	return READ_ONCE(ti->flags);
}

#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
	update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)
#define read_thread_flags() \
	read_ti_thread_flags(current_thread_info())

#define read_task_thread_flags(t) \
	read_ti_thread_flags(task_thread_info(t))
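
/*
 * Example usage (illustrative; the TIF_* constants come from the per-arch
 * asm/thread_info.h):
 *
 *	set_thread_flag(TIF_NEED_RESCHED);
 *	if (test_thread_flag(TIF_SIGPENDING))
 *		return -EINTR;
 *
 * The *_ti_* variants take an explicit thread_info, so another task's flags
 * can be manipulated via e.g. set_ti_thread_flag(task_thread_info(p), flag).
 */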

#ifdef CONFIG_GENERIC_ENTRY
#define set_syscall_work(fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define test_syscall_work(fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define clear_syscall_work(fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)

#define set_task_syscall_work(t, fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define test_task_syscall_work(t, fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define clear_task_syscall_work(t, fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)

#else /* CONFIG_GENERIC_ENTRY */

#define set_syscall_work(fl) \
	set_ti_thread_flag(current_thread_info(), TIF_##fl)
#define test_syscall_work(fl) \
	test_ti_thread_flag(current_thread_info(), TIF_##fl)
#define clear_syscall_work(fl) \
	clear_ti_thread_flag(current_thread_info(), TIF_##fl)

#define set_task_syscall_work(t, fl) \
	set_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define test_task_syscall_work(t, fl) \
	test_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define clear_task_syscall_work(t, fl) \
	clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */
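
/*
 * Example usage (illustrative): callers use the same helper names regardless
 * of CONFIG_GENERIC_ENTRY, e.g. marking a task for seccomp syscall filtering:
 *
 *	set_task_syscall_work(task, SECCOMP);
 *
 * which resolves to the SYSCALL_WORK_BIT_SECCOMP bit in
 * thread_info::syscall_work with generic entry, and to the architecture's
 * TIF_SECCOMP flag otherwise.
 */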

#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H

static __always_inline bool tif_test_bit(int bit)
{
	return arch_test_bit(bit,
			     (unsigned long *)(&current_thread_info()->flags));
}

#else

static __always_inline bool tif_test_bit(int bit)
{
	return test_bit(bit,
			(unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */

static __always_inline bool tif_need_resched(void)
{
	return tif_test_bit(TIF_NEED_RESCHED);
}
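
/*
 * Example usage (illustrative): tif_need_resched() is the low-level check
 * behind need_resched()-style tests, e.g. a long-running kernel loop might do
 *
 *	if (tif_need_resched())
 *		schedule();
 *
 * The arch_test_bit() variant of tif_test_bit() above bypasses instrumentation
 * when the instrumented bitops header is in use, keeping the check usable from
 * non-instrumentable (noinstr) code.
 */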

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif

#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
				bool to_user);

static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif /* CONFIG_HARDENED_USERCOPY */
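
/*
 * Example usage (illustrative): copy routines call check_object_size() just
 * before the actual copy, so that hardened usercopy can validate the kernel
 * object's bounds when the size is not a compile-time constant:
 *
 *	check_object_size(kbuf, len, true);	(true == copying out to user)
 *
 * With CONFIG_HARDENED_USERCOPY=n the call compiles away entirely.
 */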

extern void __compiletime_error("copy source size is too small")
__bad_copy_from(void);
extern void __compiletime_error("copy destination size is too small")
__bad_copy_to(void);

void __copy_overflow(int size, unsigned long count);

static inline void copy_overflow(int size, unsigned long count)
{
	if (IS_ENABLED(CONFIG_BUG))
		__copy_overflow(size, count);
}

static __always_inline __must_check bool
check_copy_size(const void *addr, size_t bytes, bool is_source)
{
	int sz = __builtin_object_size(addr, 0);
	if (unlikely(sz >= 0 && sz < bytes)) {
		if (!__builtin_constant_p(bytes))
			copy_overflow(sz, bytes);
		else if (is_source)
			__bad_copy_from();
		else
			__bad_copy_to();
		return false;
	}
	if (WARN_ON_ONCE(bytes > INT_MAX))
		return false;
	check_object_size(addr, bytes, is_source);
	return true;
}
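
/*
 * Sketch of typical use (hypothetical wrapper name): check_copy_size() is
 * meant to gate user-copy helpers, roughly
 *
 *	static __always_inline unsigned long
 *	my_copy_to_user(void __user *to, const void *from, unsigned long n)
 *	{
 *		if (check_copy_size(from, n, true))
 *			n = raw_copy_to_user(to, from, n);
 *		return n;
 *	}
 *
 * so that a provably undersized source or destination fails the build via
 * __bad_copy_from()/__bad_copy_to(), runtime-sized overflows go through
 * copy_overflow(), and in-bounds copies additionally get the hardened
 * usercopy object check.
 */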

#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif

void arch_task_cache_init(void); /* for CONFIG_SH */
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst,
				struct task_struct *src);

#endif /* __KERNEL__ */

#endif /* _LINUX_THREAD_INFO_H */