v4.17: arch/x86/include/asm/thread_info.h
/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: low-level thread information
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifndef _ASM_X86_THREAD_INFO_H
#define _ASM_X86_THREAD_INFO_H

#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/types.h>

/*
 * TOP_OF_KERNEL_STACK_PADDING is a number of unused bytes that we
 * reserve at the top of the kernel stack.  We do it because of a nasty
 * 32-bit corner case.  On x86_32, the hardware stack frame is
 * variable-length.  Except for vm86 mode, struct pt_regs assumes a
 * maximum-length frame.  If we enter from CPL 0, the top 8 bytes of
 * pt_regs don't actually exist.  Ordinarily this doesn't matter, but it
 * does in at least one case:
 *
 * If we take an NMI early enough in SYSENTER, then we can end up with
 * pt_regs that extends above sp0.  On the way out, in the espfix code,
 * we can read the saved SS value, but that value will be above sp0.
 * Without this offset, that can result in a page fault.  (We are
 * careful that, in this case, the value we read doesn't matter.)
 *
 * In vm86 mode, the hardware frame is much longer still, so add 16
 * bytes to make room for the real-mode segments.
 *
 * x86_64 has a fixed-length stack frame.
 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_VM86
#  define TOP_OF_KERNEL_STACK_PADDING 16
# else
#  define TOP_OF_KERNEL_STACK_PADDING 8
# endif
#else
# define TOP_OF_KERNEL_STACK_PADDING 0
#endif
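
/*
 * Illustrative note (not part of the original header): the padding is
 * consumed when the kernel locates pt_regs at the top of a task's stack.
 * asm/processor.h defines the relationship roughly as:
 *
 *	#define task_pt_regs(task)					\
 *	({								\
 *		unsigned long __ptr = (unsigned long)task_stack_page(task); \
 *		__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;	\
 *		((struct pt_regs *)__ptr) - 1;				\
 *	})
 *
 * so on x86_32 the hardware frame can grow into the reserved bytes above
 * pt_regs without running off the end of the stack.
 */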

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 */
#ifndef __ASSEMBLY__
struct task_struct;
#include <asm/cpufeature.h>
#include <linux/atomic.h>

struct thread_info {
	unsigned long		flags;		/* low level flags */
	u32			status;		/* thread synchronous flags */
};

#define INIT_THREAD_INFO(tsk)			\
{						\
	.flags		= 0,			\
}
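
/*
 * Illustrative note (not from this header): generic code reaches these
 * flags through current_thread_info() and the thread-flag helpers in
 * include/linux/thread_info.h, which are atomic bitops on ->flags,
 * roughly:
 *
 *	static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 *	{
 *		return test_bit(flag, (unsigned long *)&ti->flags);
 *	}
 *	#define test_thread_flag(flag) \
 *		test_ti_thread_flag(current_thread_info(), flag)
 *
 * e.g. signal delivery tests test_thread_flag(TIF_SIGPENDING) and the
 * scheduler marks a task for rescheduling with set_tsk_thread_flag().
 */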

#else /* !__ASSEMBLY__ */

#include <asm/asm-offsets.h>

#endif

/*
 * thread information flags
 * - these are process state flags that various assembly files
 *   may need to access
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
#define TIF_SSBD			5	/* Reduced data speculation */
#define TIF_SYSCALL_EMU		6	/* syscall emulation active */
#define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
#define TIF_SECCOMP		8	/* secure computing */
#define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
#define TIF_UPROBE		12	/* breakpointed or singlestepping */
#define TIF_PATCH_PENDING	13	/* pending live patching update */
#define TIF_NOCPUID		15	/* CPUID is not accessible in userland */
#define TIF_NOTSC		16	/* TSC is not accessible in userland */
#define TIF_IA32		17	/* IA32 compatibility process */
#define TIF_NOHZ		19	/* in adaptive nohz mode */
#define TIF_MEMDIE		20	/* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG	21	/* idle is polling for TIF_NEED_RESCHED */
#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
#define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
#define TIF_SYSCALL_TRACEPOINT	28	/* syscall tracepoint instrumentation */
#define TIF_ADDR32		29	/* 32-bit address space on 64 bits */
#define TIF_X32			30	/* 32-bit native x86-64 binary */
#define TIF_FSCHECK		31	/* Check FS is USER_DS on return */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SSBD		(1 << TIF_SSBD)
#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE		(1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
#define _TIF_NOCPUID		(1 << TIF_NOCPUID)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_IA32		(1 << TIF_IA32)
#define _TIF_NOHZ		(1 << TIF_NOHZ)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_ADDR32		(1 << TIF_ADDR32)
#define _TIF_X32		(1 << TIF_X32)
#define _TIF_FSCHECK		(1 << TIF_FSCHECK)
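
/*
 * Illustrative note (not from this header): each TIF_* value is a bit
 * number and the matching _TIF_* value is the corresponding mask, so the
 * two forms serve different users:
 *
 *	set_thread_flag(TIF_NEED_RESCHED);		// atomic set_bit() on ->flags
 *	if (current_thread_info()->flags &
 *	    (_TIF_SIGPENDING | _TIF_NEED_RESCHED))	// bulk tests use the masks
 *		do_exit_work();
 *
 * do_exit_work() above is only a placeholder for whatever work the caller
 * performs.
 */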

/*
 * work to do in syscall_trace_enter().  Also includes TIF_NOHZ for
 * enter_from_user_mode()
 */
#define _TIF_WORK_SYSCALL_ENTRY	\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT |	\
	 _TIF_NOHZ)

/* work to do on any return to user space */
#define _TIF_ALLWORK_MASK						\
	(_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING |	\
	 _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU |	\
	 _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE |	\
	 _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT |	\
	 _TIF_FSCHECK)
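
/*
 * Illustrative note (not from this header): the syscall entry code in
 * arch/x86/entry/common.c of this era consults _TIF_WORK_SYSCALL_ENTRY
 * before dispatching a system call, roughly:
 *
 *	if (READ_ONCE(current_thread_info()->flags) & _TIF_WORK_SYSCALL_ENTRY)
 *		nr = syscall_trace_enter(regs);	// ptrace, seccomp, audit, tracepoints
 *
 * and the return-to-user path makes a similar check of the pending work
 * bits before finally leaving the kernel.
 */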

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW							\
	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)

#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
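
/*
 * Illustrative note (not from this header): __switch_to() in
 * arch/x86/kernel/process_64.c (and its 32-bit counterpart) only takes
 * the slow path when one of these bits is set on either task, roughly
 * (the argument list varies by version):
 *
 *	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
 *		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 *		__switch_to_xtra(prev_p, next_p);
 *
 * which keeps the common context-switch path free of I/O-bitmap,
 * CPUID/TSC-faulting and speculation-control handling.
 */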

#define STACK_WARN		(THREAD_SIZE/8)

/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__

/*
 * Walks up the stack frames to make sure that the specified object is
 * entirely contained by a single stack frame.
 *
 * Returns:
 *	GOOD_FRAME	if within a frame
 *	BAD_STACK	if placed across a frame boundary (or outside stack)
 *	NOT_STACK	unable to determine (no frame pointers, etc)
 */
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
#if defined(CONFIG_FRAME_POINTER)
	const void *frame = NULL;
	const void *oldframe;

	oldframe = __builtin_frame_address(1);
	if (oldframe)
		frame = __builtin_frame_address(2);
	/*
	 * low ----------------------------------------------> high
	 * [saved bp][saved ip][args][local vars][saved bp][saved ip]
	 *                     ^----------------^
	 *               allow copies only within here
	 */
	while (stack <= frame && frame < stackend) {
		/*
		 * If obj + len extends past the last frame, this
		 * check won't pass and the next frame will be 0,
		 * causing us to bail out and correctly report
		 * the copy as invalid.
		 */
		if (obj + len <= frame)
			return obj >= oldframe + 2 * sizeof(void *) ?
				GOOD_FRAME : BAD_STACK;
		oldframe = frame;
		frame = *(const void * const *)frame;
	}
	return BAD_STACK;
#else
	return NOT_STACK;
#endif
}
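
/*
 * Illustrative note (not from this header): this helper backs
 * CONFIG_HARDENED_USERCOPY.  check_stack_object() in mm/usercopy.c calls
 * it for objects that live on the current stack, roughly:
 *
 *	const void * const stack = task_stack_page(current);
 *	const void * const stackend = stack + THREAD_SIZE;
 *	...
 *	return arch_within_stack_frames(stack, stackend, ptr, n);
 *
 * so a copy_to_user()/copy_from_user() that would span a stack frame
 * boundary is rejected when frame pointers are available.
 */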

#else /* !__ASSEMBLY__ */

#ifdef CONFIG_X86_64
# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
#endif

#endif

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/

#ifdef CONFIG_COMPAT
#define TS_I386_REGS_POKED	0x0004	/* regs poked by 32-bit ptracer */
#endif
#ifndef __ASSEMBLY__

#ifdef CONFIG_X86_32
#define in_ia32_syscall() true
#else
#define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \
			   current_thread_info()->status & TS_COMPAT)
#endif
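
/*
 * Illustrative note (not from this header): TS_COMPAT is set by the
 * 32-bit syscall entry paths in arch/x86/entry/common.c (roughly
 * current_thread_info()->status |= TS_COMPAT), so code running inside a
 * syscall can ask which ABI invoked it:
 *
 *	if (in_ia32_syscall())
 *		// interpret the arguments with 32-bit/compat semantics
 *
 * independently of whether the task itself is a 64-bit or IA-32 binary.
 */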

/*
 * Force syscall return via IRET by making it look as if there was
 * some work pending. IRET is our most capable (but slowest) syscall
 * return path, which is able to restore modified SS, CS and certain
 * EFLAGS values that other (fast) syscall return instructions
 * are not able to restore properly.
 */
#define force_iret() set_thread_flag(TIF_NOTIFY_RESUME)
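
/*
 * Illustrative note (not from this header): callers that rewrite the
 * saved user context, such as the sigreturn and vm86 paths, use this
 * after editing pt_regs, roughly:
 *
 *	regs->cs = new_cs;	// new_cs/new_ss are placeholders
 *	regs->ss = new_ss;
 *	force_iret();		// take the IRET exit path so the new selectors stick
 *
 * Setting TIF_NOTIFY_RESUME simply steers the exit code onto the slow,
 * fully-restoring return-to-user path.
 */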

extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_THREAD_INFO_H */
v6.2: arch/x86/include/asm/thread_info.h
/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: low-level thread information
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifndef _ASM_X86_THREAD_INFO_H
#define _ASM_X86_THREAD_INFO_H

#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/types.h>

/*
 * TOP_OF_KERNEL_STACK_PADDING is a number of unused bytes that we
 * reserve at the top of the kernel stack.  We do it because of a nasty
 * 32-bit corner case.  On x86_32, the hardware stack frame is
 * variable-length.  Except for vm86 mode, struct pt_regs assumes a
 * maximum-length frame.  If we enter from CPL 0, the top 8 bytes of
 * pt_regs don't actually exist.  Ordinarily this doesn't matter, but it
 * does in at least one case:
 *
 * If we take an NMI early enough in SYSENTER, then we can end up with
 * pt_regs that extends above sp0.  On the way out, in the espfix code,
 * we can read the saved SS value, but that value will be above sp0.
 * Without this offset, that can result in a page fault.  (We are
 * careful that, in this case, the value we read doesn't matter.)
 *
 * In vm86 mode, the hardware frame is much longer still, so add 16
 * bytes to make room for the real-mode segments.
 *
 * x86_64 has a fixed-length stack frame.
 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_VM86
#  define TOP_OF_KERNEL_STACK_PADDING 16
# else
#  define TOP_OF_KERNEL_STACK_PADDING 8
# endif
#else
# define TOP_OF_KERNEL_STACK_PADDING 0
#endif

/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 */
#ifndef __ASSEMBLY__
struct task_struct;
#include <asm/cpufeature.h>
#include <linux/atomic.h>

struct thread_info {
	unsigned long		flags;		/* low level flags */
	unsigned long		syscall_work;	/* SYSCALL_WORK_ flags */
	u32			status;		/* thread synchronous flags */
#ifdef CONFIG_SMP
	u32			cpu;		/* current CPU */
#endif
};

#define INIT_THREAD_INFO(tsk)			\
{						\
	.flags		= 0,			\
}
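
/*
 * Illustrative note (not from this header): syscall_work did not exist in
 * the v4.17 version above.  The SYSCALL_WORK_* bits (seccomp, ptrace,
 * audit, tracepoints, ...) are defined generically and tested by the
 * common entry code in kernel/entry/common.c rather than by
 * per-architecture assembly, roughly:
 *
 *	unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
 *	if (work & SYSCALL_WORK_ENTER)
 *		syscall = syscall_trace_enter(regs, syscall, work);
 *
 * which replaces the _TIF_WORK_SYSCALL_ENTRY mask used in v4.17.
 */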

#else /* !__ASSEMBLY__ */

#include <asm/asm-offsets.h>

#endif

/*
 * thread information flags
 * - these are process state flags that various assembly files
 *   may need to access
 */
#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
#define TIF_SIGPENDING		2	/* signal pending */
#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
#define TIF_SSBD		5	/* Speculative store bypass disable */
#define TIF_SPEC_IB		9	/* Indirect branch speculation mitigation */
#define TIF_SPEC_L1D_FLUSH	10	/* Flush L1D on mm switches (processes) */
#define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
#define TIF_UPROBE		12	/* breakpointed or singlestepping */
#define TIF_PATCH_PENDING	13	/* pending live patching update */
#define TIF_NEED_FPU_LOAD	14	/* load FPU on return to userspace */
#define TIF_NOCPUID		15	/* CPUID is not accessible in userland */
#define TIF_NOTSC		16	/* TSC is not accessible in userland */
#define TIF_NOTIFY_SIGNAL	17	/* signal notifications exist */
#define TIF_MEMDIE		20	/* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG	21	/* idle is polling for TIF_NEED_RESCHED */
#define TIF_IO_BITMAP		22	/* uses I/O bitmap */
#define TIF_SPEC_FORCE_UPDATE	23	/* Force speculation MSR update in context switch */
#define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
#define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
#define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
#define TIF_ADDR32		29	/* 32-bit address space on 64 bits */

#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SSBD		(1 << TIF_SSBD)
#define _TIF_SPEC_IB		(1 << TIF_SPEC_IB)
#define _TIF_SPEC_L1D_FLUSH	(1 << TIF_SPEC_L1D_FLUSH)
#define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE		(1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
#define _TIF_NEED_FPU_LOAD	(1 << TIF_NEED_FPU_LOAD)
#define _TIF_NOCPUID		(1 << TIF_NOCPUID)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
#define _TIF_SPEC_FORCE_UPDATE	(1 << TIF_SPEC_FORCE_UPDATE)
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
#define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
#define _TIF_ADDR32		(1 << TIF_ADDR32)

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW_BASE					\
	(_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP |		\
	 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)

/*
 * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
 */
#ifdef CONFIG_SMP
# define _TIF_WORK_CTXSW	(_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
#else
# define _TIF_WORK_CTXSW	(_TIF_WORK_CTXSW_BASE)
#endif
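
/*
 * Illustrative note (not from this header): on SMP a task that has
 * requested indirect-branch protection (TIF_SPEC_IB) needs STIBP set in
 * IA32_SPEC_CTRL while it runs, so the flag is part of the context-switch
 * work mask and the speculation-control code in arch/x86/kernel/process.c
 * folds it into the MSR, roughly:
 *
 *	u64 msr = x86_spec_ctrl_base;
 *	if (task_thread_info(next)->flags & _TIF_SPEC_IB)
 *		msr |= SPEC_CTRL_STIBP;
 *	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 *
 * On UP there is no sibling thread to protect against, so the bit is left
 * out of _TIF_WORK_CTXSW and the extra switch work can be skipped.
 */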

#ifdef CONFIG_X86_IOPL_IOPERM
# define _TIF_WORK_CTXSW_PREV	(_TIF_WORK_CTXSW| _TIF_USER_RETURN_NOTIFY | \
				 _TIF_IO_BITMAP)
#else
# define _TIF_WORK_CTXSW_PREV	(_TIF_WORK_CTXSW| _TIF_USER_RETURN_NOTIFY)
#endif

#define _TIF_WORK_CTXSW_NEXT	(_TIF_WORK_CTXSW)

#define STACK_WARN		(THREAD_SIZE/8)

/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__

/*
 * Walks up the stack frames to make sure that the specified object is
 * entirely contained by a single stack frame.
 *
 * Returns:
 *	GOOD_FRAME	if within a frame
 *	BAD_STACK	if placed across a frame boundary (or outside stack)
 *	NOT_STACK	unable to determine (no frame pointers, etc)
 */
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
#if defined(CONFIG_FRAME_POINTER)
	const void *frame = NULL;
	const void *oldframe;

	oldframe = __builtin_frame_address(1);
	if (oldframe)
		frame = __builtin_frame_address(2);
	/*
	 * low ----------------------------------------------> high
	 * [saved bp][saved ip][args][local vars][saved bp][saved ip]
	 *                     ^----------------^
	 *               allow copies only within here
	 */
	while (stack <= frame && frame < stackend) {
		/*
		 * If obj + len extends past the last frame, this
		 * check won't pass and the next frame will be 0,
		 * causing us to bail out and correctly report
		 * the copy as invalid.
		 */
		if (obj + len <= frame)
			return obj >= oldframe + 2 * sizeof(void *) ?
				GOOD_FRAME : BAD_STACK;
		oldframe = frame;
		frame = *(const void * const *)frame;
	}
	return BAD_STACK;
#else
	return NOT_STACK;
#endif
}

#endif  /* !__ASSEMBLY__ */

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/

#ifndef __ASSEMBLY__
#ifdef CONFIG_COMPAT
#define TS_I386_REGS_POKED	0x0004	/* regs poked by 32-bit ptracer */

#define arch_set_restart_data(restart)	\
	do { restart->arch_data = current_thread_info()->status; } while (0)

#endif
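
/*
 * Illustrative note (not from this header): arch_set_restart_data() is
 * invoked by the generic set_restart_fn() helper in
 * include/linux/thread_info.h, roughly:
 *
 *	restart->fn = fn;
 *	arch_set_restart_data(restart);
 *	return -ERESTART_RESTARTBLOCK;
 *
 * Snapshotting ->status into restart_block.arch_data preserves the
 * TS_COMPAT state of the interrupted syscall, so a later syscall restart
 * can still be recognised as a compat restart even though TS_COMPAT
 * itself is cleared on the way back to user space.
 */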

#ifdef CONFIG_X86_32
#define in_ia32_syscall() true
#else
#define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \
			   current_thread_info()->status & TS_COMPAT)
#endif

extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_THREAD_INFO_H */