/*
 * arch/arm/include/asm/thread_info.h
 *
 * Copyright (C) 2002 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_THREAD_INFO_H
#define __ASM_ARM_THREAD_INFO_H

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/fpstate.h>

#define THREAD_SIZE_ORDER	1
#define THREAD_SIZE		8192
#define THREAD_START_SP		(THREAD_SIZE - 8)
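
/*
 * Illustrative arithmetic (assuming the usual 4 KiB pages): an order-1
 * allocation is two pages, i.e. 2 * 4096 = 8192 bytes, which is the
 * hard-coded THREAD_SIZE above.  THREAD_START_SP = 8192 - 8 = 8184, so
 * the initial stack pointer sits 8 bytes (two 32-bit words) below the
 * top of the stack area.
 */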

#ifndef __ASSEMBLY__

struct task_struct;
struct exec_domain;

#include <asm/types.h>
#include <asm/domain.h>

typedef unsigned long mm_segment_t;

struct cpu_context_save {
	__u32	r4;
	__u32	r5;
	__u32	r6;
	__u32	r7;
	__u32	r8;
	__u32	r9;
	__u32	sl;
	__u32	fp;
	__u32	sp;
	__u32	pc;
	__u32	extra[2];		/* Xscale 'acc' register, etc */
};

struct arm_restart_block {
	union {
		/* For user cache flushing */
		struct {
			unsigned long start;
			unsigned long end;
		} cache;
	};
};

/*
 * low level task data that entry.S needs immediate access to.
 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
 */
struct thread_info {
	unsigned long		flags;		/* low level flags */
	int			preempt_count;	/* 0 => preemptable, <0 => bug */
	mm_segment_t		addr_limit;	/* address limit */
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	__u32			cpu;		/* cpu */
	__u32			cpu_domain;	/* cpu domain */
	struct cpu_context_save	cpu_context;	/* cpu context */
	__u32			syscall;	/* syscall number */
	__u8			used_cp[16];	/* thread used copro */
	unsigned long		tp_value[2];	/* TLS registers */
#ifdef CONFIG_CRUNCH
	struct crunch_state	crunchstate;
#endif
	union fp_state		fpstate __attribute__((aligned(8)));
	union vfp_state		vfpstate;
#ifdef CONFIG_ARM_THUMBEE
	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
#endif
	struct restart_block	restart_block;
	struct arm_restart_block	arm_restart_block;
};

#define INIT_THREAD_INFO(tsk)						\
{									\
	.task		= &tsk,						\
	.exec_domain	= &default_exec_domain,				\
	.flags		= 0,						\
	.preempt_count	= INIT_PREEMPT_COUNT,				\
	.addr_limit	= KERNEL_DS,					\
	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
	.restart_block	= {						\
		.fn	= do_no_restart_syscall,			\
	},								\
}
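
/*
 * Note on the initialiser above: domain_val(dom, type) from asm/domain.h
 * packs a 2-bit access type into the slot for CP15 domain 'dom' of the
 * Domain Access Control Register value, so the boot thread starts with
 * manager access to the USER and KERNEL domains and client access to the
 * IO domain.
 */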

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/*
 * how to get the thread information struct from C
 */
static inline struct thread_info *current_thread_info(void) __attribute_const__;

static inline struct thread_info *current_thread_info(void)
{
	register unsigned long sp asm ("sp");
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
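
/*
 * Illustrative sketch, not part of this header: kernel stacks are
 * THREAD_SIZE bytes and THREAD_SIZE-aligned, so masking the low bits of
 * the stack pointer lands on the thread_info at the base of the current
 * stack.  Generic code builds 'current' on top of this; get_current() in
 * <asm-generic/current.h> is roughly
 *
 *	#define get_current()	(current_thread_info()->task)
 *	#define current		get_current()
 *
 * so every use of 'current' ultimately goes through the sp-masking trick
 * above.
 */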

#define thread_saved_pc(tsk)	\
	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_sp(tsk)	\
	((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
#define thread_saved_fp(tsk)	\
	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))

extern void crunch_task_disable(struct thread_info *);
extern void crunch_task_copy(struct thread_info *, void *);
extern void crunch_task_restore(struct thread_info *, void *);
extern void crunch_task_release(struct thread_info *);

extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);

extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *);

struct user_vfp;
struct user_vfp_exc;

extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
					    struct user_vfp_exc __user *);
extern int vfp_restore_user_hwstate(struct user_vfp __user *,
				    struct user_vfp_exc __user *);
#endif

/*
 * thread information flags:
 *  TIF_SYSCALL_TRACE	- syscall trace active
 *  TIF_SYSCALL_AUDIT	- syscall auditing active
 *  TIF_SIGPENDING	- signal pending
 *  TIF_NEED_RESCHED	- rescheduling necessary
 *  TIF_NOTIFY_RESUME	- callback before returning to user
 *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
 *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
 */
#define TIF_SIGPENDING		0
#define TIF_NEED_RESCHED	1
#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
#define TIF_UPROBE		7
#define TIF_SYSCALL_TRACE	8
#define TIF_SYSCALL_AUDIT	9
#define TIF_SYSCALL_TRACEPOINT	10
#define TIF_SECCOMP		11	/* seccomp syscall filtering active */
#define TIF_NOHZ		12	/* in adaptive nohz mode */
#define TIF_USING_IWMMXT	17
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK	20

#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_UPROBE		(1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)

/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)

/*
 * Change these and you break ASM code in entry-common.S
 */
#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
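
/*
 * Illustrative sketch of how the flag bits above are consumed (generic
 * helpers from <linux/thread_info.h>, shown only as an example):
 *
 *	if (test_thread_flag(TIF_NEED_RESCHED))
 *		schedule();
 *
 * The return-to-user path in entry-common.S tests _TIF_WORK_MASK against
 * thread_info->flags directly, which is why renumbering these bits
 * breaks the assembly.
 */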

#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/thread_info.h
 *
 * Copyright (C) 2002 Russell King.
 */
#ifndef __ASM_ARM_THREAD_INFO_H
#define __ASM_ARM_THREAD_INFO_H

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/fpstate.h>
#include <asm/page.h>

#define THREAD_SIZE_ORDER	1
#define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP		(THREAD_SIZE - 8)

#ifndef __ASSEMBLY__

struct task_struct;

#include <asm/types.h>

typedef unsigned long mm_segment_t;

struct cpu_context_save {
	__u32	r4;
	__u32	r5;
	__u32	r6;
	__u32	r7;
	__u32	r8;
	__u32	r9;
	__u32	sl;
	__u32	fp;
	__u32	sp;
	__u32	pc;
	__u32	extra[2];		/* Xscale 'acc' register, etc */
};

/*
 * low level task data that entry.S needs immediate access to.
 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
 */
struct thread_info {
	unsigned long		flags;		/* low level flags */
	int			preempt_count;	/* 0 => preemptable, <0 => bug */
	mm_segment_t		addr_limit;	/* address limit */
	struct task_struct	*task;		/* main task structure */
	__u32			cpu;		/* cpu */
	__u32			cpu_domain;	/* cpu domain */
#ifdef CONFIG_STACKPROTECTOR_PER_TASK
	unsigned long		stack_canary;
#endif
	struct cpu_context_save	cpu_context;	/* cpu context */
	__u32			syscall;	/* syscall number */
	__u8			used_cp[16];	/* thread used copro */
	unsigned long		tp_value[2];	/* TLS registers */
#ifdef CONFIG_CRUNCH
	struct crunch_state	crunchstate;
#endif
	union fp_state		fpstate __attribute__((aligned(8)));
	union vfp_state		vfpstate;
#ifdef CONFIG_ARM_THUMBEE
	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
#endif
};

#define INIT_THREAD_INFO(tsk)						\
{									\
	.task		= &tsk,						\
	.flags		= 0,						\
	.preempt_count	= INIT_PREEMPT_COUNT,				\
	.addr_limit	= KERNEL_DS,					\
}

/*
 * how to get the thread information struct from C
 */
static inline struct thread_info *current_thread_info(void) __attribute_const__;

static inline struct thread_info *current_thread_info(void)
{
	return (struct thread_info *)
		(current_stack_pointer & ~(THREAD_SIZE - 1));
}
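
/*
 * This mask trick relies on kernel stacks being THREAD_SIZE-aligned:
 * order-1 page allocations from the buddy allocator are naturally
 * aligned to their size, so clearing the low bits of
 * current_stack_pointer (a named-register alias for sp provided by the
 * arch headers) yields the base of the current stack, where thread_info
 * lives.  As one illustrative consumer, ARM's raw_smp_processor_id()
 * has historically been defined as (current_thread_info()->cpu).
 */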

#define thread_saved_pc(tsk)	\
	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_sp(tsk)	\
	((unsigned long)(task_thread_info(tsk)->cpu_context.sp))

#ifndef CONFIG_THUMB2_KERNEL
#define thread_saved_fp(tsk)	\
	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
#else
#define thread_saved_fp(tsk)	\
	((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
#endif
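
/*
 * Why r7 in the Thumb-2 case: ARM-mode code conventionally uses r11 (fp)
 * as the frame pointer, whereas compiler-generated Thumb-2 code uses r7,
 * so the saved frame pointer lives in a different cpu_context_save slot
 * depending on the kernel's instruction set.
 */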

extern void crunch_task_disable(struct thread_info *);
extern void crunch_task_copy(struct thread_info *, void *);
extern void crunch_task_restore(struct thread_info *, void *);
extern void crunch_task_release(struct thread_info *);

extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);

extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *);

struct user_vfp;
struct user_vfp_exc;

extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
					    struct user_vfp_exc *);
extern int vfp_restore_user_hwstate(struct user_vfp *,
				    struct user_vfp_exc *);
#endif

/*
 * thread information flags:
 *  TIF_USEDFPU		- FPU was used by this task this quantum (SMP)
 *  TIF_POLLING_NRFLAG	- true if poll_idle() is polling TIF_NEED_RESCHED
 */
#define TIF_SIGPENDING		0	/* signal pending */
#define TIF_NEED_RESCHED	1	/* rescheduling necessary */
#define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
#define TIF_UPROBE		3	/* breakpointed or singlestepping */
#define TIF_SYSCALL_TRACE	4	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
#define TIF_SECCOMP		7	/* seccomp syscall filtering active */

#define TIF_USING_IWMMXT	17
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK	20

#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_UPROBE		(1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)

/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
			   _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)

/*
 * Change these and you break ASM code in entry-common.S
 */
#define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
				 _TIF_NOTIFY_RESUME | _TIF_UPROBE)

#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */