/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_THREAD_INFO_H
#define __ASM_SH_THREAD_INFO_H

/* SuperH version
 * Copyright (C) 2002 Niibe Yutaka
 *
 * The copyright of original i386 version is:
 *
 *  Copyright (C) 2002 David Howells (dhowells@redhat.com)
 *  - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */
#ifdef __KERNEL__

#include <asm/page.h>

/*
 * Page fault error code bits
 */
#define FAULT_CODE_WRITE	(1 << 0)	/* write access */
#define FAULT_CODE_INITIAL	(1 << 1)	/* initial page write */
#define FAULT_CODE_ITLB		(1 << 2)	/* ITLB miss */
#define FAULT_CODE_PROT		(1 << 3)	/* protection fault */
#define FAULT_CODE_USER		(1 << 4)	/* user-mode access */

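/*
 * Illustrative sketch (not part of the original header): the FAULT_CODE_*
 * bits are OR-ed together so that a single value describes one access.
 * For example, a user-mode store that trips a protection fault carries
 * FAULT_CODE_WRITE | FAULT_CODE_PROT | FAULT_CODE_USER.  The helper name
 * below is hypothetical and only shows how such a value might be tested.
 */
#ifndef __ASSEMBLY__
static inline int example_is_user_write_prot(unsigned int fault_code)
{
	unsigned int mask = FAULT_CODE_WRITE | FAULT_CODE_PROT | FAULT_CODE_USER;

	return (fault_code & mask) == mask;
}
#endif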
#ifndef __ASSEMBLY__
#include <asm/processor.h>

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	unsigned long		flags;		/* low level flags */
	__u32			status;		/* thread synchronous flags */
	__u32			cpu;
	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
	mm_segment_t		addr_limit;	/* thread address space */
	unsigned long		previous_sp;	/* sp of previous stack in case
						   of nested IRQ stacks */
	__u8			supervisor_stack[0];
};

#endif

#if defined(CONFIG_4KSTACKS)
#define THREAD_SHIFT	12
#else
#define THREAD_SHIFT	13
#endif

#define THREAD_SIZE	(1 << THREAD_SHIFT)
#define STACK_WARN	(THREAD_SIZE >> 3)

/*
 * macros/functions for gaining access to the thread information structure
 */
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.flags		= 0,			\
	.status		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.addr_limit	= KERNEL_DS,		\
}

/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("r15") __used;

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
#if defined(CONFIG_SUPERH64)
	/* SH-5 keeps the current thread_info pointer in control register cr17 (KCR0). */
	__asm__ __volatile__ ("getcon	cr17, %0" : "=r" (ti));
#elif defined(CONFIG_CPU_HAS_SR_RB)
	/* With SR.RB register banks, kernel entry keeps thread_info in r7_bank. */
	__asm__ __volatile__ ("stc	r7_bank, %0" : "=r" (ti));
#else
	/*
	 * Otherwise derive it from the kernel stack pointer: the stack is
	 * THREAD_SIZE aligned and thread_info sits at its base, so masking
	 * off the low THREAD_SIZE bits of r15 yields the structure.
	 */
	unsigned long __dummy;

	__asm__ __volatile__ (
		"mov	r15, %0\n\t"
		"and	%1, %0\n\t"
		: "=&r" (ti), "=r" (__dummy)
		: "1" (~(THREAD_SIZE - 1))
		: "memory");
#endif

	return ti;
}

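/*
 * Usage sketch (illustrative, not part of the original header): generic code
 * typically reaches "current" through the ->task back-pointer (see
 * asm-generic/current.h).  On the non-banked path above, the same
 * stack-pointer masking can be written in C; the helper name below is
 * hypothetical and exists only to show the arithmetic.
 */
static inline struct thread_info *example_ti_from_sp(void)
{
	return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}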
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)

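/*
 * Worked example (illustrative): with the default THREAD_SHIFT of 13 and
 * 4 KiB pages (PAGE_SHIFT == 12), THREAD_SIZE is 8 KiB, STACK_WARN is 1 KiB
 * and THREAD_SIZE_ORDER is 1, i.e. each kernel stack is an order-1,
 * two-page allocation.  With CONFIG_4KSTACKS and 4 KiB pages the stack
 * shrinks to a single page and the order drops to 0.
 */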
extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void init_thread_xstate(void);

#endif /* __ASSEMBLY__ */

/*
 * Thread information flags
 *
 * - Limited to 24 bits, upper byte used for fault code encoding.
 *
 * - _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or
 *   we blow the tst immediate size constraints and need to fix up
 *   arch/sh/kernel/entry-common.S.
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* singlestepping active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
#define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)

/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP    | \
				 _TIF_SYSCALL_TRACEPOINT)

/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK	(_TIF_SYSCALL_TRACE | _TIF_SIGPENDING    | \
				 _TIF_NEED_RESCHED  | _TIF_SYSCALL_AUDIT | \
				 _TIF_SINGLESTEP    | _TIF_NOTIFY_RESUME | \
				 _TIF_SYSCALL_TRACEPOINT)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK		(_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
				 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_USEDFPU		0x0002	/* FPU used by this task this quantum */

#ifndef __ASSEMBLY__

#define TI_FLAG_FAULT_CODE_SHIFT	24

/*
 * Additional thread flag encoding
 */
static inline void set_thread_fault_code(unsigned int val)
{
	struct thread_info *ti = current_thread_info();
	ti->flags = (ti->flags & (~0 >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
		| (val << TI_FLAG_FAULT_CODE_SHIFT);
}

static inline unsigned int get_thread_fault_code(void)
{
	struct thread_info *ti = current_thread_info();
	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}

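/*
 * Usage sketch (illustrative, not part of the original header): the fault
 * path records the FAULT_CODE_* bits in the top byte of thread_info->flags
 * with set_thread_fault_code(), and later consumers read them back with
 * get_thread_fault_code().  The check below is a hypothetical example of
 * testing the recorded value.
 */
static inline int example_fault_from_user(void)
{
	return (get_thread_fault_code() & FAULT_CODE_USER) != 0;
}
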
#endif	/* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* __ASM_SH_THREAD_INFO_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_THREAD_INFO_H
#define __ASM_SH_THREAD_INFO_H

/* SuperH version
 * Copyright (C) 2002 Niibe Yutaka
 *
 * The copyright of original i386 version is:
 *
 *  Copyright (C) 2002 David Howells (dhowells@redhat.com)
 *  - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */
#include <asm/page.h>

/*
 * Page fault error code bits
 */
#define FAULT_CODE_WRITE	(1 << 0)	/* write access */
#define FAULT_CODE_INITIAL	(1 << 1)	/* initial page write */
#define FAULT_CODE_ITLB		(1 << 2)	/* ITLB miss */
#define FAULT_CODE_PROT		(1 << 3)	/* protection fault */
#define FAULT_CODE_USER		(1 << 4)	/* user-mode access */

#ifndef __ASSEMBLY__
#include <asm/processor.h>

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	unsigned long		flags;		/* low level flags */
	__u32			status;		/* thread synchronous flags */
	__u32			cpu;
	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
	unsigned long		previous_sp;	/* sp of previous stack in case
						   of nested IRQ stacks */
	__u8			supervisor_stack[];
};

#endif

#if defined(CONFIG_4KSTACKS)
#define THREAD_SHIFT	12
#else
#define THREAD_SHIFT	13
#endif

#define THREAD_SIZE	(1 << THREAD_SHIFT)
#define STACK_WARN	(THREAD_SIZE >> 3)

/*
 * macros/functions for gaining access to the thread information structure
 */
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.flags		= 0,			\
	.status		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
}

/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("r15") __used;

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
#if defined(CONFIG_CPU_HAS_SR_RB)
	__asm__ __volatile__ ("stc	r7_bank, %0" : "=r" (ti));
#else
	unsigned long __dummy;

	__asm__ __volatile__ (
		"mov	r15, %0\n\t"
		"and	%1, %0\n\t"
		: "=&r" (ti), "=r" (__dummy)
		: "1" (~(THREAD_SIZE - 1))
		: "memory");
#endif

	return ti;
}

#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)

extern void init_thread_xstate(void);

#endif /* __ASSEMBLY__ */

/*
 * Thread information flags
 *
 * - Limited to 24 bits, upper byte used for fault code encoding.
 *
 * - _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or
 *   we blow the tst immediate size constraints and need to fix up
 *   arch/sh/kernel/entry-common.S.
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_NOTIFY_SIGNAL	3	/* signal notifications exist */
#define TIF_SINGLESTEP		4	/* singlestepping active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
#define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)

/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP    | \
				 _TIF_SYSCALL_TRACEPOINT)

/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK	(_TIF_SYSCALL_TRACE | _TIF_SIGPENDING    | \
				 _TIF_NEED_RESCHED  | _TIF_SYSCALL_AUDIT | \
				 _TIF_SINGLESTEP    | _TIF_NOTIFY_RESUME | \
				 _TIF_SYSCALL_TRACEPOINT | _TIF_NOTIFY_SIGNAL)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK		(_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
				 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))

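/*
 * Illustrative sketch (not part of the original header): the assembly
 * return paths in arch/sh/kernel/entry-common.S test thread_info->flags
 * against these masks; expressed in C, the checks amount to the
 * hypothetical helpers below.
 */
#ifndef __ASSEMBLY__
static inline int example_work_pending_on_irq_return(void)
{
	return (current_thread_info()->flags & _TIF_WORK_MASK) != 0;
}

static inline int example_syscall_needs_tracing(void)
{
	return (current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK) != 0;
}
#endif
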
/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_USEDFPU		0x0002	/* FPU used by this task this quantum */
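
/*
 * Usage sketch (illustrative, not part of the original header): because
 * ->status is only ever touched by the owning thread, the FPU code can test
 * and update TS_USEDFPU with plain, non-atomic accesses.  The helper below
 * is hypothetical and only shows the idiom.
 */
#ifndef __ASSEMBLY__
static inline int example_task_used_fpu(void)
{
	return (current_thread_info()->status & TS_USEDFPU) != 0;
}
#endif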

#ifndef __ASSEMBLY__

#define TI_FLAG_FAULT_CODE_SHIFT	24

/*
 * Additional thread flag encoding
 */
static inline void set_thread_fault_code(unsigned int val)
{
	struct thread_info *ti = current_thread_info();
	ti->flags = (ti->flags & (~0 >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
		| (val << TI_FLAG_FAULT_CODE_SHIFT);
}

static inline unsigned int get_thread_fault_code(void)
{
	struct thread_info *ti = current_thread_info();
	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}

#endif	/* !__ASSEMBLY__ */
#endif /* __ASM_SH_THREAD_INFO_H */