v3.5.6
#ifndef __ASM_SH_THREAD_INFO_H
#define __ASM_SH_THREAD_INFO_H

/* SuperH version
 * Copyright (C) 2002  Niibe Yutaka
 *
 * The copyright of original i386 version is:
 *
 *  Copyright (C) 2002  David Howells (dhowells@redhat.com)
 *  - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */
#ifdef __KERNEL__

#include <asm/page.h>

/*
 * Page fault error code bits
 */
#define FAULT_CODE_WRITE	(1 << 0)	/* write access */
#define FAULT_CODE_INITIAL	(1 << 1)	/* initial page write */
#define FAULT_CODE_ITLB		(1 << 2)	/* ITLB miss */
#define FAULT_CODE_PROT		(1 << 3)	/* protection fault */
#define FAULT_CODE_USER		(1 << 4)	/* user-mode access */

#ifndef __ASSEMBLY__
#include <asm/processor.h>

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	unsigned long		flags;		/* low level flags */
	__u32			status;		/* thread synchronous flags */
	__u32			cpu;
	int			preempt_count; /* 0 => preemptable, <0 => BUG */
	mm_segment_t		addr_limit;	/* thread address space */
	struct restart_block	restart_block;
	unsigned long		previous_sp;	/* sp of previous stack in case
						   of nested IRQ stacks */
	__u8			supervisor_stack[0];
};

#endif

#define PREEMPT_ACTIVE		0x10000000

#if defined(CONFIG_4KSTACKS)
#define THREAD_SHIFT	12
#else
#define THREAD_SHIFT	13
#endif

#define THREAD_SIZE	(1 << THREAD_SHIFT)
#define STACK_WARN	(THREAD_SIZE >> 3)
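
/*
 * Worked values (illustrative note, not part of the upstream header): with
 * the default THREAD_SHIFT of 13, THREAD_SIZE is 1 << 13 = 8 KiB and
 * STACK_WARN is THREAD_SIZE >> 3 = 1 KiB, the low-stack warning threshold;
 * with CONFIG_4KSTACKS the stack shrinks to 4 KiB and the threshold to
 * 512 bytes.
 */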

/*
 * macros/functions for gaining access to the thread information structure
 */
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.status		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.addr_limit	= KERNEL_DS,		\
	.restart_block	= {			\
		.fn = do_no_restart_syscall,	\
	},					\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("r15") __used;

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
#if defined(CONFIG_SUPERH64)
	__asm__ __volatile__ ("getcon	cr17, %0" : "=r" (ti));
#elif defined(CONFIG_CPU_HAS_SR_RB)
	__asm__ __volatile__ ("stc	r7_bank, %0" : "=r" (ti));
#else
	unsigned long __dummy;

	__asm__ __volatile__ (
		"mov	r15, %0\n\t"
		"and	%1, %0\n\t"
		: "=&r" (ti), "=r" (__dummy)
		: "1" (~(THREAD_SIZE - 1))
		: "memory");
#endif

	return ti;
}
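
/*
 * Illustrative sketch (not part of the upstream header): the fallback case
 * above relies on struct thread_info sitting at the base of the
 * THREAD_SIZE-aligned kernel stack, so masking off the low bits of the
 * stack pointer recovers it.  The same arithmetic in plain C, assuming the
 * default THREAD_SHIFT of 13 (THREAD_SIZE = 0x2000) and a hypothetical
 * stack pointer value:
 *
 *	unsigned long sp = 0xc0013f80;		// r15 somewhere in the stack
 *	struct thread_info *ti =
 *		(struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 *	// sp & ~0x1fff == 0xc0012000, the base of the 8 KiB stack
 */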

#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
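
/*
 * Example (illustrative only): with THREAD_SHIFT = 13 and 4 KiB pages
 * (PAGE_SHIFT = 12), THREAD_SIZE_ORDER evaluates to 1, i.e. each kernel
 * stack is an order-1, two-page allocation; with CONFIG_4KSTACKS both
 * shifts are 12 and the stack fits in a single order-0 page.
 */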

extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void init_thread_xstate(void);

#endif /* __ASSEMBLY__ */

/*
 * Thread information flags
 *
 * - Limited to 24 bits, upper byte used for fault code encoding.
 *
 * - _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or
 *   we blow the tst immediate size constraints and need to fix up
 *   arch/sh/kernel/entry-common.S.
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* singlestepping active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
#define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)

/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP    | \
				 _TIF_SYSCALL_TRACEPOINT)

/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK	(_TIF_SYSCALL_TRACE | _TIF_SIGPENDING      | \
				 _TIF_NEED_RESCHED  | _TIF_SYSCALL_AUDIT   | \
				 _TIF_SINGLESTEP    | _TIF_NOTIFY_RESUME   | \
				 _TIF_SYSCALL_TRACEPOINT)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK		(_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
				 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
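
/*
 * Worked values (illustrative only, derived from the TIF_* numbering above):
 *
 *	_TIF_WORK_SYSCALL_MASK	= 0x0171	bits 0, 4, 5, 6, 8
 *	_TIF_ALLWORK_MASK	= 0x01b7	bits 0, 1, 2, 4, 5, 7, 8
 *	_TIF_WORK_MASK		= 0x0186	bits 1, 2, 7, 8
 *
 * All three stay below bit 16, satisfying the "fit within 2 bytes"
 * constraint spelled out in the comment block above the TIF_* definitions.
 */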

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_RESTORE_SIGMASK	0x0001	/* restore signal mask in do_signal() */
#define TS_USEDFPU		0x0002	/* FPU used by this task this quantum */

#ifndef __ASSEMBLY__

#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
}

#define TI_FLAG_FAULT_CODE_SHIFT	24

/*
 * Additional thread flag encoding
 */
static inline void set_thread_fault_code(unsigned int val)
{
	struct thread_info *ti = current_thread_info();
	ti->flags = (ti->flags & (~0 >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
		| (val << TI_FLAG_FAULT_CODE_SHIFT);
}

static inline unsigned int get_thread_fault_code(void)
{
	struct thread_info *ti = current_thread_info();
	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}
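
/*
 * Worked example (illustrative only): with TI_FLAG_FAULT_CODE_SHIFT = 24
 * the fault code lives in bits 31..24 of ti->flags while the TIF_* bits
 * stay in the low 24 bits.  For a write fault from user mode:
 *
 *	set_thread_fault_code(FAULT_CODE_WRITE | FAULT_CODE_USER);
 *		ORs (0x11 << 24) = 0x11000000 into ti->flags
 *	get_thread_fault_code();
 *		returns ti->flags >> 24, i.e. 0x11 here
 */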

static inline void clear_restore_sigmask(void)
{
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
static inline bool test_restore_sigmask(void)
{
	return current_thread_info()->status & TS_RESTORE_SIGMASK;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	if (!(ti->status & TS_RESTORE_SIGMASK))
		return false;
	ti->status &= ~TS_RESTORE_SIGMASK;
	return true;
}
#endif	/* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* __ASM_SH_THREAD_INFO_H */
v4.6
#ifndef __ASM_SH_THREAD_INFO_H
#define __ASM_SH_THREAD_INFO_H

/* SuperH version
 * Copyright (C) 2002  Niibe Yutaka
 *
 * The copyright of original i386 version is:
 *
 *  Copyright (C) 2002  David Howells (dhowells@redhat.com)
 *  - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */
#ifdef __KERNEL__

#include <asm/page.h>

/*
 * Page fault error code bits
 */
#define FAULT_CODE_WRITE	(1 << 0)	/* write access */
#define FAULT_CODE_INITIAL	(1 << 1)	/* initial page write */
#define FAULT_CODE_ITLB		(1 << 2)	/* ITLB miss */
#define FAULT_CODE_PROT		(1 << 3)	/* protection fault */
#define FAULT_CODE_USER		(1 << 4)	/* user-mode access */

#ifndef __ASSEMBLY__
#include <asm/processor.h>

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	unsigned long		flags;		/* low level flags */
	__u32			status;		/* thread synchronous flags */
	__u32			cpu;
	int			preempt_count; /* 0 => preemptable, <0 => BUG */
	mm_segment_t		addr_limit;	/* thread address space */
	unsigned long		previous_sp;	/* sp of previous stack in case
						   of nested IRQ stacks */
	__u8			supervisor_stack[0];
};

#endif

#if defined(CONFIG_4KSTACKS)
#define THREAD_SHIFT	12
#else
#define THREAD_SHIFT	13
#endif

#define THREAD_SIZE	(1 << THREAD_SHIFT)
#define STACK_WARN	(THREAD_SIZE >> 3)

/*
 * macros/functions for gaining access to the thread information structure
 */
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.flags		= 0,			\
	.status		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.addr_limit	= KERNEL_DS,		\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)

/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("r15") __used;

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
#if defined(CONFIG_SUPERH64)
	__asm__ __volatile__ ("getcon	cr17, %0" : "=r" (ti));
#elif defined(CONFIG_CPU_HAS_SR_RB)
	__asm__ __volatile__ ("stc	r7_bank, %0" : "=r" (ti));
#else
	unsigned long __dummy;

	__asm__ __volatile__ (
		"mov	r15, %0\n\t"
		"and	%1, %0\n\t"
		: "=&r" (ti), "=r" (__dummy)
		: "1" (~(THREAD_SIZE - 1))
		: "memory");
#endif

	return ti;
}

#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)

extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void init_thread_xstate(void);

#endif /* __ASSEMBLY__ */

/*
 * Thread information flags
 *
 * - Limited to 24 bits, upper byte used for fault code encoding.
 *
 * - _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or
 *   we blow the tst immediate size constraints and need to fix up
 *   arch/sh/kernel/entry-common.S.
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* singlestepping active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
#define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)

/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP    | \
				 _TIF_SYSCALL_TRACEPOINT)

/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK	(_TIF_SYSCALL_TRACE | _TIF_SIGPENDING      | \
				 _TIF_NEED_RESCHED  | _TIF_SYSCALL_AUDIT   | \
				 _TIF_SINGLESTEP    | _TIF_NOTIFY_RESUME   | \
				 _TIF_SYSCALL_TRACEPOINT)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK		(_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
				 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))

/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_RESTORE_SIGMASK	0x0001	/* restore signal mask in do_signal() */
#define TS_USEDFPU		0x0002	/* FPU used by this task this quantum */

#ifndef __ASSEMBLY__

#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	ti->status |= TS_RESTORE_SIGMASK;
	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
}

#define TI_FLAG_FAULT_CODE_SHIFT	24

/*
 * Additional thread flag encoding
 */
static inline void set_thread_fault_code(unsigned int val)
{
	struct thread_info *ti = current_thread_info();
	ti->flags = (ti->flags & (~0 >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
		| (val << TI_FLAG_FAULT_CODE_SHIFT);
}

static inline unsigned int get_thread_fault_code(void)
{
	struct thread_info *ti = current_thread_info();
	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}

static inline void clear_restore_sigmask(void)
{
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}
static inline bool test_restore_sigmask(void)
{
	return current_thread_info()->status & TS_RESTORE_SIGMASK;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();
	if (!(ti->status & TS_RESTORE_SIGMASK))
		return false;
	ti->status &= ~TS_RESTORE_SIGMASK;
	return true;
}

#endif	/* !__ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* __ASM_SH_THREAD_INFO_H */