v3.15
 
   1#ifndef _LINUX_SCHED_H
   2#define _LINUX_SCHED_H
   3
   4#include <uapi/linux/sched.h>
   5
   6#include <linux/sched/prio.h>
   7
   8
   9struct sched_param {
  10	int sched_priority;
  11};
  12
  13#include <asm/param.h>	/* for HZ */
  14
  15#include <linux/capability.h>
  16#include <linux/threads.h>
  17#include <linux/kernel.h>
  18#include <linux/types.h>
  19#include <linux/timex.h>
  20#include <linux/jiffies.h>
  21#include <linux/plist.h>
  22#include <linux/rbtree.h>
  23#include <linux/thread_info.h>
  24#include <linux/cpumask.h>
  25#include <linux/errno.h>
  26#include <linux/nodemask.h>
  27#include <linux/mm_types.h>
  28#include <linux/preempt_mask.h>
  29
  30#include <asm/page.h>
  31#include <asm/ptrace.h>
  32#include <linux/cputime.h>
  33
  34#include <linux/smp.h>
  35#include <linux/sem.h>
  36#include <linux/signal.h>
  37#include <linux/compiler.h>
  38#include <linux/completion.h>
  39#include <linux/pid.h>
  40#include <linux/percpu.h>
  41#include <linux/topology.h>
  42#include <linux/proportions.h>
  43#include <linux/seccomp.h>
  44#include <linux/rcupdate.h>
  45#include <linux/rculist.h>
  46#include <linux/rtmutex.h>
  47
  48#include <linux/time.h>
  49#include <linux/param.h>
  50#include <linux/resource.h>
  51#include <linux/timer.h>
  52#include <linux/hrtimer.h>
  53#include <linux/task_io_accounting.h>
  54#include <linux/latencytop.h>
  55#include <linux/cred.h>
  56#include <linux/llist.h>
  57#include <linux/uidgid.h>
  58#include <linux/gfp.h>
  59
  60#include <asm/processor.h>
  61
  62#define SCHED_ATTR_SIZE_VER0	48	/* sizeof first published struct */
  63
  64/*
  65 * Extended scheduling parameters data structure.
  66 *
  67 * This is needed because the original struct sched_param can not be
  68 * altered without introducing ABI issues with legacy applications
  69 * (e.g., in sched_getparam()).
  70 *
  71 * However, the possibility of specifying more than just a priority for
  72 * the tasks may be useful for a wide variety of application fields, e.g.,
  73 * multimedia, streaming, automation and control, and many others.
  74 *
   75 * This variant (sched_attr) is meant to describe a so-called
   76 * sporadic time-constrained task. In such a model, a task is specified by:
  77 *  - the activation period or minimum instance inter-arrival time;
  78 *  - the maximum (or average, depending on the actual scheduling
  79 *    discipline) computation time of all instances, a.k.a. runtime;
  80 *  - the deadline (relative to the actual activation time) of each
  81 *    instance.
  82 * Very briefly, a periodic (sporadic) task asks for the execution of
  83 * some specific computation --which is typically called an instance--
  84 * (at most) every period. Moreover, each instance typically lasts no more
  85 * than the runtime and must be completed by time instant t equal to
  86 * the instance activation time + the deadline.
  87 *
  88 * This is reflected by the actual fields of the sched_attr structure:
  89 *
  90 *  @size		size of the structure, for fwd/bwd compat.
  91 *
  92 *  @sched_policy	task's scheduling policy
  93 *  @sched_flags	for customizing the scheduler behaviour
  94 *  @sched_nice		task's nice value      (SCHED_NORMAL/BATCH)
  95 *  @sched_priority	task's static priority (SCHED_FIFO/RR)
  96 *  @sched_deadline	representative of the task's deadline
  97 *  @sched_runtime	representative of the task's runtime
  98 *  @sched_period	representative of the task's period
  99 *
  100 * Given this task model, there is a multiplicity of scheduling algorithms
  101 * and policies that can be used to ensure all the tasks will meet their
  102 * timing constraints.
 103 *
  104 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
  105 * only user of this new interface. More information about the algorithm is
  106 * available in the scheduling class file or in Documentation/.
 107 */
 108struct sched_attr {
 109	u32 size;
 110
 111	u32 sched_policy;
 112	u64 sched_flags;
 113
 114	/* SCHED_NORMAL, SCHED_BATCH */
 115	s32 sched_nice;
 116
 117	/* SCHED_FIFO, SCHED_RR */
 118	u32 sched_priority;
 119
 120	/* SCHED_DEADLINE */
 121	u64 sched_runtime;
 122	u64 sched_deadline;
 123	u64 sched_period;
 124};
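/*
 * Illustrative sketch (not part of this header): userspace would typically
 * fill in a sched_attr and hand it to the sched_setattr() syscall.  There is
 * no glibc wrapper for it, so syscall(2) is used directly.  The numbers below
 * (100us of runtime every 1ms, with a 1ms relative deadline) are made up for
 * the example; pid 0 means the calling thread, and the flags argument must
 * currently be 0.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  100 * 1000,
 *		.sched_deadline	= 1000 * 1000,
 *		.sched_period	= 1000 * 1000,
 *	};
 *
 *	if (syscall(__NR_sched_setattr, 0, &attr, 0))
 *		perror("sched_setattr");
 */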
 125
 126struct exec_domain;
 127struct futex_pi_state;
 128struct robust_list_head;
 129struct bio_list;
 130struct fs_struct;
 131struct perf_event_context;
 132struct blk_plug;
 133struct filename;
 134
 135#define VMACACHE_BITS 2
 136#define VMACACHE_SIZE (1U << VMACACHE_BITS)
 137#define VMACACHE_MASK (VMACACHE_SIZE - 1)
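/*
 * Illustrative sketch: the per-thread cache declared in task_struct below
 * (->vmacache[]) is a tiny direct-mapped table, so a lookup boils down to
 * hashing the faulting address into one of VMACACHE_SIZE slots, roughly:
 *
 *	idx = (addr >> PAGE_SHIFT) & VMACACHE_MASK;
 *	vma = current->vmacache[idx];
 *	if (vma && vma->vm_start <= addr && addr < vma->vm_end)
 *		return vma;
 *
 * The real lookup/update helpers live in mm/vmacache.c; the hash above is
 * only meant to show how the three constants fit together.
 */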
 138
 139/*
 140 * List of flags we want to share for kernel threads,
 141 * if only because they are not used by them anyway.
 142 */
 143#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
 144
 145/*
  146 * These are the constants used to fake the fixed-point load-average
 147 * counting. Some notes:
 148 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 149 *    a load-average precision of 10 bits integer + 11 bits fractional
 150 *  - if you want to count load-averages more often, you need more
 151 *    precision, or rounding will get you. With 2-second counting freq,
 152 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 153 *    11 bit fractions.
 154 */
 155extern unsigned long avenrun[];		/* Load averages */
 156extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
 157
 158#define FSHIFT		11		/* nr of bits of precision */
 159#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
 160#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
 161#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
 162#define EXP_5		2014		/* 1/exp(5sec/5min) */
 163#define EXP_15		2037		/* 1/exp(5sec/15min) */
 164
 165#define CALC_LOAD(load,exp,n) \
 166	load *= exp; \
 167	load += n*(FIXED_1-exp); \
 168	load >>= FSHIFT;
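/*
 * Worked example (illustrative): with FSHIFT = 11, FIXED_1 = 2048 and
 * exp = EXP_1 = 1884, feeding CALC_LOAD() a fixed-point active count
 * n = nr_active * FIXED_1 once per LOAD_FREQ computes
 *
 *	load = (load * 1884 + n * (2048 - 1884)) >> 11;
 *
 * i.e. the 1-minute average keeps ~92% of its previous value and takes ~8%
 * from the instantaneous run-queue length, which is how avenrun[] decays
 * towards the current load.
 */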
 169
 170extern unsigned long total_forks;
 171extern int nr_threads;
 172DECLARE_PER_CPU(unsigned long, process_counts);
 173extern int nr_processes(void);
 174extern unsigned long nr_running(void);
 175extern unsigned long nr_iowait(void);
 176extern unsigned long nr_iowait_cpu(int cpu);
 177extern unsigned long this_cpu_load(void);
 178
 179
 180extern void calc_global_load(unsigned long ticks);
 181extern void update_cpu_load_nohz(void);
 182
 183extern unsigned long get_parent_ip(unsigned long addr);
 184
 185extern void dump_cpu_task(int cpu);
 186
 187struct seq_file;
 188struct cfs_rq;
 189struct task_group;
 190#ifdef CONFIG_SCHED_DEBUG
 191extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 192extern void proc_sched_set_task(struct task_struct *p);
 193extern void
 194print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 195#endif
 196
 197/*
 198 * Task state bitmask. NOTE! These bits are also
 199 * encoded in fs/proc/array.c: get_task_state().
 200 *
 201 * We have two separate sets of flags: task->state
 202 * is about runnability, while task->exit_state are
 203 * about the task exiting. Confusing, but this way
 204 * modifying one set can't modify the other one by
 205 * mistake.
 206 */
 207#define TASK_RUNNING		0
 208#define TASK_INTERRUPTIBLE	1
 209#define TASK_UNINTERRUPTIBLE	2
 210#define __TASK_STOPPED		4
 211#define __TASK_TRACED		8
 212/* in tsk->exit_state */
 213#define EXIT_DEAD		16
 214#define EXIT_ZOMBIE		32
 215#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
 216/* in tsk->state again */
 217#define TASK_DEAD		64
 218#define TASK_WAKEKILL		128
 219#define TASK_WAKING		256
 220#define TASK_PARKED		512
 221#define TASK_STATE_MAX		1024
 222
 223#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
 224
 225extern char ___assert_task_state[1 - 2*!!(
 226		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
 227
 228/* Convenience macros for the sake of set_task_state */
 229#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
 230#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
 231#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
 232
 233/* Convenience macros for the sake of wake_up */
 234#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
 235#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
 236
 237/* get_task_state() */
 238#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
 239				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 240				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
 241
 242#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
 243#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
 244#define task_is_stopped_or_traced(task)	\
 245			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 246#define task_contributes_to_load(task)	\
 247				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 248				 (task->flags & PF_FROZEN) == 0)
 249
 250#define __set_task_state(tsk, state_value)		\
 251	do { (tsk)->state = (state_value); } while (0)
 252#define set_task_state(tsk, state_value)		\
 253	set_mb((tsk)->state, (state_value))
 254
 255/*
 256 * set_current_state() includes a barrier so that the write of current->state
 257 * is correctly serialised wrt the caller's subsequent test of whether to
 258 * actually sleep:
 259 *
 260 *	set_current_state(TASK_UNINTERRUPTIBLE);
 261 *	if (do_i_need_to_sleep())
 262 *		schedule();
 263 *
 264 * If the caller does not need such serialisation then use __set_current_state()
 265 */
 266#define __set_current_state(state_value)			\
 267	do { current->state = (state_value); } while (0)
 268#define set_current_state(state_value)		\
 269	set_mb(current->state, (state_value))
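/*
 * Illustrative sketch of the canonical wait loop built on the helpers above
 * (it assumes the waker sets "condition" before calling wake_up_process()):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The barrier implied by set_current_state() is what keeps the condition
 * test race-free against the wake-up.
 */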
 270
 271/* Task command name length */
 272#define TASK_COMM_LEN 16
 273
 274#include <linux/spinlock.h>
 275
 276/*
 277 * This serializes "schedule()" and also protects
 278 * the run-queue from deletions/modifications (but
 279 * _adding_ to the beginning of the run-queue has
 280 * a separate lock).
 281 */
 282extern rwlock_t tasklist_lock;
 283extern spinlock_t mmlist_lock;
 284
 285struct task_struct;
 286
 287#ifdef CONFIG_PROVE_RCU
 288extern int lockdep_tasklist_lock_is_held(void);
 289#endif /* #ifdef CONFIG_PROVE_RCU */
 290
 291extern void sched_init(void);
 292extern void sched_init_smp(void);
 293extern asmlinkage void schedule_tail(struct task_struct *prev);
 294extern void init_idle(struct task_struct *idle, int cpu);
 295extern void init_idle_bootup_task(struct task_struct *idle);
 296
 297extern int runqueue_is_locked(int cpu);
 298
 299#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 300extern void nohz_balance_enter_idle(int cpu);
 301extern void set_cpu_sd_state_idle(void);
 302extern int get_nohz_timer_target(int pinned);
 303#else
 304static inline void nohz_balance_enter_idle(int cpu) { }
 305static inline void set_cpu_sd_state_idle(void) { }
 306static inline int get_nohz_timer_target(int pinned)
 307{
 308	return smp_processor_id();
 309}
 310#endif
 311
 312/*
 313 * Only dump TASK_* tasks. (0 for all tasks)
 314 */
 315extern void show_state_filter(unsigned long state_filter);
 316
 317static inline void show_state(void)
 318{
 319	show_state_filter(0);
 320}
 321
 322extern void show_regs(struct pt_regs *);
 323
 324/*
 325 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 326 * task), SP is the stack pointer of the first frame that should be shown in the back
 327 * trace (or NULL if the entire call-chain of the task should be shown).
 328 */
 329extern void show_stack(struct task_struct *task, unsigned long *sp);
 330
 331void io_schedule(void);
 332long io_schedule_timeout(long timeout);
 333
 334extern void cpu_init (void);
 335extern void trap_init(void);
 336extern void update_process_times(int user);
 337extern void scheduler_tick(void);
 338
 339extern void sched_show_task(struct task_struct *p);
 340
 341#ifdef CONFIG_LOCKUP_DETECTOR
 342extern void touch_softlockup_watchdog(void);
 343extern void touch_softlockup_watchdog_sync(void);
 344extern void touch_all_softlockup_watchdogs(void);
 345extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 346				  void __user *buffer,
 347				  size_t *lenp, loff_t *ppos);
 348extern unsigned int  softlockup_panic;
 349void lockup_detector_init(void);
 350#else
 351static inline void touch_softlockup_watchdog(void)
 352{
 353}
 354static inline void touch_softlockup_watchdog_sync(void)
 355{
 356}
 357static inline void touch_all_softlockup_watchdogs(void)
 358{
 359}
 360static inline void lockup_detector_init(void)
 361{
 362}
 363#endif
 364
 365#ifdef CONFIG_DETECT_HUNG_TASK
 366void reset_hung_task_detector(void);
 367#else
 368static inline void reset_hung_task_detector(void)
 369{
 370}
 371#endif
 372
 373/* Attach to any functions which should be ignored in wchan output. */
 374#define __sched		__attribute__((__section__(".sched.text")))
 375
 376/* Linker adds these: start and end of __sched functions */
 377extern char __sched_text_start[], __sched_text_end[];
 378
 379/* Is this address in the __sched functions? */
 380extern int in_sched_functions(unsigned long addr);
 381
 382#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
 383extern signed long schedule_timeout(signed long timeout);
 384extern signed long schedule_timeout_interruptible(signed long timeout);
 385extern signed long schedule_timeout_killable(signed long timeout);
 386extern signed long schedule_timeout_uninterruptible(signed long timeout);
 387asmlinkage void schedule(void);
 388extern void schedule_preempt_disabled(void);
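/*
 * Illustrative sketch: schedule_timeout() expects the caller to have set the
 * task state first, e.g. to sleep interruptibly for roughly 100ms:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * The schedule_timeout_interruptible() wrapper sets the state itself.  A
 * non-zero return value means the sleep ended early, e.g. because of a
 * signal or an explicit wake-up.
 */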
 389
 390struct nsproxy;
 391struct user_namespace;
 392
 393#ifdef CONFIG_MMU
 394extern void arch_pick_mmap_layout(struct mm_struct *mm);
 395extern unsigned long
 396arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 397		       unsigned long, unsigned long);
 398extern unsigned long
 399arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 400			  unsigned long len, unsigned long pgoff,
 401			  unsigned long flags);
 402#else
 403static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 404#endif
 405
 406#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
 407#define SUID_DUMP_USER		1	/* Dump as user of process */
 408#define SUID_DUMP_ROOT		2	/* Dump as root */
 409
 410/* mm flags */
 411
 412/* for SUID_DUMP_* above */
 413#define MMF_DUMPABLE_BITS 2
 414#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
 415
 416extern void set_dumpable(struct mm_struct *mm, int value);
 417/*
 418 * This returns the actual value of the suid_dumpable flag. For things
 419 * that are using this for checking for privilege transitions, it must
 420 * test against SUID_DUMP_USER rather than treating it as a boolean
 421 * value.
 422 */
 423static inline int __get_dumpable(unsigned long mm_flags)
 424{
 425	return mm_flags & MMF_DUMPABLE_MASK;
 426}
 427
 428static inline int get_dumpable(struct mm_struct *mm)
 429{
 430	return __get_dumpable(mm->flags);
 431}
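/*
 * Illustrative sketch: as the comment above says, code interested in
 * privilege transitions must compare against SUID_DUMP_USER rather than
 * treating the value as a boolean, because SUID_DUMP_ROOT is non-zero too:
 *
 *	if (get_dumpable(mm) != SUID_DUMP_USER)
 *		...treat the task as having changed credentials...
 */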
 432
 433/* coredump filter bits */
 434#define MMF_DUMP_ANON_PRIVATE	2
 435#define MMF_DUMP_ANON_SHARED	3
 436#define MMF_DUMP_MAPPED_PRIVATE	4
 437#define MMF_DUMP_MAPPED_SHARED	5
 438#define MMF_DUMP_ELF_HEADERS	6
 439#define MMF_DUMP_HUGETLB_PRIVATE 7
 440#define MMF_DUMP_HUGETLB_SHARED  8
 441
 442#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
 443#define MMF_DUMP_FILTER_BITS	7
 444#define MMF_DUMP_FILTER_MASK \
 445	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 446#define MMF_DUMP_FILTER_DEFAULT \
 447	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
 448	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
 449
 450#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
 451# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
 452#else
 453# define MMF_DUMP_MASK_DEFAULT_ELF	0
 454#endif
 455					/* leave room for more dump flags */
 456#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
 457#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
 458#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */
 459
 460#define MMF_HAS_UPROBES		19	/* has uprobes */
 461#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
 462
 463#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 464
 465struct sighand_struct {
 466	atomic_t		count;
 467	struct k_sigaction	action[_NSIG];
 468	spinlock_t		siglock;
 469	wait_queue_head_t	signalfd_wqh;
 470};
 471
 472struct pacct_struct {
 473	int			ac_flag;
 474	long			ac_exitcode;
 475	unsigned long		ac_mem;
 476	cputime_t		ac_utime, ac_stime;
 477	unsigned long		ac_minflt, ac_majflt;
 478};
 479
 480struct cpu_itimer {
 481	cputime_t expires;
 482	cputime_t incr;
 483	u32 error;
 484	u32 incr_error;
 485};
 486
 487/**
  488 * struct cputime - snapshot of system and user cputime
 489 * @utime: time spent in user mode
 490 * @stime: time spent in system mode
 491 *
 492 * Gathers a generic snapshot of user and system time.
 493 */
 494struct cputime {
 495	cputime_t utime;
 496	cputime_t stime;
 497};
 498
 499/**
 500 * struct task_cputime - collected CPU time counts
 501 * @utime:		time spent in user mode, in &cputime_t units
 502 * @stime:		time spent in kernel mode, in &cputime_t units
 503 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 504 *
 505 * This is an extension of struct cputime that includes the total runtime
 506 * spent by the task from the scheduler point of view.
 507 *
 508 * As a result, this structure groups together three kinds of CPU time
 509 * that are tracked for threads and thread groups.  Most things considering
 510 * CPU time want to group these counts together and treat all three
 511 * of them in parallel.
 512 */
 513struct task_cputime {
 514	cputime_t utime;
 515	cputime_t stime;
 516	unsigned long long sum_exec_runtime;
 517};
 518/* Alternate field names when used to cache expirations. */
 519#define prof_exp	stime
 520#define virt_exp	utime
 521#define sched_exp	sum_exec_runtime
 522
 523#define INIT_CPUTIME	\
 524	(struct task_cputime) {					\
 525		.utime = 0,					\
 526		.stime = 0,					\
 527		.sum_exec_runtime = 0,				\
 528	}
 529
 530#ifdef CONFIG_PREEMPT_COUNT
 531#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)
 532#else
 533#define PREEMPT_DISABLED	PREEMPT_ENABLED
 534#endif
 535
 536/*
 537 * Disable preemption until the scheduler is running.
 538 * Reset by start_kernel()->sched_init()->init_idle().
 539 *
 540 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 541 * before the scheduler is active -- see should_resched().
 542 */
 543#define INIT_PREEMPT_COUNT	(PREEMPT_DISABLED + PREEMPT_ACTIVE)
 544
 545/**
 546 * struct thread_group_cputimer - thread group interval timer counts
 547 * @cputime:		thread group interval timers.
 548 * @running:		non-zero when there are timers running and
 549 * 			@cputime receives updates.
 550 * @lock:		lock for fields in this struct.
 551 *
 552 * This structure contains the version of task_cputime, above, that is
 553 * used for thread group CPU timer calculations.
 554 */
 555struct thread_group_cputimer {
 556	struct task_cputime cputime;
 557	int running;
 558	raw_spinlock_t lock;
 559};
 560
 561#include <linux/rwsem.h>
 562struct autogroup;
 563
 564/*
 565 * NOTE! "signal_struct" does not have its own
 566 * locking, because a shared signal_struct always
 567 * implies a shared sighand_struct, so locking
 568 * sighand_struct is always a proper superset of
 569 * the locking of signal_struct.
 570 */
 571struct signal_struct {
 572	atomic_t		sigcnt;
 573	atomic_t		live;
 574	int			nr_threads;
 575	struct list_head	thread_head;
 576
 577	wait_queue_head_t	wait_chldexit;	/* for wait4() */
 578
 579	/* current thread group signal load-balancing target: */
 580	struct task_struct	*curr_target;
 581
 582	/* shared signal handling: */
 583	struct sigpending	shared_pending;
 584
 585	/* thread group exit support */
 586	int			group_exit_code;
 587	/* overloaded:
 588	 * - notify group_exit_task when ->count is equal to notify_count
 589	 * - everyone except group_exit_task is stopped during signal delivery
 590	 *   of fatal signals, group_exit_task processes the signal.
 591	 */
 592	int			notify_count;
 593	struct task_struct	*group_exit_task;
 594
 595	/* thread group stop support, overloads group_exit_code too */
 596	int			group_stop_count;
 597	unsigned int		flags; /* see SIGNAL_* flags below */
 598
 599	/*
 600	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
 601	 * manager, to re-parent orphan (double-forking) child processes
 602	 * to this process instead of 'init'. The service manager is
 603	 * able to receive SIGCHLD signals and is able to investigate
 604	 * the process until it calls wait(). All children of this
 605	 * process will inherit a flag if they should look for a
 606	 * child_subreaper process at exit.
 607	 */
 608	unsigned int		is_child_subreaper:1;
 609	unsigned int		has_child_subreaper:1;
 610
 611	/* POSIX.1b Interval Timers */
 612	int			posix_timer_id;
 613	struct list_head	posix_timers;
 614
 615	/* ITIMER_REAL timer for the process */
 616	struct hrtimer real_timer;
 617	struct pid *leader_pid;
 618	ktime_t it_real_incr;
 619
 620	/*
 621	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
 622	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
 623	 * values are defined to 0 and 1 respectively
 624	 */
 625	struct cpu_itimer it[2];
 626
 627	/*
 628	 * Thread group totals for process CPU timers.
 629	 * See thread_group_cputimer(), et al, for details.
 630	 */
 631	struct thread_group_cputimer cputimer;
 632
 633	/* Earliest-expiration cache. */
 634	struct task_cputime cputime_expires;
 635
 636	struct list_head cpu_timers[3];
 637
 638	struct pid *tty_old_pgrp;
 639
 640	/* boolean value for session group leader */
 641	int leader;
 642
 643	struct tty_struct *tty; /* NULL if no tty */
 644
 645#ifdef CONFIG_SCHED_AUTOGROUP
 646	struct autogroup *autogroup;
 647#endif
 648	/*
 649	 * Cumulative resource counters for dead threads in the group,
 650	 * and for reaped dead child processes forked by this group.
 651	 * Live threads maintain their own counters and add to these
 652	 * in __exit_signal, except for the group leader.
 653	 */
 654	cputime_t utime, stime, cutime, cstime;
 655	cputime_t gtime;
 656	cputime_t cgtime;
 657#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 658	struct cputime prev_cputime;
 659#endif
 660	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 661	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 662	unsigned long inblock, oublock, cinblock, coublock;
 663	unsigned long maxrss, cmaxrss;
 664	struct task_io_accounting ioac;
 665
 666	/*
  667	 * Cumulative ns of scheduled CPU time of dead threads in the
  668	 * group, not including a zombie group leader. (This only differs
 669	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
 670	 * other than jiffies.)
 671	 */
 672	unsigned long long sum_sched_runtime;
 673
 674	/*
 675	 * We don't bother to synchronize most readers of this at all,
 676	 * because there is no reader checking a limit that actually needs
 677	 * to get both rlim_cur and rlim_max atomically, and either one
 678	 * alone is a single word that can safely be read normally.
 679	 * getrlimit/setrlimit use task_lock(current->group_leader) to
 680	 * protect this instead of the siglock, because they really
 681	 * have no need to disable irqs.
 682	 */
 683	struct rlimit rlim[RLIM_NLIMITS];
 684
 685#ifdef CONFIG_BSD_PROCESS_ACCT
 686	struct pacct_struct pacct;	/* per-process accounting information */
 687#endif
 688#ifdef CONFIG_TASKSTATS
 689	struct taskstats *stats;
 690#endif
 691#ifdef CONFIG_AUDIT
 692	unsigned audit_tty;
 693	unsigned audit_tty_log_passwd;
 694	struct tty_audit_buf *tty_audit_buf;
 695#endif
 696#ifdef CONFIG_CGROUPS
 697	/*
 698	 * group_rwsem prevents new tasks from entering the threadgroup and
  699	 * member tasks from exiting, more specifically, setting of
 700	 * PF_EXITING.  fork and exit paths are protected with this rwsem
 701	 * using threadgroup_change_begin/end().  Users which require
 702	 * threadgroup to remain stable should use threadgroup_[un]lock()
 703	 * which also takes care of exec path.  Currently, cgroup is the
 704	 * only user.
 705	 */
 706	struct rw_semaphore group_rwsem;
 707#endif
 708
 709	oom_flags_t oom_flags;
 710	short oom_score_adj;		/* OOM kill score adjustment */
 711	short oom_score_adj_min;	/* OOM kill score adjustment min value.
 712					 * Only settable by CAP_SYS_RESOURCE. */
 713
 714	struct mutex cred_guard_mutex;	/* guard against foreign influences on
 715					 * credential calculations
  716					 * (notably ptrace) */
 717};
 718
 719/*
 720 * Bits in flags field of signal_struct.
 721 */
 722#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
 723#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
 724#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
 725#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
 726/*
 727 * Pending notifications to parent.
 728 */
 729#define SIGNAL_CLD_STOPPED	0x00000010
 730#define SIGNAL_CLD_CONTINUED	0x00000020
 731#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
 732
 733#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
 734
 735/* If true, all threads except ->group_exit_task have pending SIGKILL */
 736static inline int signal_group_exit(const struct signal_struct *sig)
 737{
 738	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
 739		(sig->group_exit_task != NULL);
 740}
 741
 742/*
 743 * Some day this will be a full-fledged user tracking system..
 744 */
 745struct user_struct {
 746	atomic_t __count;	/* reference count */
 747	atomic_t processes;	/* How many processes does this user have? */
 748	atomic_t files;		/* How many open files does this user have? */
 749	atomic_t sigpending;	/* How many pending signals does this user have? */
 750#ifdef CONFIG_INOTIFY_USER
 751	atomic_t inotify_watches; /* How many inotify watches does this user have? */
 752	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
 753#endif
 754#ifdef CONFIG_FANOTIFY
 755	atomic_t fanotify_listeners;
 756#endif
 757#ifdef CONFIG_EPOLL
 758	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
 759#endif
 760#ifdef CONFIG_POSIX_MQUEUE
 761	/* protected by mq_lock	*/
 762	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
 763#endif
 764	unsigned long locked_shm; /* How many pages of mlocked shm ? */
 765
 766#ifdef CONFIG_KEYS
 767	struct key *uid_keyring;	/* UID specific keyring */
 768	struct key *session_keyring;	/* UID's default session keyring */
 769#endif
 770
 771	/* Hash table maintenance information */
 772	struct hlist_node uidhash_node;
 773	kuid_t uid;
 774
 775#ifdef CONFIG_PERF_EVENTS
 776	atomic_long_t locked_vm;
 777#endif
 778};
 779
 780extern int uids_sysfs_init(void);
 781
 782extern struct user_struct *find_user(kuid_t);
 783
 784extern struct user_struct root_user;
 785#define INIT_USER (&root_user)
 786
 787
 788struct backing_dev_info;
 789struct reclaim_state;
 790
 791#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 792struct sched_info {
 793	/* cumulative counters */
 794	unsigned long pcount;	      /* # of times run on this cpu */
 795	unsigned long long run_delay; /* time spent waiting on a runqueue */
 796
 797	/* timestamps */
 798	unsigned long long last_arrival,/* when we last ran on a cpu */
 799			   last_queued;	/* when we were last queued to run */
 800};
 801#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 802
 803#ifdef CONFIG_TASK_DELAY_ACCT
 804struct task_delay_info {
 805	spinlock_t	lock;
 806	unsigned int	flags;	/* Private per-task flags */
 807
 808	/* For each stat XXX, add following, aligned appropriately
 809	 *
 810	 * struct timespec XXX_start, XXX_end;
 811	 * u64 XXX_delay;
 812	 * u32 XXX_count;
 813	 *
 814	 * Atomicity of updates to XXX_delay, XXX_count protected by
 815	 * single lock above (split into XXX_lock if contention is an issue).
 816	 */
 817
 818	/*
 819	 * XXX_count is incremented on every XXX operation, the delay
 820	 * associated with the operation is added to XXX_delay.
 821	 * XXX_delay contains the accumulated delay time in nanoseconds.
 822	 */
 823	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
 824	u64 blkio_delay;	/* wait for sync block io completion */
 825	u64 swapin_delay;	/* wait for swapin block io completion */
 826	u32 blkio_count;	/* total count of the number of sync block */
 827				/* io operations performed */
 828	u32 swapin_count;	/* total count of the number of swapin block */
 829				/* io operations performed */
 830
 831	struct timespec freepages_start, freepages_end;
 832	u64 freepages_delay;	/* wait for memory reclaim */
 833	u32 freepages_count;	/* total count of memory reclaim */
 834};
 835#endif	/* CONFIG_TASK_DELAY_ACCT */
 836
 837static inline int sched_info_on(void)
 838{
 839#ifdef CONFIG_SCHEDSTATS
 840	return 1;
 841#elif defined(CONFIG_TASK_DELAY_ACCT)
 842	extern int delayacct_on;
 843	return delayacct_on;
 844#else
 845	return 0;
 846#endif
 847}
 848
 849enum cpu_idle_type {
 850	CPU_IDLE,
 851	CPU_NOT_IDLE,
 852	CPU_NEWLY_IDLE,
 853	CPU_MAX_IDLE_TYPES
 854};
 855
 856/*
 857 * Increase resolution of cpu_power calculations
 858 */
 859#define SCHED_POWER_SHIFT	10
 860#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)
 861
 862/*
 863 * sched-domains (multiprocessor balancing) declarations:
 864 */
 865#ifdef CONFIG_SMP
 866#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
 867#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
 868#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
 869#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
 870#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
 871#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
 872#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
 873#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 874#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 875#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
 876#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
 877#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 878#define SD_NUMA			0x4000	/* cross-node balancing */
 879
 880extern int __weak arch_sd_sibiling_asym_packing(void);
 881
 882struct sched_domain_attr {
 883	int relax_domain_level;
 884};
 885
 886#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
 887	.relax_domain_level = -1,			\
 888}
 889
 890extern int sched_domain_level_max;
 891
 892struct sched_group;
 893
 894struct sched_domain {
 895	/* These fields must be setup */
 896	struct sched_domain *parent;	/* top domain must be null terminated */
 897	struct sched_domain *child;	/* bottom domain must be null terminated */
 898	struct sched_group *groups;	/* the balancing groups of the domain */
 899	unsigned long min_interval;	/* Minimum balance interval ms */
 900	unsigned long max_interval;	/* Maximum balance interval ms */
 901	unsigned int busy_factor;	/* less balancing by factor if busy */
 902	unsigned int imbalance_pct;	/* No balance until over watermark */
 903	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
 904	unsigned int busy_idx;
 905	unsigned int idle_idx;
 906	unsigned int newidle_idx;
 907	unsigned int wake_idx;
 908	unsigned int forkexec_idx;
 909	unsigned int smt_gain;
 910
 911	int nohz_idle;			/* NOHZ IDLE status */
 912	int flags;			/* See SD_* */
 913	int level;
 914
 915	/* Runtime fields. */
 916	unsigned long last_balance;	/* init to jiffies. units in jiffies */
 917	unsigned int balance_interval;	/* initialise to 1. units in ms. */
 918	unsigned int nr_balance_failed; /* initialise to 0 */
 919
 920	/* idle_balance() stats */
 921	u64 max_newidle_lb_cost;
 922	unsigned long next_decay_max_lb_cost;
 923
 924#ifdef CONFIG_SCHEDSTATS
 925	/* load_balance() stats */
 926	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
 927	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
 928	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
 929	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
 930	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
 931	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
 932	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
 933	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
 934
 935	/* Active load balancing */
 936	unsigned int alb_count;
 937	unsigned int alb_failed;
 938	unsigned int alb_pushed;
 939
 940	/* SD_BALANCE_EXEC stats */
 941	unsigned int sbe_count;
 942	unsigned int sbe_balanced;
 943	unsigned int sbe_pushed;
 944
 945	/* SD_BALANCE_FORK stats */
 946	unsigned int sbf_count;
 947	unsigned int sbf_balanced;
 948	unsigned int sbf_pushed;
 949
 950	/* try_to_wake_up() stats */
 951	unsigned int ttwu_wake_remote;
 952	unsigned int ttwu_move_affine;
 953	unsigned int ttwu_move_balance;
 954#endif
 955#ifdef CONFIG_SCHED_DEBUG
 956	char *name;
 957#endif
 958	union {
 959		void *private;		/* used during construction */
 960		struct rcu_head rcu;	/* used during destruction */
 961	};
 962
 963	unsigned int span_weight;
 964	/*
 965	 * Span of all CPUs in this domain.
 966	 *
 967	 * NOTE: this field is variable length. (Allocated dynamically
 968	 * by attaching extra space to the end of the structure,
 969	 * depending on how many CPUs the kernel has booted up with)
 970	 */
 971	unsigned long span[0];
 972};
 973
 974static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
 975{
 976	return to_cpumask(sd->span);
 977}
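/*
 * Illustrative sketch: walking every CPU covered by a domain, e.g. from a
 * load-balancing path that already holds the RCU read lock protecting sd:
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, sched_domain_span(sd)) {
 *		...inspect the runqueue of each candidate cpu...
 *	}
 */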
 978
 979extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 980				    struct sched_domain_attr *dattr_new);
 981
 982/* Allocate an array of sched domains, for partition_sched_domains(). */
 983cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
 984void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 985
 986bool cpus_share_cache(int this_cpu, int that_cpu);
 987
 988#else /* CONFIG_SMP */
 989
 990struct sched_domain_attr;
 991
 992static inline void
 993partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 994			struct sched_domain_attr *dattr_new)
 995{
 996}
 997
 998static inline bool cpus_share_cache(int this_cpu, int that_cpu)
 999{
1000	return true;
1001}
1002
1003#endif	/* !CONFIG_SMP */
1004
1005
1006struct io_context;			/* See blkdev.h */
1007
1008
1009#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1010extern void prefetch_stack(struct task_struct *t);
1011#else
1012static inline void prefetch_stack(struct task_struct *t) { }
1013#endif
1014
1015struct audit_context;		/* See audit.c */
1016struct mempolicy;
1017struct pipe_inode_info;
1018struct uts_namespace;
1019
1020struct load_weight {
1021	unsigned long weight;
1022	u32 inv_weight;
1023};
1024
1025struct sched_avg {
1026	/*
1027	 * These sums represent an infinite geometric series and so are bound
1028	 * above by 1024/(1-y).  Thus we only need a u32 to store them for all
1029	 * choices of y < 1-2^(-32)*1024.
1030	 */
1031	u32 runnable_avg_sum, runnable_avg_period;
1032	u64 last_runnable_update;
1033	s64 decay_count;
1034	unsigned long load_avg_contrib;
1035};
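/*
 * Illustrative numbers: assuming the y^32 = 1/2 decay used by the fair-class
 * per-entity load tracking, the bound mentioned above works out to about
 * 1024 / (1 - 0.97857) ~= 47800, so the running sums fit a u32 with plenty
 * of headroom.
 */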
1036
1037#ifdef CONFIG_SCHEDSTATS
1038struct sched_statistics {
1039	u64			wait_start;
1040	u64			wait_max;
1041	u64			wait_count;
1042	u64			wait_sum;
1043	u64			iowait_count;
1044	u64			iowait_sum;
1045
1046	u64			sleep_start;
1047	u64			sleep_max;
1048	s64			sum_sleep_runtime;
1049
1050	u64			block_start;
1051	u64			block_max;
1052	u64			exec_max;
1053	u64			slice_max;
1054
1055	u64			nr_migrations_cold;
1056	u64			nr_failed_migrations_affine;
1057	u64			nr_failed_migrations_running;
1058	u64			nr_failed_migrations_hot;
1059	u64			nr_forced_migrations;
1060
1061	u64			nr_wakeups;
1062	u64			nr_wakeups_sync;
1063	u64			nr_wakeups_migrate;
1064	u64			nr_wakeups_local;
1065	u64			nr_wakeups_remote;
1066	u64			nr_wakeups_affine;
1067	u64			nr_wakeups_affine_attempts;
1068	u64			nr_wakeups_passive;
1069	u64			nr_wakeups_idle;
1070};
1071#endif
1072
1073struct sched_entity {
1074	struct load_weight	load;		/* for load-balancing */
1075	struct rb_node		run_node;
1076	struct list_head	group_node;
1077	unsigned int		on_rq;
1078
1079	u64			exec_start;
1080	u64			sum_exec_runtime;
1081	u64			vruntime;
1082	u64			prev_sum_exec_runtime;
1083
1084	u64			nr_migrations;
1085
1086#ifdef CONFIG_SCHEDSTATS
1087	struct sched_statistics statistics;
1088#endif
1089
1090#ifdef CONFIG_FAIR_GROUP_SCHED
1091	int			depth;
1092	struct sched_entity	*parent;
1093	/* rq on which this entity is (to be) queued: */
1094	struct cfs_rq		*cfs_rq;
1095	/* rq "owned" by this entity/group: */
1096	struct cfs_rq		*my_q;
1097#endif
1098
1099#ifdef CONFIG_SMP
1100	/* Per-entity load-tracking */
1101	struct sched_avg	avg;
1102#endif
1103};
1104
1105struct sched_rt_entity {
1106	struct list_head run_list;
1107	unsigned long timeout;
1108	unsigned long watchdog_stamp;
1109	unsigned int time_slice;
1110
1111	struct sched_rt_entity *back;
1112#ifdef CONFIG_RT_GROUP_SCHED
1113	struct sched_rt_entity	*parent;
1114	/* rq on which this entity is (to be) queued: */
1115	struct rt_rq		*rt_rq;
1116	/* rq "owned" by this entity/group: */
1117	struct rt_rq		*my_q;
1118#endif
1119};
1120
1121struct sched_dl_entity {
1122	struct rb_node	rb_node;
1123
1124	/*
1125	 * Original scheduling parameters. Copied here from sched_attr
 1126	 * during sched_setattr(), they will remain the same until
 1127	 * the next sched_setattr().
1128	 */
1129	u64 dl_runtime;		/* maximum runtime for each instance	*/
1130	u64 dl_deadline;	/* relative deadline of each instance	*/
1131	u64 dl_period;		/* separation of two instances (period) */
1132	u64 dl_bw;		/* dl_runtime / dl_deadline		*/
1133
1134	/*
1135	 * Actual scheduling parameters. Initialized with the values above,
 1136	 * they are continuously updated during task execution. Note that
1137	 * the remaining runtime could be < 0 in case we are in overrun.
1138	 */
1139	s64 runtime;		/* remaining runtime for this instance	*/
1140	u64 deadline;		/* absolute deadline for this instance	*/
1141	unsigned int flags;	/* specifying the scheduler behaviour	*/
1142
1143	/*
1144	 * Some bool flags:
1145	 *
1146	 * @dl_throttled tells if we exhausted the runtime. If so, the
1147	 * task has to wait for a replenishment to be performed at the
1148	 * next firing of dl_timer.
1149	 *
1150	 * @dl_new tells if a new instance arrived. If so we must
1151	 * start executing it with full runtime and reset its absolute
1152	 * deadline;
1153	 *
1154	 * @dl_boosted tells if we are boosted due to DI. If so we are
1155	 * outside bandwidth enforcement mechanism (but only until we
1156	 * exit the critical section);
1157	 *
1158	 * @dl_yielded tells if task gave up the cpu before consuming
1159	 * all its available runtime during the last job.
1160	 */
1161	int dl_throttled, dl_new, dl_boosted, dl_yielded;
1162
1163	/*
1164	 * Bandwidth enforcement timer. Each -deadline task has its
1165	 * own bandwidth to be enforced, thus we need one timer per task.
1166	 */
1167	struct hrtimer dl_timer;
1168};
1169
1170struct rcu_node;
1171
1172enum perf_event_task_context {
1173	perf_invalid_context = -1,
1174	perf_hw_context = 0,
1175	perf_sw_context,
1176	perf_nr_task_contexts,
1177};
1178
1179struct task_struct {
1180	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
1181	void *stack;
1182	atomic_t usage;
1183	unsigned int flags;	/* per process flags, defined below */
1184	unsigned int ptrace;
1185
1186#ifdef CONFIG_SMP
1187	struct llist_node wake_entry;
1188	int on_cpu;
1189	struct task_struct *last_wakee;
1190	unsigned long wakee_flips;
1191	unsigned long wakee_flip_decay_ts;
1192
1193	int wake_cpu;
1194#endif
1195	int on_rq;
1196
1197	int prio, static_prio, normal_prio;
1198	unsigned int rt_priority;
1199	const struct sched_class *sched_class;
1200	struct sched_entity se;
1201	struct sched_rt_entity rt;
1202#ifdef CONFIG_CGROUP_SCHED
1203	struct task_group *sched_task_group;
1204#endif
1205	struct sched_dl_entity dl;
1206
1207#ifdef CONFIG_PREEMPT_NOTIFIERS
1208	/* list of struct preempt_notifier: */
1209	struct hlist_head preempt_notifiers;
1210#endif
1211
1212#ifdef CONFIG_BLK_DEV_IO_TRACE
1213	unsigned int btrace_seq;
1214#endif
1215
1216	unsigned int policy;
1217	int nr_cpus_allowed;
1218	cpumask_t cpus_allowed;
1219
1220#ifdef CONFIG_PREEMPT_RCU
1221	int rcu_read_lock_nesting;
1222	char rcu_read_unlock_special;
1223	struct list_head rcu_node_entry;
1224#endif /* #ifdef CONFIG_PREEMPT_RCU */
1225#ifdef CONFIG_TREE_PREEMPT_RCU
1226	struct rcu_node *rcu_blocked_node;
1227#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1228#ifdef CONFIG_RCU_BOOST
1229	struct rt_mutex *rcu_boost_mutex;
1230#endif /* #ifdef CONFIG_RCU_BOOST */
1231
1232#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1233	struct sched_info sched_info;
1234#endif
1235
1236	struct list_head tasks;
1237#ifdef CONFIG_SMP
1238	struct plist_node pushable_tasks;
1239	struct rb_node pushable_dl_tasks;
1240#endif
1241
1242	struct mm_struct *mm, *active_mm;
1243#ifdef CONFIG_COMPAT_BRK
1244	unsigned brk_randomized:1;
1245#endif
1246	/* per-thread vma caching */
1247	u32 vmacache_seqnum;
1248	struct vm_area_struct *vmacache[VMACACHE_SIZE];
1249#if defined(SPLIT_RSS_COUNTING)
1250	struct task_rss_stat	rss_stat;
1251#endif
1252/* task state */
1253	int exit_state;
1254	int exit_code, exit_signal;
1255	int pdeath_signal;  /*  The signal sent when the parent dies  */
1256	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
1257
1258	/* Used for emulating ABI behavior of previous Linux versions */
1259	unsigned int personality;
1260
1261	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
1262				 * execve */
1263	unsigned in_iowait:1;
1264
1265	/* task may not gain privileges */
1266	unsigned no_new_privs:1;
1267
1268	/* Revert to default priority/policy when forking */
1269	unsigned sched_reset_on_fork:1;
1270	unsigned sched_contributes_to_load:1;
1271
1272	pid_t pid;
1273	pid_t tgid;
1274
1275#ifdef CONFIG_CC_STACKPROTECTOR
1276	/* Canary value for the -fstack-protector gcc feature */
1277	unsigned long stack_canary;
1278#endif
1279	/*
1280	 * pointers to (original) parent process, youngest child, younger sibling,
1281	 * older sibling, respectively.  (p->father can be replaced with
1282	 * p->real_parent->pid)
1283	 */
1284	struct task_struct __rcu *real_parent; /* real parent process */
1285	struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1286	/*
1287	 * children/sibling forms the list of my natural children
1288	 */
1289	struct list_head children;	/* list of my children */
1290	struct list_head sibling;	/* linkage in my parent's children list */
1291	struct task_struct *group_leader;	/* threadgroup leader */
1292
1293	/*
1294	 * ptraced is the list of tasks this task is using ptrace on.
1295	 * This includes both natural children and PTRACE_ATTACH targets.
1296	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
1297	 */
1298	struct list_head ptraced;
1299	struct list_head ptrace_entry;
1300
1301	/* PID/PID hash table linkage. */
1302	struct pid_link pids[PIDTYPE_MAX];
1303	struct list_head thread_group;
1304	struct list_head thread_node;
1305
1306	struct completion *vfork_done;		/* for vfork() */
1307	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
1308	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
1309
1310	cputime_t utime, stime, utimescaled, stimescaled;
1311	cputime_t gtime;
1312#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1313	struct cputime prev_cputime;
1314#endif
1315#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1316	seqlock_t vtime_seqlock;
1317	unsigned long long vtime_snap;
1318	enum {
1319		VTIME_SLEEPING = 0,
1320		VTIME_USER,
1321		VTIME_SYS,
1322	} vtime_snap_whence;
1323#endif
1324	unsigned long nvcsw, nivcsw; /* context switch counts */
1325	struct timespec start_time; 		/* monotonic time */
1326	struct timespec real_start_time;	/* boot based time */
1327/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1328	unsigned long min_flt, maj_flt;
1329
1330	struct task_cputime cputime_expires;
1331	struct list_head cpu_timers[3];
1332
1333/* process credentials */
1334	const struct cred __rcu *real_cred; /* objective and real subjective task
1335					 * credentials (COW) */
1336	const struct cred __rcu *cred;	/* effective (overridable) subjective task
1337					 * credentials (COW) */
1338	char comm[TASK_COMM_LEN]; /* executable name excluding path
1339				     - access with [gs]et_task_comm (which lock
1340				       it with task_lock())
1341				     - initialized normally by setup_new_exec */
1342/* file system info */
1343	int link_count, total_link_count;
1344#ifdef CONFIG_SYSVIPC
1345/* ipc stuff */
1346	struct sysv_sem sysvsem;
1347#endif
1348#ifdef CONFIG_DETECT_HUNG_TASK
1349/* hung task detection */
1350	unsigned long last_switch_count;
1351#endif
1352/* CPU-specific state of this task */
1353	struct thread_struct thread;
1354/* filesystem information */
1355	struct fs_struct *fs;
1356/* open file information */
1357	struct files_struct *files;
1358/* namespaces */
1359	struct nsproxy *nsproxy;
1360/* signal handlers */
1361	struct signal_struct *signal;
1362	struct sighand_struct *sighand;
1363
1364	sigset_t blocked, real_blocked;
1365	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
1366	struct sigpending pending;
1367
1368	unsigned long sas_ss_sp;
1369	size_t sas_ss_size;
1370	int (*notifier)(void *priv);
1371	void *notifier_data;
1372	sigset_t *notifier_mask;
1373	struct callback_head *task_works;
1374
1375	struct audit_context *audit_context;
1376#ifdef CONFIG_AUDITSYSCALL
1377	kuid_t loginuid;
1378	unsigned int sessionid;
1379#endif
1380	struct seccomp seccomp;
1381
1382/* Thread group tracking */
1383   	u32 parent_exec_id;
1384   	u32 self_exec_id;
1385/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1386 * mempolicy */
1387	spinlock_t alloc_lock;
1388
1389	/* Protection of the PI data structures: */
1390	raw_spinlock_t pi_lock;
1391
1392#ifdef CONFIG_RT_MUTEXES
1393	/* PI waiters blocked on a rt_mutex held by this task */
1394	struct rb_root pi_waiters;
1395	struct rb_node *pi_waiters_leftmost;
1396	/* Deadlock detection and priority inheritance handling */
1397	struct rt_mutex_waiter *pi_blocked_on;
1398	/* Top pi_waiters task */
1399	struct task_struct *pi_top_task;
1400#endif
1401
1402#ifdef CONFIG_DEBUG_MUTEXES
1403	/* mutex deadlock detection */
1404	struct mutex_waiter *blocked_on;
1405#endif
1406#ifdef CONFIG_TRACE_IRQFLAGS
1407	unsigned int irq_events;
1408	unsigned long hardirq_enable_ip;
1409	unsigned long hardirq_disable_ip;
1410	unsigned int hardirq_enable_event;
1411	unsigned int hardirq_disable_event;
1412	int hardirqs_enabled;
1413	int hardirq_context;
1414	unsigned long softirq_disable_ip;
1415	unsigned long softirq_enable_ip;
1416	unsigned int softirq_disable_event;
1417	unsigned int softirq_enable_event;
1418	int softirqs_enabled;
1419	int softirq_context;
1420#endif
1421#ifdef CONFIG_LOCKDEP
1422# define MAX_LOCK_DEPTH 48UL
1423	u64 curr_chain_key;
1424	int lockdep_depth;
1425	unsigned int lockdep_recursion;
1426	struct held_lock held_locks[MAX_LOCK_DEPTH];
1427	gfp_t lockdep_reclaim_gfp;
1428#endif
1429
1430/* journalling filesystem info */
1431	void *journal_info;
1432
1433/* stacked block device info */
1434	struct bio_list *bio_list;
1435
1436#ifdef CONFIG_BLOCK
1437/* stack plugging */
1438	struct blk_plug *plug;
1439#endif
1440
1441/* VM state */
1442	struct reclaim_state *reclaim_state;
1443
1444	struct backing_dev_info *backing_dev_info;
1445
1446	struct io_context *io_context;
1447
1448	unsigned long ptrace_message;
1449	siginfo_t *last_siginfo; /* For ptrace use.  */
1450	struct task_io_accounting ioac;
1451#if defined(CONFIG_TASK_XACCT)
1452	u64 acct_rss_mem1;	/* accumulated rss usage */
1453	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
1454	cputime_t acct_timexpd;	/* stime + utime since last update */
1455#endif
1456#ifdef CONFIG_CPUSETS
1457	nodemask_t mems_allowed;	/* Protected by alloc_lock */
 1458	seqcount_t mems_allowed_seq;	/* Sequence no. to catch updates */
1459	int cpuset_mem_spread_rotor;
1460	int cpuset_slab_spread_rotor;
1461#endif
1462#ifdef CONFIG_CGROUPS
1463	/* Control Group info protected by css_set_lock */
1464	struct css_set __rcu *cgroups;
1465	/* cg_list protected by css_set_lock and tsk->alloc_lock */
1466	struct list_head cg_list;
1467#endif
1468#ifdef CONFIG_FUTEX
1469	struct robust_list_head __user *robust_list;
1470#ifdef CONFIG_COMPAT
1471	struct compat_robust_list_head __user *compat_robust_list;
1472#endif
1473	struct list_head pi_state_list;
1474	struct futex_pi_state *pi_state_cache;
1475#endif
1476#ifdef CONFIG_PERF_EVENTS
1477	struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1478	struct mutex perf_event_mutex;
1479	struct list_head perf_event_list;
1480#endif
1481#ifdef CONFIG_DEBUG_PREEMPT
1482	unsigned long preempt_disable_ip;
1483#endif
1484#ifdef CONFIG_NUMA
1485	struct mempolicy *mempolicy;	/* Protected by alloc_lock */
1486	short il_next;
1487	short pref_node_fork;
1488#endif
1489#ifdef CONFIG_NUMA_BALANCING
1490	int numa_scan_seq;
1491	unsigned int numa_scan_period;
1492	unsigned int numa_scan_period_max;
1493	int numa_preferred_nid;
1494	unsigned long numa_migrate_retry;
1495	u64 node_stamp;			/* migration stamp  */
1496	u64 last_task_numa_placement;
1497	u64 last_sum_exec_runtime;
1498	struct callback_head numa_work;
1499
1500	struct list_head numa_entry;
1501	struct numa_group *numa_group;
1502
1503	/*
1504	 * Exponential decaying average of faults on a per-node basis.
 1505	 * Scheduling placement decisions are made based on these counts.
 1506	 * The values remain static for the duration of a PTE scan.
1507	 */
1508	unsigned long *numa_faults_memory;
1509	unsigned long total_numa_faults;
1510
1511	/*
1512	 * numa_faults_buffer records faults per node during the current
1513	 * scan window. When the scan completes, the counts in
1514	 * numa_faults_memory decay and these values are copied.
1515	 */
1516	unsigned long *numa_faults_buffer_memory;
1517
1518	/*
1519	 * Track the nodes the process was running on when a NUMA hinting
1520	 * fault was incurred.
1521	 */
1522	unsigned long *numa_faults_cpu;
1523	unsigned long *numa_faults_buffer_cpu;
1524
1525	/*
1526	 * numa_faults_locality tracks if faults recorded during the last
1527	 * scan window were remote/local. The task scan period is adapted
1528	 * based on the locality of the faults with different weights
1529	 * depending on whether they were shared or private faults
1530	 */
1531	unsigned long numa_faults_locality[2];
1532
1533	unsigned long numa_pages_migrated;
1534#endif /* CONFIG_NUMA_BALANCING */
1535
1536	struct rcu_head rcu;
1537
1538	/*
1539	 * cache last used pipe for splice
1540	 */
1541	struct pipe_inode_info *splice_pipe;
1542
1543	struct page_frag task_frag;
1544
1545#ifdef	CONFIG_TASK_DELAY_ACCT
1546	struct task_delay_info *delays;
1547#endif
1548#ifdef CONFIG_FAULT_INJECTION
1549	int make_it_fail;
1550#endif
1551	/*
1552	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1553	 * balance_dirty_pages() for some dirty throttling pause
1554	 */
1555	int nr_dirtied;
1556	int nr_dirtied_pause;
1557	unsigned long dirty_paused_when; /* start of a write-and-pause period */
1558
1559#ifdef CONFIG_LATENCYTOP
1560	int latency_record_count;
1561	struct latency_record latency_record[LT_SAVECOUNT];
1562#endif
1563	/*
1564	 * time slack values; these are used to round up poll() and
1565	 * select() etc timeout values. These are in nanoseconds.
1566	 */
1567	unsigned long timer_slack_ns;
1568	unsigned long default_timer_slack_ns;
1569
1570#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1571	/* Index of current stored address in ret_stack */
1572	int curr_ret_stack;
1573	/* Stack of return addresses for return function tracing */
1574	struct ftrace_ret_stack	*ret_stack;
1575	/* time stamp for last schedule */
1576	unsigned long long ftrace_timestamp;
1577	/*
1578	 * Number of functions that haven't been traced
1579	 * because of depth overrun.
1580	 */
1581	atomic_t trace_overrun;
1582	/* Pause for the tracing */
1583	atomic_t tracing_graph_pause;
1584#endif
1585#ifdef CONFIG_TRACING
1586	/* state flags for use by tracers */
1587	unsigned long trace;
1588	/* bitmask and counter of trace recursion */
1589	unsigned long trace_recursion;
1590#endif /* CONFIG_TRACING */
1591#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
1592	struct memcg_batch_info {
1593		int do_batch;	/* incremented when batch uncharge started */
1594		struct mem_cgroup *memcg; /* target memcg of uncharge */
1595		unsigned long nr_pages;	/* uncharged usage */
1596		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
1597	} memcg_batch;
1598	unsigned int memcg_kmem_skip_account;
1599	struct memcg_oom_info {
1600		struct mem_cgroup *memcg;
1601		gfp_t gfp_mask;
1602		int order;
1603		unsigned int may_oom:1;
1604	} memcg_oom;
1605#endif
1606#ifdef CONFIG_UPROBES
1607	struct uprobe_task *utask;
1608#endif
1609#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1610	unsigned int	sequential_io;
1611	unsigned int	sequential_io_avg;
1612#endif
1613};
1614
1615/* Future-safe accessor for struct task_struct's cpus_allowed. */
1616#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
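/*
 * Illustrative sketch: checking whether a task may run on a given CPU goes
 * through the accessor rather than poking ->cpus_allowed directly:
 *
 *	if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 *		...cpu is an allowed target for p...
 */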
1617
1618#define TNF_MIGRATED	0x01
1619#define TNF_NO_GROUP	0x02
1620#define TNF_SHARED	0x04
1621#define TNF_FAULT_LOCAL	0x08
1622
1623#ifdef CONFIG_NUMA_BALANCING
1624extern void task_numa_fault(int last_node, int node, int pages, int flags);
1625extern pid_t task_numa_group_id(struct task_struct *p);
1626extern void set_numabalancing_state(bool enabled);
1627extern void task_numa_free(struct task_struct *p);
1628extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1629					int src_nid, int dst_cpu);
1630#else
1631static inline void task_numa_fault(int last_node, int node, int pages,
1632				   int flags)
1633{
1634}
1635static inline pid_t task_numa_group_id(struct task_struct *p)
1636{
1637	return 0;
1638}
1639static inline void set_numabalancing_state(bool enabled)
1640{
1641}
1642static inline void task_numa_free(struct task_struct *p)
1643{
1644}
1645static inline bool should_numa_migrate_memory(struct task_struct *p,
1646				struct page *page, int src_nid, int dst_cpu)
1647{
1648	return true;
1649}
1650#endif
1651
1652static inline struct pid *task_pid(struct task_struct *task)
1653{
1654	return task->pids[PIDTYPE_PID].pid;
1655}
1656
1657static inline struct pid *task_tgid(struct task_struct *task)
1658{
1659	return task->group_leader->pids[PIDTYPE_PID].pid;
1660}
1661
1662/*
1663 * Without tasklist or rcu lock it is not safe to dereference
 1664 * the result of task_pgrp/task_session even if task == current;
1665 * we can race with another thread doing sys_setsid/sys_setpgid.
1666 */
1667static inline struct pid *task_pgrp(struct task_struct *task)
1668{
1669	return task->group_leader->pids[PIDTYPE_PGID].pid;
1670}
1671
1672static inline struct pid *task_session(struct task_struct *task)
1673{
1674	return task->group_leader->pids[PIDTYPE_SID].pid;
1675}
1676
1677struct pid_namespace;
1678
1679/*
1680 * the helpers to get the task's different pids as they are seen
1681 * from various namespaces
1682 *
1683 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1684 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1685 *                     current.
1686 * task_xid_nr_ns()  : id seen from the ns specified;
1687 *
1688 * set_task_vxid()   : assigns a virtual id to a task;
1689 *
1690 * see also pid_nr() etc in include/linux/pid.h
1691 */
1692pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1693			struct pid_namespace *ns);
1694
1695static inline pid_t task_pid_nr(struct task_struct *tsk)
1696{
1697	return tsk->pid;
1698}
1699
1700static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1701					struct pid_namespace *ns)
1702{
1703	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1704}
1705
1706static inline pid_t task_pid_vnr(struct task_struct *tsk)
1707{
1708	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1709}
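/*
 * Illustrative sketch (not part of the upstream header): for a task inside
 * a pid namespace, task_pid_nr() and task_pid_vnr() report different ids,
 * e.g. from the task's own context:
 *
 *	pr_info("global pid %d, pid in my namespace %d\n",
 *		task_pid_nr(current), task_pid_vnr(current));
 *
 * The two values only coincide for tasks in the init pid namespace.
 */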
1710
1711
1712static inline pid_t task_tgid_nr(struct task_struct *tsk)
1713{
1714	return tsk->tgid;
1715}
1716
1717pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1718
1719static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1720{
1721	return pid_vnr(task_tgid(tsk));
1722}
1723
1724
1725static inline int pid_alive(const struct task_struct *p);
1726static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1727{
1728	pid_t pid = 0;
1729
1730	rcu_read_lock();
1731	if (pid_alive(tsk))
1732		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1733	rcu_read_unlock();
1734
1735	return pid;
1736}
1737
1738static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1739{
1740	return task_ppid_nr_ns(tsk, &init_pid_ns);
1741}
1742
1743static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1744					struct pid_namespace *ns)
1745{
1746	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1747}
1748
1749static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1750{
1751	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1752}
1753
1754
1755static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1756					struct pid_namespace *ns)
1757{
1758	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1759}
1760
1761static inline pid_t task_session_vnr(struct task_struct *tsk)
1762{
1763	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1764}
1765
1766/* obsolete, do not use */
1767static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1768{
1769	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1770}
1771
1772/**
1773 * pid_alive - check that a task structure is not stale
1774 * @p: Task structure to be checked.
1775 *
1776 * Test if a process is not yet dead (at most zombie state).
1777 * If pid_alive fails, then pointers within the task structure
1778 * can be stale and must not be dereferenced.
1779 *
1780 * Return: 1 if the process is alive. 0 otherwise.
1781 */
1782static inline int pid_alive(const struct task_struct *p)
1783{
1784	return p->pids[PIDTYPE_PID].pid != NULL;
1785}
1786
1787/**
1788 * is_global_init - check if a task structure is init
1789 * @tsk: Task structure to be checked.
1790 *
1791 * Check if a task structure is the first user space task the kernel created.
1792 *
1793 * Return: 1 if the task structure is init. 0 otherwise.
1794 */
1795static inline int is_global_init(struct task_struct *tsk)
1796{
1797	return tsk->pid == 1;
1798}
1799
1800extern struct pid *cad_pid;
1801
1802extern void free_task(struct task_struct *tsk);
1803#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1804
1805extern void __put_task_struct(struct task_struct *t);
1806
1807static inline void put_task_struct(struct task_struct *t)
1808{
1809	if (atomic_dec_and_test(&t->usage))
1810		__put_task_struct(t);
1811}
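/*
 * Illustrative sketch (not part of the upstream header): the usual pattern
 * for keeping a looked-up task alive past an RCU read-side section;
 * use_task() stands in for arbitrary hypothetical work.
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *	if (p) {
 *		use_task(p);
 *		put_task_struct(p);
 *	}
 */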
1812
1813#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1814extern void task_cputime(struct task_struct *t,
1815			 cputime_t *utime, cputime_t *stime);
1816extern void task_cputime_scaled(struct task_struct *t,
1817				cputime_t *utimescaled, cputime_t *stimescaled);
1818extern cputime_t task_gtime(struct task_struct *t);
1819#else
1820static inline void task_cputime(struct task_struct *t,
1821				cputime_t *utime, cputime_t *stime)
1822{
1823	if (utime)
1824		*utime = t->utime;
1825	if (stime)
1826		*stime = t->stime;
1827}
1828
1829static inline void task_cputime_scaled(struct task_struct *t,
1830				       cputime_t *utimescaled,
1831				       cputime_t *stimescaled)
1832{
1833	if (utimescaled)
1834		*utimescaled = t->utimescaled;
1835	if (stimescaled)
1836		*stimescaled = t->stimescaled;
1837}
1838
1839static inline cputime_t task_gtime(struct task_struct *t)
1840{
1841	return t->gtime;
1842}
1843#endif
1844extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1845extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1846
1847/*
1848 * Per process flags
1849 */
1850#define PF_EXITING	0x00000004	/* getting shut down */
1851#define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
1852#define PF_VCPU		0x00000010	/* I'm a virtual CPU */
1853#define PF_WQ_WORKER	0x00000020	/* I'm a workqueue worker */
1854#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
1855#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1856#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
1857#define PF_DUMPCORE	0x00000200	/* dumped core */
1858#define PF_SIGNALED	0x00000400	/* killed by a signal */
1859#define PF_MEMALLOC	0x00000800	/* Allocating memory */
1860#define PF_NPROC_EXCEEDED 0x00001000	/* set_user noticed that RLIMIT_NPROC was exceeded */
1861#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
1862#define PF_USED_ASYNC	0x00004000	/* used async_schedule*(), used by module init */
1863#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
1864#define PF_FROZEN	0x00010000	/* frozen for system suspend */
1865#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
1866#define PF_KSWAPD	0x00040000	/* I am kswapd */
1867#define PF_MEMALLOC_NOIO 0x00080000	/* Allocating memory without IO involved */
1868#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
1869#define PF_KTHREAD	0x00200000	/* I am a kernel thread */
1870#define PF_RANDOMIZE	0x00400000	/* randomize virtual address space */
1871#define PF_SWAPWRITE	0x00800000	/* Allowed to write to swap */
1872#define PF_SPREAD_PAGE	0x01000000	/* Spread page cache over cpuset */
1873#define PF_SPREAD_SLAB	0x02000000	/* Spread some slab caches over cpuset */
1874#define PF_NO_SETAFFINITY 0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
1875#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1876#define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
1877#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
1878#define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
1879
1880/*
1881 * Only the _current_ task can read/write to tsk->flags, but other
1882 * tasks can access tsk->flags in readonly mode for example
1883 * with tsk_used_math (like during threaded core dumping).
1884 * There is however an exception to this rule during ptrace
1885 * or during fork: the ptracer task is allowed to write to the
1886 * child->flags of its traced child (same goes for fork, the parent
1887 * can write to the child->flags), because we're guaranteed the
1888 * child is not running and in turn not changing child->flags
1889 * at the same time the parent does it.
1890 */
1891#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1892#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1893#define clear_used_math() clear_stopped_child_used_math(current)
1894#define set_used_math() set_stopped_child_used_math(current)
1895#define conditional_stopped_child_used_math(condition, child) \
1896	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1897#define conditional_used_math(condition) \
1898	conditional_stopped_child_used_math(condition, current)
1899#define copy_to_stopped_child_used_math(child) \
1900	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1901/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1902#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1903#define used_math() tsk_used_math(current)
1904
1905/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */
1906static inline gfp_t memalloc_noio_flags(gfp_t flags)
1907{
1908	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1909		flags &= ~__GFP_IO;
1910	return flags;
1911}
1912
1913static inline unsigned int memalloc_noio_save(void)
1914{
1915	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
1916	current->flags |= PF_MEMALLOC_NOIO;
1917	return flags;
1918}
1919
1920static inline void memalloc_noio_restore(unsigned int flags)
1921{
1922	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
1923}
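/*
 * Illustrative sketch (not part of the upstream header): code on a
 * reclaim-sensitive path can bracket its allocations so that allocations
 * made in between have __GFP_IO masked off (see memalloc_noio_flags()
 * above); buf and size are hypothetical locals of the caller.
 *
 *	unsigned int noio_flags = memalloc_noio_save();
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	memalloc_noio_restore(noio_flags);
 */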
1924
1925/*
1926 * task->jobctl flags
1927 */
1928#define JOBCTL_STOP_SIGMASK	0xffff	/* signr of the last group stop */
1929
1930#define JOBCTL_STOP_DEQUEUED_BIT 16	/* stop signal dequeued */
1931#define JOBCTL_STOP_PENDING_BIT	17	/* task should stop for group stop */
1932#define JOBCTL_STOP_CONSUME_BIT	18	/* consume group stop count */
1933#define JOBCTL_TRAP_STOP_BIT	19	/* trap for STOP */
1934#define JOBCTL_TRAP_NOTIFY_BIT	20	/* trap for NOTIFY */
1935#define JOBCTL_TRAPPING_BIT	21	/* switching to TRACED */
1936#define JOBCTL_LISTENING_BIT	22	/* ptracer is listening for events */
1937
1938#define JOBCTL_STOP_DEQUEUED	(1 << JOBCTL_STOP_DEQUEUED_BIT)
1939#define JOBCTL_STOP_PENDING	(1 << JOBCTL_STOP_PENDING_BIT)
1940#define JOBCTL_STOP_CONSUME	(1 << JOBCTL_STOP_CONSUME_BIT)
1941#define JOBCTL_TRAP_STOP	(1 << JOBCTL_TRAP_STOP_BIT)
1942#define JOBCTL_TRAP_NOTIFY	(1 << JOBCTL_TRAP_NOTIFY_BIT)
1943#define JOBCTL_TRAPPING		(1 << JOBCTL_TRAPPING_BIT)
1944#define JOBCTL_LISTENING	(1 << JOBCTL_LISTENING_BIT)
1945
1946#define JOBCTL_TRAP_MASK	(JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
1947#define JOBCTL_PENDING_MASK	(JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
1948
1949extern bool task_set_jobctl_pending(struct task_struct *task,
1950				    unsigned int mask);
1951extern void task_clear_jobctl_trapping(struct task_struct *task);
1952extern void task_clear_jobctl_pending(struct task_struct *task,
1953				      unsigned int mask);
1954
1955#ifdef CONFIG_PREEMPT_RCU
1956
1957#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1958#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1959
1960static inline void rcu_copy_process(struct task_struct *p)
1961{
1962	p->rcu_read_lock_nesting = 0;
1963	p->rcu_read_unlock_special = 0;
1964#ifdef CONFIG_TREE_PREEMPT_RCU
1965	p->rcu_blocked_node = NULL;
1966#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1967#ifdef CONFIG_RCU_BOOST
1968	p->rcu_boost_mutex = NULL;
1969#endif /* #ifdef CONFIG_RCU_BOOST */
1970	INIT_LIST_HEAD(&p->rcu_node_entry);
1971}
1972
1973#else
1974
1975static inline void rcu_copy_process(struct task_struct *p)
1976{
1977}
1978
1979#endif
1980
1981static inline void tsk_restore_flags(struct task_struct *task,
1982				unsigned long orig_flags, unsigned long flags)
1983{
1984	task->flags &= ~flags;
1985	task->flags |= orig_flags & flags;
1986}
1987
1988#ifdef CONFIG_SMP
1989extern void do_set_cpus_allowed(struct task_struct *p,
1990			       const struct cpumask *new_mask);
1991
1992extern int set_cpus_allowed_ptr(struct task_struct *p,
1993				const struct cpumask *new_mask);
1994#else
1995static inline void do_set_cpus_allowed(struct task_struct *p,
1996				      const struct cpumask *new_mask)
1997{
1998}
1999static inline int set_cpus_allowed_ptr(struct task_struct *p,
2000				       const struct cpumask *new_mask)
2001{
2002	if (!cpumask_test_cpu(0, new_mask))
2003		return -EINVAL;
2004	return 0;
2005}
2006#endif
2007
2008#ifdef CONFIG_NO_HZ_COMMON
2009void calc_load_enter_idle(void);
2010void calc_load_exit_idle(void);
2011#else
2012static inline void calc_load_enter_idle(void) { }
2013static inline void calc_load_exit_idle(void) { }
2014#endif /* CONFIG_NO_HZ_COMMON */
2015
2016#ifndef CONFIG_CPUMASK_OFFSTACK
2017static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2018{
2019	return set_cpus_allowed_ptr(p, &new_mask);
2020}
2021#endif
2022
2023/*
2024 * Do not use outside of architecture code which knows its limitations.
2025 *
2026 * sched_clock() has no promise of monotonicity or bounded drift between
2027 * CPUs; using it (which you should not) requires disabling IRQs.
2028 *
2029 * Please use one of the three interfaces below.
2030 */
2031extern unsigned long long notrace sched_clock(void);
2032/*
2033 * See the comment in kernel/sched/clock.c
2034 */
2035extern u64 cpu_clock(int cpu);
2036extern u64 local_clock(void);
2037extern u64 sched_clock_cpu(int cpu);
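/*
 * Illustrative sketch (not part of the upstream header): timing a short
 * section with local_clock(); the value is in nanoseconds and is only
 * meaningful when both samples are taken on the same CPU. do_work() is a
 * hypothetical helper.
 *
 *	u64 t0 = local_clock();
 *
 *	do_work();
 *	pr_debug("took %llu ns\n", local_clock() - t0);
 */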
2038
2039
2040extern void sched_clock_init(void);
2041
2042#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2043static inline void sched_clock_tick(void)
2044{
2045}
2046
2047static inline void sched_clock_idle_sleep_event(void)
2048{
2049}
2050
2051static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2052{
2053}
2054#else
2055/*
2056 * Architectures can set this to 1 if they have specified
2057 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2058 * but then during bootup it turns out that sched_clock()
2059 * is reliable after all:
2060 */
2061extern int sched_clock_stable(void);
2062extern void set_sched_clock_stable(void);
2063extern void clear_sched_clock_stable(void);
2064
2065extern void sched_clock_tick(void);
2066extern void sched_clock_idle_sleep_event(void);
2067extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2068#endif
2069
2070#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2071/*
2072 * An interface for runtime opt-in to IRQ time accounting based on sched_clock.
2073 * The reason for this explicit opt-in is to avoid a performance penalty with
2074 * slow sched_clocks.
2075 */
2076extern void enable_sched_clock_irqtime(void);
2077extern void disable_sched_clock_irqtime(void);
2078#else
2079static inline void enable_sched_clock_irqtime(void) {}
2080static inline void disable_sched_clock_irqtime(void) {}
2081#endif
2082
2083extern unsigned long long
2084task_sched_runtime(struct task_struct *task);
2085
2086/* sched_exec is called by processes performing an exec */
2087#ifdef CONFIG_SMP
2088extern void sched_exec(void);
2089#else
2090#define sched_exec()   {}
2091#endif
2092
2093extern void sched_clock_idle_sleep_event(void);
2094extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2095
2096#ifdef CONFIG_HOTPLUG_CPU
2097extern void idle_task_exit(void);
2098#else
2099static inline void idle_task_exit(void) {}
2100#endif
2101
2102#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2103extern void wake_up_nohz_cpu(int cpu);
2104#else
2105static inline void wake_up_nohz_cpu(int cpu) { }
2106#endif
2107
2108#ifdef CONFIG_NO_HZ_FULL
2109extern bool sched_can_stop_tick(void);
2110extern u64 scheduler_tick_max_deferment(void);
2111#else
2112static inline bool sched_can_stop_tick(void) { return false; }
2113#endif
2114
2115#ifdef CONFIG_SCHED_AUTOGROUP
2116extern void sched_autogroup_create_attach(struct task_struct *p);
2117extern void sched_autogroup_detach(struct task_struct *p);
2118extern void sched_autogroup_fork(struct signal_struct *sig);
2119extern void sched_autogroup_exit(struct signal_struct *sig);
2120#ifdef CONFIG_PROC_FS
2121extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2122extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2123#endif
2124#else
2125static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2126static inline void sched_autogroup_detach(struct task_struct *p) { }
2127static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2128static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2129#endif
2130
2131extern bool yield_to(struct task_struct *p, bool preempt);
2132extern void set_user_nice(struct task_struct *p, long nice);
2133extern int task_prio(const struct task_struct *p);
2134/**
2135 * task_nice - return the nice value of a given task.
2136 * @p: the task in question.
2137 *
2138 * Return: The nice value [ -20 ... 0 ... 19 ].
2139 */
2140static inline int task_nice(const struct task_struct *p)
2141{
2142	return PRIO_TO_NICE((p)->static_prio);
2143}
2144extern int can_nice(const struct task_struct *p, const int nice);
2145extern int task_curr(const struct task_struct *p);
2146extern int idle_cpu(int cpu);
2147extern int sched_setscheduler(struct task_struct *, int,
2148			      const struct sched_param *);
2149extern int sched_setscheduler_nocheck(struct task_struct *, int,
2150				      const struct sched_param *);
2151extern int sched_setattr(struct task_struct *,
2152			 const struct sched_attr *);
2153extern struct task_struct *idle_task(int cpu);
2154/**
2155 * is_idle_task - is the specified task an idle task?
2156 * @p: the task in question.
2157 *
2158 * Return: 1 if @p is an idle task. 0 otherwise.
2159 */
2160static inline bool is_idle_task(const struct task_struct *p)
2161{
2162	return p->pid == 0;
2163}
2164extern struct task_struct *curr_task(int cpu);
2165extern void set_curr_task(int cpu, struct task_struct *p);
2166
2167void yield(void);
2168
2169/*
2170 * The default (Linux) execution domain.
2171 */
2172extern struct exec_domain	default_exec_domain;
2173
2174union thread_union {
2175	struct thread_info thread_info;
2176	unsigned long stack[THREAD_SIZE/sizeof(long)];
2177};
2178
2179#ifndef __HAVE_ARCH_KSTACK_END
2180static inline int kstack_end(void *addr)
2181{
2182	/* Reliable end of stack detection:
2183	 * Some APM BIOS versions misalign the stack
2184	 */
2185	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2186}
2187#endif
2188
2189extern union thread_union init_thread_union;
2190extern struct task_struct init_task;
2191
2192extern struct   mm_struct init_mm;
2193
2194extern struct pid_namespace init_pid_ns;
2195
2196/*
2197 * find a task by one of its numerical ids
2198 *
2199 * find_task_by_pid_ns():
2200 *      finds a task by its pid in the specified namespace
2201 * find_task_by_vpid():
2202 *      finds a task by its virtual pid
2203 *
2204 * see also find_vpid() etc in include/linux/pid.h
2205 */
2206
2207extern struct task_struct *find_task_by_vpid(pid_t nr);
2208extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2209		struct pid_namespace *ns);
2210
2211/* per-UID process charging. */
2212extern struct user_struct * alloc_uid(kuid_t);
2213static inline struct user_struct *get_uid(struct user_struct *u)
2214{
2215	atomic_inc(&u->__count);
2216	return u;
2217}
2218extern void free_uid(struct user_struct *);
2219
2220#include <asm/current.h>
2221
2222extern void xtime_update(unsigned long ticks);
2223
2224extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2225extern int wake_up_process(struct task_struct *tsk);
2226extern void wake_up_new_task(struct task_struct *tsk);
2227#ifdef CONFIG_SMP
2228 extern void kick_process(struct task_struct *tsk);
2229#else
2230 static inline void kick_process(struct task_struct *tsk) { }
2231#endif
2232extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2233extern void sched_dead(struct task_struct *p);
2234
2235extern void proc_caches_init(void);
2236extern void flush_signals(struct task_struct *);
2237extern void __flush_signals(struct task_struct *);
2238extern void ignore_signals(struct task_struct *);
2239extern void flush_signal_handlers(struct task_struct *, int force_default);
2240extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2241
2242static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2243{
2244	unsigned long flags;
2245	int ret;
2246
2247	spin_lock_irqsave(&tsk->sighand->siglock, flags);
2248	ret = dequeue_signal(tsk, mask, info);
2249	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2250
2251	return ret;
2252}
2253
2254extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2255			      sigset_t *mask);
2256extern void unblock_all_signals(void);
2257extern void release_task(struct task_struct * p);
2258extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2259extern int force_sigsegv(int, struct task_struct *);
2260extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2261extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2262extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2263extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2264				const struct cred *, u32);
2265extern int kill_pgrp(struct pid *pid, int sig, int priv);
2266extern int kill_pid(struct pid *pid, int sig, int priv);
2267extern int kill_proc_info(int, struct siginfo *, pid_t);
2268extern __must_check bool do_notify_parent(struct task_struct *, int);
2269extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2270extern void force_sig(int, struct task_struct *);
2271extern int send_sig(int, struct task_struct *, int);
2272extern int zap_other_threads(struct task_struct *p);
2273extern struct sigqueue *sigqueue_alloc(void);
2274extern void sigqueue_free(struct sigqueue *);
2275extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2276extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2277
2278static inline void restore_saved_sigmask(void)
2279{
2280	if (test_and_clear_restore_sigmask())
2281		__set_current_blocked(&current->saved_sigmask);
2282}
2283
2284static inline sigset_t *sigmask_to_save(void)
2285{
2286	sigset_t *res = &current->blocked;
2287	if (unlikely(test_restore_sigmask()))
2288		res = &current->saved_sigmask;
2289	return res;
2290}
2291
2292static inline int kill_cad_pid(int sig, int priv)
2293{
2294	return kill_pid(cad_pid, sig, priv);
2295}
2296
2297/* These can be the second arg to send_sig_info/send_group_sig_info.  */
2298#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2299#define SEND_SIG_PRIV	((struct siginfo *) 1)
2300#define SEND_SIG_FORCED	((struct siginfo *) 2)
2301
2302/*
2303 * True if we are on the alternate signal stack.
2304 */
2305static inline int on_sig_stack(unsigned long sp)
2306{
2307#ifdef CONFIG_STACK_GROWSUP
2308	return sp >= current->sas_ss_sp &&
2309		sp - current->sas_ss_sp < current->sas_ss_size;
2310#else
2311	return sp > current->sas_ss_sp &&
2312		sp - current->sas_ss_sp <= current->sas_ss_size;
2313#endif
2314}
2315
2316static inline int sas_ss_flags(unsigned long sp)
2317{
2318	return (current->sas_ss_size == 0 ? SS_DISABLE
2319		: on_sig_stack(sp) ? SS_ONSTACK : 0);
2320}
2321
2322static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2323{
2324	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2325#ifdef CONFIG_STACK_GROWSUP
2326		return current->sas_ss_sp;
2327#else
2328		return current->sas_ss_sp + current->sas_ss_size;
2329#endif
2330	return sp;
2331}
2332
2333/*
2334 * Routines for handling mm_structs
2335 */
2336extern struct mm_struct * mm_alloc(void);
2337
2338/* mmdrop drops the mm and the page tables */
2339extern void __mmdrop(struct mm_struct *);
2340static inline void mmdrop(struct mm_struct * mm)
2341{
2342	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2343		__mmdrop(mm);
2344}
2345
2346/* mmput gets rid of the mappings and all user-space */
2347extern void mmput(struct mm_struct *);
2348/* Grab a reference to a task's mm, if it is not already going away */
2349extern struct mm_struct *get_task_mm(struct task_struct *task);
2350/*
2351 * Grab a reference to a task's mm, if it is not already going away
2352 * and ptrace_may_access with the mode parameter passed to it
2353 * succeeds.
2354 */
2355extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2356/* Remove the current task's stale references to the old mm_struct */
2357extern void mm_release(struct task_struct *, struct mm_struct *);
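/*
 * Illustrative sketch (not part of the upstream header): the usual pairing
 * when another task's address space has to be inspected.
 *
 *	struct mm_struct *mm = get_task_mm(task);
 *
 *	if (mm) {
 *		... look at the mm under the appropriate locks ...
 *		mmput(mm);
 *	}
 *
 * get_task_mm() returns NULL for kernel threads and for tasks whose mm has
 * already been released.
 */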
2358
2359extern int copy_thread(unsigned long, unsigned long, unsigned long,
2360			struct task_struct *);
2361extern void flush_thread(void);
2362extern void exit_thread(void);
2363
2364extern void exit_files(struct task_struct *);
2365extern void __cleanup_sighand(struct sighand_struct *);
2366
2367extern void exit_itimers(struct signal_struct *);
2368extern void flush_itimer_signals(void);
2369
2370extern void do_group_exit(int);
2371
2372extern int allow_signal(int);
2373extern int disallow_signal(int);
2374
2375extern int do_execve(struct filename *,
2376		     const char __user * const __user *,
2377		     const char __user * const __user *);
2378extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2379struct task_struct *fork_idle(int);
2380extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2381
2382extern void set_task_comm(struct task_struct *tsk, const char *from);
2383extern char *get_task_comm(char *to, struct task_struct *tsk);
2384
2385#ifdef CONFIG_SMP
2386void scheduler_ipi(void);
2387extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2388#else
2389static inline void scheduler_ipi(void) { }
2390static inline unsigned long wait_task_inactive(struct task_struct *p,
2391					       long match_state)
2392{
2393	return 1;
2394}
2395#endif
2396
2397#define next_task(p) \
2398	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2399
2400#define for_each_process(p) \
2401	for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2402
2403extern bool current_is_single_threaded(void);
2404
2405/*
2406 * Careful: do_each_thread/while_each_thread is a double loop so
2407 *          'break' will not work as expected - use goto instead.
2408 */
2409#define do_each_thread(g, t) \
2410	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2411
2412#define while_each_thread(g, t) \
2413	while ((t = next_thread(t)) != g)
2414
2415#define __for_each_thread(signal, t)	\
2416	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2417
2418#define for_each_thread(p, t)		\
2419	__for_each_thread((p)->signal, t)
2420
2421/* Careful: this is a double loop, 'break' won't work as expected. */
2422#define for_each_process_thread(p, t)	\
2423	for_each_process(p) for_each_thread(p, t)
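/*
 * Illustrative sketch (not part of the upstream header): walking every
 * thread in the system; g and t are caller-declared struct task_struct
 * pointers, and the walk must be covered by rcu_read_lock() or
 * read_lock(&tasklist_lock).
 *
 *	rcu_read_lock();
 *	for_each_process_thread(g, t)
 *		pr_debug("%s pid=%d\n", t->comm, task_pid_nr(t));
 *	rcu_read_unlock();
 */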
2424
2425static inline int get_nr_threads(struct task_struct *tsk)
2426{
2427	return tsk->signal->nr_threads;
2428}
2429
2430static inline bool thread_group_leader(struct task_struct *p)
2431{
2432	return p->exit_signal >= 0;
2433}
2434
2435/* Due to the insanities of de_thread it is possible for a process
2436 * to have the pid of the thread group leader without actually being
2437 * the thread group leader.  For iteration through the pids in proc
2438 * all we care about is that we have a task with the appropriate
2439 * pid; we don't actually care if we have the right task.
2440 */
2441static inline bool has_group_leader_pid(struct task_struct *p)
2442{
2443	return task_pid(p) == p->signal->leader_pid;
2444}
2445
2446static inline
2447bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2448{
2449	return p1->signal == p2->signal;
2450}
2451
2452static inline struct task_struct *next_thread(const struct task_struct *p)
2453{
2454	return list_entry_rcu(p->thread_group.next,
2455			      struct task_struct, thread_group);
2456}
2457
2458static inline int thread_group_empty(struct task_struct *p)
2459{
2460	return list_empty(&p->thread_group);
2461}
2462
2463#define delay_group_leader(p) \
2464		(thread_group_leader(p) && !thread_group_empty(p))
2465
2466/*
2467 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2468 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2469 * pins the final release of task.io_context.  Also protects ->cpuset and
2470 * ->cgroup.subsys[]. And ->vfork_done.
2471 *
2472 * Nests both inside and outside of read_lock(&tasklist_lock).
2473 * It must not be nested with write_lock_irq(&tasklist_lock),
2474 * neither inside nor outside.
2475 */
2476static inline void task_lock(struct task_struct *p)
2477{
2478	spin_lock(&p->alloc_lock);
2479}
2480
2481static inline void task_unlock(struct task_struct *p)
2482{
2483	spin_unlock(&p->alloc_lock);
2484}
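/*
 * Illustrative sketch (not part of the upstream header): ->comm is one of
 * the fields protected above, so copying another task's name safely looks
 * like this (get_task_comm() does essentially the same thing).
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strncpy(comm, p->comm, sizeof(comm));
 *	task_unlock(p);
 */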
2485
2486extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2487							unsigned long *flags);
2488
2489static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2490						       unsigned long *flags)
2491{
2492	struct sighand_struct *ret;
2493
2494	ret = __lock_task_sighand(tsk, flags);
2495	(void)__cond_lock(&tsk->sighand->siglock, ret);
2496	return ret;
2497}
2498
2499static inline void unlock_task_sighand(struct task_struct *tsk,
2500						unsigned long *flags)
2501{
2502	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2503}
2504
2505#ifdef CONFIG_CGROUPS
2506static inline void threadgroup_change_begin(struct task_struct *tsk)
2507{
2508	down_read(&tsk->signal->group_rwsem);
2509}
2510static inline void threadgroup_change_end(struct task_struct *tsk)
2511{
2512	up_read(&tsk->signal->group_rwsem);
2513}
2514
2515/**
2516 * threadgroup_lock - lock threadgroup
2517 * @tsk: member task of the threadgroup to lock
2518 *
2519 * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
2520 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2521 * change ->group_leader/pid.  This is useful for cases where the threadgroup
2522 * needs to stay stable across blockable operations.
2523 *
2524 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2525 * synchronization.  While held, no new task will be added to threadgroup
2526 * and no existing live task will have its PF_EXITING set.
2527 *
2528 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2529 * sub-thread becomes a new leader.
2530 */
2531static inline void threadgroup_lock(struct task_struct *tsk)
2532{
2533	down_write(&tsk->signal->group_rwsem);
2534}
2535
2536/**
2537 * threadgroup_unlock - unlock threadgroup
2538 * @tsk: member task of the threadgroup to unlock
2539 *
2540 * Reverse threadgroup_lock().
2541 */
2542static inline void threadgroup_unlock(struct task_struct *tsk)
2543{
2544	up_write(&tsk->signal->group_rwsem);
2545}
2546#else
2547static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2548static inline void threadgroup_change_end(struct task_struct *tsk) {}
2549static inline void threadgroup_lock(struct task_struct *tsk) {}
2550static inline void threadgroup_unlock(struct task_struct *tsk) {}
2551#endif
2552
2553#ifndef __HAVE_THREAD_FUNCTIONS
2554
2555#define task_thread_info(task)	((struct thread_info *)(task)->stack)
2556#define task_stack_page(task)	((task)->stack)
2557
2558static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2559{
2560	*task_thread_info(p) = *task_thread_info(org);
2561	task_thread_info(p)->task = p;
2562}
2563
2564static inline unsigned long *end_of_stack(struct task_struct *p)
2565{
2566	return (unsigned long *)(task_thread_info(p) + 1);
2567}
2568
2569#endif
2570
2571static inline int object_is_on_stack(void *obj)
2572{
2573	void *stack = task_stack_page(current);
2574
2575	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2576}
2577
2578extern void thread_info_cache_init(void);
2579
2580#ifdef CONFIG_DEBUG_STACK_USAGE
2581static inline unsigned long stack_not_used(struct task_struct *p)
2582{
2583	unsigned long *n = end_of_stack(p);
2584
2585	do { 	/* Skip over canary */
2586		n++;
2587	} while (!*n);
2588
2589	return (unsigned long)n - (unsigned long)end_of_stack(p);
2590}
2591#endif
2592
2593/* set thread flags in other task's structures
2594 * - see asm/thread_info.h for TIF_xxxx flags available
2595 */
2596static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2597{
2598	set_ti_thread_flag(task_thread_info(tsk), flag);
2599}
2600
2601static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2602{
2603	clear_ti_thread_flag(task_thread_info(tsk), flag);
2604}
2605
2606static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2607{
2608	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2609}
2610
2611static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2612{
2613	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2614}
2615
2616static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2617{
2618	return test_ti_thread_flag(task_thread_info(tsk), flag);
2619}
2620
2621static inline void set_tsk_need_resched(struct task_struct *tsk)
2622{
2623	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2624}
2625
2626static inline void clear_tsk_need_resched(struct task_struct *tsk)
2627{
2628	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2629}
2630
2631static inline int test_tsk_need_resched(struct task_struct *tsk)
2632{
2633	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2634}
2635
2636static inline int restart_syscall(void)
2637{
2638	set_tsk_thread_flag(current, TIF_SIGPENDING);
2639	return -ERESTARTNOINTR;
2640}
2641
2642static inline int signal_pending(struct task_struct *p)
2643{
2644	return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2645}
2646
2647static inline int __fatal_signal_pending(struct task_struct *p)
2648{
2649	return unlikely(sigismember(&p->pending.signal, SIGKILL));
2650}
2651
2652static inline int fatal_signal_pending(struct task_struct *p)
2653{
2654	return signal_pending(p) && __fatal_signal_pending(p);
2655}
2656
2657static inline int signal_pending_state(long state, struct task_struct *p)
2658{
2659	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2660		return 0;
2661	if (!signal_pending(p))
2662		return 0;
2663
2664	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2665}
2666
2667/*
2668 * cond_resched() and cond_resched_lock(): latency reduction via
2669 * explicit rescheduling in places that are safe. The return
2670 * value indicates whether a reschedule was done in fact.
2671 * cond_resched_lock() will drop the spinlock before scheduling,
2672 * cond_resched_softirq() will enable bhs before scheduling.
2673 */
2674extern int _cond_resched(void);
2675
2676#define cond_resched() ({			\
2677	__might_sleep(__FILE__, __LINE__, 0);	\
2678	_cond_resched();			\
2679})
2680
2681extern int __cond_resched_lock(spinlock_t *lock);
2682
2683#ifdef CONFIG_PREEMPT_COUNT
2684#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
2685#else
2686#define PREEMPT_LOCK_OFFSET	0
2687#endif
2688
2689#define cond_resched_lock(lock) ({				\
2690	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
2691	__cond_resched_lock(lock);				\
2692})
2693
2694extern int __cond_resched_softirq(void);
2695
2696#define cond_resched_softirq() ({					\
2697	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
2698	__cond_resched_softirq();					\
2699})
2700
2701static inline void cond_resched_rcu(void)
2702{
2703#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2704	rcu_read_unlock();
2705	cond_resched();
2706	rcu_read_lock();
2707#endif
2708}
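/*
 * Illustrative sketch (not part of the upstream header): a long loop in
 * process context keeps scheduling latency bounded by yielding
 * periodically; process_item() is a hypothetical per-iteration helper.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 */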
2709
2710/*
2711 * Does a critical section need to be broken due to another
2712 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
2713 * but is a general need for low latency.)
2714 */
2715static inline int spin_needbreak(spinlock_t *lock)
2716{
2717#ifdef CONFIG_PREEMPT
2718	return spin_is_contended(lock);
2719#else
2720	return 0;
2721#endif
2722}
2723
2724/*
2725 * Idle thread specific functions to determine the need_resched
2726 * polling state. We have two versions, one based on TS_POLLING in
2727 * thread_info.status and one based on TIF_POLLING_NRFLAG in
2728 * thread_info.flags
2729 */
2730#ifdef TS_POLLING
2731static inline int tsk_is_polling(struct task_struct *p)
2732{
2733	return task_thread_info(p)->status & TS_POLLING;
2734}
2735static inline void __current_set_polling(void)
2736{
2737	current_thread_info()->status |= TS_POLLING;
2738}
2739
2740static inline bool __must_check current_set_polling_and_test(void)
2741{
2742	__current_set_polling();
2743
2744	/*
2745	 * Polling state must be visible before we test NEED_RESCHED,
2746	 * paired by resched_task()
2747	 */
2748	smp_mb();
2749
2750	return unlikely(tif_need_resched());
2751}
2752
2753static inline void __current_clr_polling(void)
2754{
2755	current_thread_info()->status &= ~TS_POLLING;
2756}
2757
2758static inline bool __must_check current_clr_polling_and_test(void)
2759{
2760	__current_clr_polling();
2761
2762	/*
2763	 * Polling state must be visible before we test NEED_RESCHED,
2764	 * paired by resched_task()
2765	 */
2766	smp_mb();
2767
2768	return unlikely(tif_need_resched());
2769}
2770#elif defined(TIF_POLLING_NRFLAG)
2771static inline int tsk_is_polling(struct task_struct *p)
2772{
2773	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2774}
2775
2776static inline void __current_set_polling(void)
2777{
2778	set_thread_flag(TIF_POLLING_NRFLAG);
2779}
2780
2781static inline bool __must_check current_set_polling_and_test(void)
2782{
2783	__current_set_polling();
2784
2785	/*
2786	 * Polling state must be visible before we test NEED_RESCHED,
2787	 * paired by resched_task()
2788	 *
2789	 * XXX: assumes set/clear bit are identical barrier wise.
2790	 */
2791	smp_mb__after_clear_bit();
2792
2793	return unlikely(tif_need_resched());
2794}
2795
2796static inline void __current_clr_polling(void)
2797{
2798	clear_thread_flag(TIF_POLLING_NRFLAG);
2799}
2800
2801static inline bool __must_check current_clr_polling_and_test(void)
2802{
2803	__current_clr_polling();
2804
2805	/*
2806	 * Polling state must be visible before we test NEED_RESCHED,
2807	 * paired by resched_task()
2808	 */
2809	smp_mb__after_clear_bit();
2810
2811	return unlikely(tif_need_resched());
2812}
2813
2814#else
2815static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2816static inline void __current_set_polling(void) { }
2817static inline void __current_clr_polling(void) { }
2818
2819static inline bool __must_check current_set_polling_and_test(void)
2820{
2821	return unlikely(tif_need_resched());
2822}
2823static inline bool __must_check current_clr_polling_and_test(void)
2824{
2825	return unlikely(tif_need_resched());
2826}
2827#endif
2828
2829static inline void current_clr_polling(void)
2830{
2831	__current_clr_polling();
2832
2833	/*
2834	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
2835	 * Once the bit is cleared, we'll get IPIs with every new
2836	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
2837	 * fold.
2838	 */
2839	smp_mb(); /* paired with resched_task() */
2840
2841	preempt_fold_need_resched();
2842}
2843
2844static __always_inline bool need_resched(void)
2845{
2846	return unlikely(tif_need_resched());
2847}
2848
2849/*
2850 * Thread group CPU time accounting.
2851 */
2852void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2853void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2854
2855static inline void thread_group_cputime_init(struct signal_struct *sig)
2856{
2857	raw_spin_lock_init(&sig->cputimer.lock);
2858}
2859
2860/*
2861 * Reevaluate whether the task has signals pending delivery.
2862 * Wake the task if so.
2863 * This is required every time the blocked sigset_t changes.
2864 * callers must hold sighand->siglock.
2865 */
2866extern void recalc_sigpending_and_wake(struct task_struct *t);
2867extern void recalc_sigpending(void);
2868
2869extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2870
2871static inline void signal_wake_up(struct task_struct *t, bool resume)
2872{
2873	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2874}
2875static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2876{
2877	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2878}
2879
2880/*
2881 * Wrappers for p->thread_info->cpu access. No-op on UP.
2882 */
2883#ifdef CONFIG_SMP
2884
2885static inline unsigned int task_cpu(const struct task_struct *p)
2886{
2887	return task_thread_info(p)->cpu;
2888}
2889
2890static inline int task_node(const struct task_struct *p)
2891{
2892	return cpu_to_node(task_cpu(p));
2893}
2894
2895extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2896
2897#else
2898
2899static inline unsigned int task_cpu(const struct task_struct *p)
2900{
2901	return 0;
2902}
2903
2904static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2905{
2906}
2907
2908#endif /* CONFIG_SMP */
2909
2910extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2911extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2912
2913#ifdef CONFIG_CGROUP_SCHED
2914extern struct task_group root_task_group;
2915#endif /* CONFIG_CGROUP_SCHED */
2916
2917extern int task_can_switch_user(struct user_struct *up,
2918					struct task_struct *tsk);
2919
2920#ifdef CONFIG_TASK_XACCT
2921static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2922{
2923	tsk->ioac.rchar += amt;
2924}
2925
2926static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2927{
2928	tsk->ioac.wchar += amt;
2929}
2930
2931static inline void inc_syscr(struct task_struct *tsk)
2932{
2933	tsk->ioac.syscr++;
2934}
2935
2936static inline void inc_syscw(struct task_struct *tsk)
2937{
2938	tsk->ioac.syscw++;
2939}
2940#else
2941static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2942{
2943}
2944
2945static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2946{
2947}
2948
2949static inline void inc_syscr(struct task_struct *tsk)
2950{
2951}
2952
2953static inline void inc_syscw(struct task_struct *tsk)
2954{
2955}
2956#endif
2957
2958#ifndef TASK_SIZE_OF
2959#define TASK_SIZE_OF(tsk)	TASK_SIZE
2960#endif
2961
2962#ifdef CONFIG_MM_OWNER
2963extern void mm_update_next_owner(struct mm_struct *mm);
2964extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2965#else
2966static inline void mm_update_next_owner(struct mm_struct *mm)
2967{
2968}
2969
2970static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2971{
2972}
2973#endif /* CONFIG_MM_OWNER */
2974
2975static inline unsigned long task_rlimit(const struct task_struct *tsk,
2976		unsigned int limit)
2977{
2978	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2979}
2980
2981static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2982		unsigned int limit)
2983{
2984	return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2985}
2986
2987static inline unsigned long rlimit(unsigned int limit)
2988{
2989	return task_rlimit(current, limit);
2990}
2991
2992static inline unsigned long rlimit_max(unsigned int limit)
2993{
2994	return task_rlimit_max(current, limit);
2995}
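/*
 * Illustrative sketch (not part of the upstream header): enforcing the
 * current task's soft limit, here against a hypothetical table size.
 *
 *	if (new_size > rlimit(RLIMIT_NOFILE))
 *		return -EMFILE;
 */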
2996
2997#endif
v4.17
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_SCHED_H
   3#define _LINUX_SCHED_H
   4
   5/*
   6 * Define 'struct task_struct' and provide the main scheduler
   7 * APIs (schedule(), wakeup variants, etc.)
   8 */
   9
  10#include <uapi/linux/sched.h>
  11
  12#include <asm/current.h>
  13
  14#include <linux/pid.h>
  15#include <linux/sem.h>
  16#include <linux/shm.h>
  17#include <linux/kcov.h>
  18#include <linux/mutex.h>
  19#include <linux/plist.h>
  20#include <linux/hrtimer.h>
  21#include <linux/seccomp.h>
  22#include <linux/nodemask.h>
  23#include <linux/rcupdate.h>
  24#include <linux/resource.h>
  25#include <linux/latencytop.h>
  26#include <linux/sched/prio.h>
  27#include <linux/signal_types.h>
  28#include <linux/mm_types_task.h>
  29#include <linux/task_io_accounting.h>
  30
  31/* task_struct member predeclarations (sorted alphabetically): */
  32struct audit_context;
  33struct backing_dev_info;
  34struct bio_list;
  35struct blk_plug;
  36struct cfs_rq;
  37struct fs_struct;
  38struct futex_pi_state;
  39struct io_context;
  40struct mempolicy;
  41struct nameidata;
  42struct nsproxy;
  43struct perf_event_context;
  44struct pid_namespace;
  45struct pipe_inode_info;
  46struct rcu_node;
  47struct reclaim_state;
  48struct robust_list_head;
  49struct sched_attr;
  50struct sched_param;
  51struct seq_file;
  52struct sighand_struct;
  53struct signal_struct;
  54struct task_delay_info;
  55struct task_group;
  56
  57/*
  58 * Task state bitmask. NOTE! These bits are also
  59 * encoded in fs/proc/array.c: get_task_state().
  60 *
  61 * We have two separate sets of flags: task->state
  62 * is about runnability, while task->exit_state are
  63 * about the task exiting. Confusing, but this way
  64 * modifying one set can't modify the other one by
  65 * mistake.
  66 */
  67
  68/* Used in tsk->state: */
  69#define TASK_RUNNING			0x0000
  70#define TASK_INTERRUPTIBLE		0x0001
  71#define TASK_UNINTERRUPTIBLE		0x0002
  72#define __TASK_STOPPED			0x0004
  73#define __TASK_TRACED			0x0008
  74/* Used in tsk->exit_state: */
  75#define EXIT_DEAD			0x0010
  76#define EXIT_ZOMBIE			0x0020
  77#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
  78/* Used in tsk->state again: */
  79#define TASK_PARKED			0x0040
  80#define TASK_DEAD			0x0080
  81#define TASK_WAKEKILL			0x0100
  82#define TASK_WAKING			0x0200
  83#define TASK_NOLOAD			0x0400
  84#define TASK_NEW			0x0800
  85#define TASK_STATE_MAX			0x1000
  86
  87/* Convenience macros for the sake of set_current_state: */
  88#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
  89#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
  90#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)
  91
  92#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
  93
  94/* Convenience macros for the sake of wake_up(): */
  95#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
  96
  97/* get_task_state(): */
  98#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
  99					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 100					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 101					 TASK_PARKED)
 102
 103#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
 104
 105#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
 106
 107#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 108
 109#define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 110					 (task->flags & PF_FROZEN) == 0 && \
 111					 (task->state & TASK_NOLOAD) == 0)
 112
 113#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 114
 115/*
 116 * Special states are those that do not use the normal wait-loop pattern. See
 117 * the comment with set_special_state().
 118 */
 119#define is_special_task_state(state)				\
 120	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
 121
 122#define __set_current_state(state_value)			\
 123	do {							\
 124		WARN_ON_ONCE(is_special_task_state(state_value));\
 125		current->task_state_change = _THIS_IP_;		\
 126		current->state = (state_value);			\
 127	} while (0)
 128
 129#define set_current_state(state_value)				\
 130	do {							\
 131		WARN_ON_ONCE(is_special_task_state(state_value));\
 132		current->task_state_change = _THIS_IP_;		\
 133		smp_store_mb(current->state, (state_value));	\
 134	} while (0)
 135
 136#define set_special_state(state_value)					\
 137	do {								\
 138		unsigned long flags; /* may shadow */			\
 139		WARN_ON_ONCE(!is_special_task_state(state_value));	\
 140		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
 141		current->task_state_change = _THIS_IP_;			\
 142		current->state = (state_value);				\
 143		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
 144	} while (0)
 145#else
 146/*
 147 * set_current_state() includes a barrier so that the write of current->state
 148 * is correctly serialised wrt the caller's subsequent test of whether to
 149 * actually sleep:
 150 *
 151 *   for (;;) {
 152 *	set_current_state(TASK_UNINTERRUPTIBLE);
 153 *	if (!need_sleep)
 154 *		break;
 155 *
 156 *	schedule();
 157 *   }
 158 *   __set_current_state(TASK_RUNNING);
 159 *
 160 * If the caller does not need such serialisation (because, for instance, the
 161 * condition test and condition change and wakeup are under the same lock) then
 162 * use __set_current_state().
 163 *
 164 * The above is typically ordered against the wakeup, which does:
 165 *
 166 *   need_sleep = false;
 167 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 168 *
 169 * Where wake_up_state() (and all other wakeup primitives) imply enough
 170 * barriers to order the store of the variable against wakeup.
 171 *
 172 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 173 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 174 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 175 *
 176 * However, with slightly different timing the wakeup TASK_RUNNING store can
  177 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
 178 * a problem either because that will result in one extra go around the loop
 179 * and our @cond test will save the day.
 180 *
 181 * Also see the comments of try_to_wake_up().
 182 */
 183#define __set_current_state(state_value)				\
 184	current->state = (state_value)
 185
 186#define set_current_state(state_value)					\
 187	smp_store_mb(current->state, (state_value))
 188
 189/*
 190 * set_special_state() should be used for those states when the blocking task
 191 * can not use the regular condition based wait-loop. In that case we must
 192 * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
 193 * will not collide with our state change.
 194 */
 195#define set_special_state(state_value)					\
 196	do {								\
 197		unsigned long flags; /* may shadow */			\
 198		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
 199		current->state = (state_value);				\
 200		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
 201	} while (0)
 202
 203#endif
 204
 205/* Task command name length: */
 206#define TASK_COMM_LEN			16
 207
 208extern void scheduler_tick(void);
 209
 210#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX
 211
 212extern long schedule_timeout(long timeout);
 213extern long schedule_timeout_interruptible(long timeout);
 214extern long schedule_timeout_killable(long timeout);
 215extern long schedule_timeout_uninterruptible(long timeout);
 216extern long schedule_timeout_idle(long timeout);
 217asmlinkage void schedule(void);
 218extern void schedule_preempt_disabled(void);
 219
 220extern int __must_check io_schedule_prepare(void);
 221extern void io_schedule_finish(int token);
 222extern long io_schedule_timeout(long timeout);
 223extern void io_schedule(void);
 224
 225/**
 226 * struct prev_cputime - snapshot of system and user cputime
 227 * @utime: time spent in user mode
 228 * @stime: time spent in system mode
 229 * @lock: protects the above two fields
 230 *
 231 * Stores previous user/system time values such that we can guarantee
 232 * monotonicity.
 233 */
 234struct prev_cputime {
 235#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 236	u64				utime;
 237	u64				stime;
 238	raw_spinlock_t			lock;
 239#endif
 240};
 241
 242/**
 243 * struct task_cputime - collected CPU time counts
 244 * @utime:		time spent in user mode, in nanoseconds
 245 * @stime:		time spent in kernel mode, in nanoseconds
 246 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 247 *
 248 * This structure groups together three kinds of CPU time that are tracked for
 249 * threads and thread groups.  Most things considering CPU time want to group
 250 * these counts together and treat all three of them in parallel.
 251 */
 252struct task_cputime {
 253	u64				utime;
 254	u64				stime;
 255	unsigned long long		sum_exec_runtime;
 256};
 257
 258/* Alternate field names when used on cache expirations: */
 259#define virt_exp			utime
 260#define prof_exp			stime
 261#define sched_exp			sum_exec_runtime
 262
 263enum vtime_state {
 264	/* Task is sleeping or running in a CPU with VTIME inactive: */
 265	VTIME_INACTIVE = 0,
 266	/* Task runs in userspace in a CPU with VTIME active: */
 267	VTIME_USER,
 268	/* Task runs in kernelspace in a CPU with VTIME active: */
 269	VTIME_SYS,
 270};
 271
 272struct vtime {
 273	seqcount_t		seqcount;
 274	unsigned long long	starttime;
 275	enum vtime_state	state;
 276	u64			utime;
 277	u64			stime;
 278	u64			gtime;
 279};
 280
 281struct sched_info {
 282#ifdef CONFIG_SCHED_INFO
 283	/* Cumulative counters: */
 284
 285	/* # of times we have run on this CPU: */
 286	unsigned long			pcount;
 287
 288	/* Time spent waiting on a runqueue: */
 289	unsigned long long		run_delay;
 290
 291	/* Timestamps: */
 292
 293	/* When did we last run on a CPU? */
 294	unsigned long long		last_arrival;
 295
 296	/* When were we last queued to run? */
 297	unsigned long long		last_queued;
 298
 299#endif /* CONFIG_SCHED_INFO */
 300};
 301
 302/*
 303 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 304 * has a few: load, load_avg, util_avg, freq, and capacity.
 305 *
 306 * We define a basic fixed point arithmetic range, and then formalize
 307 * all these metrics based on that basic range.
 308 */
 309# define SCHED_FIXEDPOINT_SHIFT		10
 310# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
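/*
 * Illustrative worked example (not part of the upstream header): with a
 * 10-bit shift, "1.0" is represented as SCHED_FIXEDPOINT_SCALE == 1024, so
 * a 75% ratio is stored as 768 and the product of two such ratios is
 * computed as (a * b) >> SCHED_FIXEDPOINT_SHIFT.
 */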
 311
 312struct load_weight {
 313	unsigned long			weight;
 314	u32				inv_weight;
 315};
 316
 317/**
 318 * struct util_est - Estimation utilization of FAIR tasks
 319 * @enqueued: instantaneous estimated utilization of a task/cpu
 320 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 321 *            utilization of a task
 322 *
 323 * Support data structure to track an Exponential Weighted Moving Average
 324 * (EWMA) of a FAIR task's utilization. New samples are added to the moving
 325 * average each time a task completes an activation. Sample's weight is chosen
 326 * so that the EWMA will be relatively insensitive to transient changes to the
 327 * task's workload.
 328 *
 329 * The enqueued attribute has a slightly different meaning for tasks and cpus:
 330 * - task:   the task's util_avg at last task dequeue time
 331 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
 332 * Thus, the util_est.enqueued of a task represents the contribution on the
 333 * estimated utilization of the CPU where that task is currently enqueued.
 334 *
 335 * Only for tasks we track a moving average of the past instantaneous
  336 * estimated utilization. This allows us to absorb sporadic drops in utilization
 337 * of an otherwise almost periodic task.
 338 */
 339struct util_est {
 340	unsigned int			enqueued;
 341	unsigned int			ewma;
 342#define UTIL_EST_WEIGHT_SHIFT		2
 343} __attribute__((__aligned__(sizeof(u64))));
 344
 345/*
 346 * The load_avg/util_avg accumulates an infinite geometric series
 347 * (see __update_load_avg() in kernel/sched/fair.c).
 348 *
 349 * [load_avg definition]
 350 *
 351 *   load_avg = runnable% * scale_load_down(load)
 352 *
 353 * where runnable% is the time ratio that a sched_entity is runnable.
 354 * For cfs_rq, it is the aggregated load_avg of all runnable and
 355 * blocked sched_entities.
 356 *
 357 * load_avg may also take frequency scaling into account:
 358 *
 359 *   load_avg = runnable% * scale_load_down(load) * freq%
 360 *
 361 * where freq% is the CPU frequency normalized to the highest frequency.
 362 *
 363 * [util_avg definition]
 364 *
 365 *   util_avg = running% * SCHED_CAPACITY_SCALE
 366 *
 367 * where running% is the time ratio that a sched_entity is running on
 368 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 369 * and blocked sched_entities.
 370 *
 371 * util_avg may also factor frequency scaling and CPU capacity scaling:
 372 *
 373 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 374 *
 375 * where freq% is the same as above, and capacity% is the CPU capacity
 376 * normalized to the greatest capacity (due to uarch differences, etc).
 377 *
 378 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 379 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 380 * we therefore scale them to as large a range as necessary. This is for
 381 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 382 *
 383 * [Overflow issue]
 384 *
 385 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 386 * with the highest load (=88761), always runnable on a single cfs_rq,
 387 * and should not overflow as the number already hits PID_MAX_LIMIT.
 388 *
 389 * For all other cases (including 32-bit kernels), struct load_weight's
 390 * weight will overflow before we do, because:
 391 *
 392 *    Max(load_avg) <= Max(load.weight)
 393 *
 394 * Then it is the load_weight's responsibility to consider overflow
 395 * issues.
 396 */
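/*
 * Worked example (illustrative only, just plugging numbers into the formula
 * above, and assuming SCHED_CAPACITY_SCALE is 1024): a task running 50% of
 * the time on a CPU clocked at half of its maximum frequency settles around
 *
 *	util_avg = 0.50 * 1024 * 0.50 = 256
 *
 * once frequency invariance is applied, while the same task on a full-speed
 * CPU converges towards 512.
 */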
 397struct sched_avg {
 398	u64				last_update_time;
 399	u64				load_sum;
 400	u64				runnable_load_sum;
 401	u32				util_sum;
 402	u32				period_contrib;
 403	unsigned long			load_avg;
 404	unsigned long			runnable_load_avg;
 405	unsigned long			util_avg;
 406	struct util_est			util_est;
 407} ____cacheline_aligned;
 408
 409struct sched_statistics {
 410#ifdef CONFIG_SCHEDSTATS
 411	u64				wait_start;
 412	u64				wait_max;
 413	u64				wait_count;
 414	u64				wait_sum;
 415	u64				iowait_count;
 416	u64				iowait_sum;
 417
 418	u64				sleep_start;
 419	u64				sleep_max;
 420	s64				sum_sleep_runtime;
 421
 422	u64				block_start;
 423	u64				block_max;
 424	u64				exec_max;
 425	u64				slice_max;
 426
 427	u64				nr_migrations_cold;
 428	u64				nr_failed_migrations_affine;
 429	u64				nr_failed_migrations_running;
 430	u64				nr_failed_migrations_hot;
 431	u64				nr_forced_migrations;
 432
 433	u64				nr_wakeups;
 434	u64				nr_wakeups_sync;
 435	u64				nr_wakeups_migrate;
 436	u64				nr_wakeups_local;
 437	u64				nr_wakeups_remote;
 438	u64				nr_wakeups_affine;
 439	u64				nr_wakeups_affine_attempts;
 440	u64				nr_wakeups_passive;
 441	u64				nr_wakeups_idle;
 442#endif
 443};
 444
 445struct sched_entity {
 446	/* For load-balancing: */
 447	struct load_weight		load;
 448	unsigned long			runnable_weight;
 449	struct rb_node			run_node;
 450	struct list_head		group_node;
 451	unsigned int			on_rq;
 452
 453	u64				exec_start;
 454	u64				sum_exec_runtime;
 455	u64				vruntime;
 456	u64				prev_sum_exec_runtime;
 457
 458	u64				nr_migrations;
 459
 460	struct sched_statistics		statistics;
 461
 462#ifdef CONFIG_FAIR_GROUP_SCHED
 463	int				depth;
 464	struct sched_entity		*parent;
 465	/* rq on which this entity is (to be) queued: */
 466	struct cfs_rq			*cfs_rq;
 467	/* rq "owned" by this entity/group: */
 468	struct cfs_rq			*my_q;
 469#endif
 470
 471#ifdef CONFIG_SMP
 472	/*
 473	 * Per entity load average tracking.
 474	 *
 475	 * Put into separate cache line so it does not
 476	 * collide with read-mostly values above.
 477	 */
 478	struct sched_avg		avg;
 479#endif
 480};
 481
 482struct sched_rt_entity {
 483	struct list_head		run_list;
 484	unsigned long			timeout;
 485	unsigned long			watchdog_stamp;
 486	unsigned int			time_slice;
 487	unsigned short			on_rq;
 488	unsigned short			on_list;
 489
 490	struct sched_rt_entity		*back;
 491#ifdef CONFIG_RT_GROUP_SCHED
 492	struct sched_rt_entity		*parent;
 493	/* rq on which this entity is (to be) queued: */
 494	struct rt_rq			*rt_rq;
 495	/* rq "owned" by this entity/group: */
 496	struct rt_rq			*my_q;
 497#endif
 498} __randomize_layout;
 499
 500struct sched_dl_entity {
 501	struct rb_node			rb_node;
 502
 503	/*
 504	 * Original scheduling parameters. Copied here from sched_attr
 505	 * during sched_setattr(), they will remain the same until
 506	 * the next sched_setattr().
 507	 */
 508	u64				dl_runtime;	/* Maximum runtime for each instance	*/
 509	u64				dl_deadline;	/* Relative deadline of each instance	*/
 510	u64				dl_period;	/* Separation of two instances (period) */
 511	u64				dl_bw;		/* dl_runtime / dl_period		*/
 512	u64				dl_density;	/* dl_runtime / dl_deadline		*/
 513
 514	/*
 515	 * Actual scheduling parameters. Initialized with the values above,
 516	 * they are continuously updated during task execution. Note that
 517	 * the remaining runtime could be < 0 in case we are in overrun.
 518	 */
 519	s64				runtime;	/* Remaining runtime for this instance	*/
 520	u64				deadline;	/* Absolute deadline for this instance	*/
 521	unsigned int			flags;		/* Specifying the scheduler behaviour	*/
 522
 523	/*
 524	 * Some bool flags:
 525	 *
 526	 * @dl_throttled tells if we exhausted the runtime. If so, the
 527	 * task has to wait for a replenishment to be performed at the
 528	 * next firing of dl_timer.
 529	 *
 530	 * @dl_boosted tells if we are boosted due to deadline inheritance
 531	 * (DI). If so we are outside the bandwidth enforcement mechanism
 532	 * (but only until we exit the critical section);
 533	 *
 534	 * @dl_yielded tells if task gave up the CPU before consuming
 535	 * all its available runtime during the last job.
 536	 *
 537	 * @dl_non_contending tells if the task is inactive while still
 538	 * contributing to the active utilization. In other words, it
 539	 * indicates if the inactive timer has been armed and its handler
 540	 * has not been executed yet. This flag is useful to avoid race
 541	 * conditions between the inactive timer handler and the wakeup
 542	 * code.
 543	 *
 544	 * @dl_overrun tells if the task asked to be informed about runtime
 545	 * overruns.
 546	 */
 547	unsigned int			dl_throttled      : 1;
 548	unsigned int			dl_boosted        : 1;
 549	unsigned int			dl_yielded        : 1;
 550	unsigned int			dl_non_contending : 1;
 551	unsigned int			dl_overrun	  : 1;
 552
 553	/*
 554	 * Bandwidth enforcement timer. Each -deadline task has its
 555	 * own bandwidth to be enforced, thus we need one timer per task.
 556	 */
 557	struct hrtimer			dl_timer;
 558
 559	/*
 560	 * Inactive timer, responsible for decreasing the active utilization
 561	 * at the "0-lag time". When a -deadline task blocks, it contributes
 562	 * to GRUB's active utilization until the "0-lag time", hence a
 563	 * timer is needed to decrease the active utilization at the correct
 564	 * time.
 565	 */
 566	struct hrtimer inactive_timer;
 567};
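/*
 * Illustrative example (the actual conversion is done in kernel/sched/, and
 * the 20-bit shift here is an assumption for the sake of the example):
 * dl_bw stores dl_runtime / dl_period as a fixed-point fraction, so a task
 * asking for 10ms of runtime every 100ms would get roughly
 *
 *	dl_bw = (10 << 20) / 100 == 104857
 *
 * i.e. about 10% of the unit bandwidth (1 << 20). dl_density is the same
 * kind of fraction, computed against dl_deadline instead of dl_period.
 */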
 568
 569union rcu_special {
 570	struct {
 571		u8			blocked;
 572		u8			need_qs;
 573		u8			exp_need_qs;
 574
 575		/* Otherwise the compiler can store garbage here: */
 576		u8			pad;
 577	} b; /* Bits. */
 578	u32 s; /* Set of bits. */
 579};
 580
 581enum perf_event_task_context {
 582	perf_invalid_context = -1,
 583	perf_hw_context = 0,
 584	perf_sw_context,
 585	perf_nr_task_contexts,
 586};
 587
 588struct wake_q_node {
 589	struct wake_q_node *next;
 590};
 591
 592struct task_struct {
 593#ifdef CONFIG_THREAD_INFO_IN_TASK
 594	/*
 595	 * For reasons of header soup (see current_thread_info()), this
 596	 * must be the first element of task_struct.
 597	 */
 598	struct thread_info		thread_info;
 599#endif
 600	/* -1 unrunnable, 0 runnable, >0 stopped: */
 601	volatile long			state;
 602
 603	/*
 604	 * This begins the randomizable portion of task_struct. Only
 605	 * scheduling-critical items should be added above here.
 606	 */
 607	randomized_struct_fields_start
 608
 609	void				*stack;
 610	atomic_t			usage;
 611	/* Per task flags (PF_*), defined further below: */
 612	unsigned int			flags;
 613	unsigned int			ptrace;
 614
 615#ifdef CONFIG_SMP
 616	struct llist_node		wake_entry;
 617	int				on_cpu;
 618#ifdef CONFIG_THREAD_INFO_IN_TASK
 619	/* Current CPU: */
 620	unsigned int			cpu;
 621#endif
 622	unsigned int			wakee_flips;
 623	unsigned long			wakee_flip_decay_ts;
 624	struct task_struct		*last_wakee;
 625
 626	/*
 627	 * recent_used_cpu is initially set as the last CPU used by a task
 628	 * that wakes affine another task. Waker/wakee relationships can
 629	 * push tasks around a CPU where each wakeup moves to the next one.
 630	 * Tracking a recently used CPU allows a quick search for a recently
 631	 * used CPU that may be idle.
 632	 */
 633	int				recent_used_cpu;
 634	int				wake_cpu;
 635#endif
 636	int				on_rq;
 637
 638	int				prio;
 639	int				static_prio;
 640	int				normal_prio;
 641	unsigned int			rt_priority;
 642
 643	const struct sched_class	*sched_class;
 644	struct sched_entity		se;
 645	struct sched_rt_entity		rt;
 646#ifdef CONFIG_CGROUP_SCHED
 647	struct task_group		*sched_task_group;
 648#endif
 649	struct sched_dl_entity		dl;
 650
 651#ifdef CONFIG_PREEMPT_NOTIFIERS
 652	/* List of struct preempt_notifier: */
 653	struct hlist_head		preempt_notifiers;
 654#endif
 655
 656#ifdef CONFIG_BLK_DEV_IO_TRACE
 657	unsigned int			btrace_seq;
 658#endif
 659
 660	unsigned int			policy;
 661	int				nr_cpus_allowed;
 662	cpumask_t			cpus_allowed;
 663
 664#ifdef CONFIG_PREEMPT_RCU
 665	int				rcu_read_lock_nesting;
 666	union rcu_special		rcu_read_unlock_special;
 667	struct list_head		rcu_node_entry;
 668	struct rcu_node			*rcu_blocked_node;
 669#endif /* #ifdef CONFIG_PREEMPT_RCU */
 670
 671#ifdef CONFIG_TASKS_RCU
 672	unsigned long			rcu_tasks_nvcsw;
 673	u8				rcu_tasks_holdout;
 674	u8				rcu_tasks_idx;
 675	int				rcu_tasks_idle_cpu;
 676	struct list_head		rcu_tasks_holdout_list;
 677#endif /* #ifdef CONFIG_TASKS_RCU */
 678
 679	struct sched_info		sched_info;
 680
 681	struct list_head		tasks;
 682#ifdef CONFIG_SMP
 683	struct plist_node		pushable_tasks;
 684	struct rb_node			pushable_dl_tasks;
 685#endif
 686
 687	struct mm_struct		*mm;
 688	struct mm_struct		*active_mm;
 689
 690	/* Per-thread vma caching: */
 691	struct vmacache			vmacache;
 692
 693#ifdef SPLIT_RSS_COUNTING
 694	struct task_rss_stat		rss_stat;
 695#endif
 696	int				exit_state;
 697	int				exit_code;
 698	int				exit_signal;
 699	/* The signal sent when the parent dies: */
 700	int				pdeath_signal;
 701	/* JOBCTL_*, siglock protected: */
 702	unsigned long			jobctl;
 703
 704	/* Used for emulating ABI behavior of previous Linux versions: */
 705	unsigned int			personality;
 706
 707	/* Scheduler bits, serialized by scheduler locks: */
 708	unsigned			sched_reset_on_fork:1;
 709	unsigned			sched_contributes_to_load:1;
 710	unsigned			sched_migrated:1;
 711	unsigned			sched_remote_wakeup:1;
 712	/* Force alignment to the next boundary: */
 713	unsigned			:0;
 714
 715	/* Unserialized, strictly 'current' */
 716
 717	/* Bit to tell LSMs we're in execve(): */
 718	unsigned			in_execve:1;
 719	unsigned			in_iowait:1;
 720#ifndef TIF_RESTORE_SIGMASK
 721	unsigned			restore_sigmask:1;
 722#endif
 723#ifdef CONFIG_MEMCG
 724	unsigned			memcg_may_oom:1;
 725#ifndef CONFIG_SLOB
 726	unsigned			memcg_kmem_skip_account:1;
 727#endif
 728#endif
 729#ifdef CONFIG_COMPAT_BRK
 730	unsigned			brk_randomized:1;
 731#endif
 732#ifdef CONFIG_CGROUPS
 733	/* disallow userland-initiated cgroup migration */
 734	unsigned			no_cgroup_migration:1;
 735#endif
 736
 737	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 738
 739	struct restart_block		restart_block;
 740
 741	pid_t				pid;
 742	pid_t				tgid;
 743
 744#ifdef CONFIG_CC_STACKPROTECTOR
 745	/* Canary value for the -fstack-protector GCC feature: */
 746	unsigned long			stack_canary;
 747#endif
 748	/*
 749	 * Pointers to the (original) parent process, youngest child, younger sibling,
 750	 * older sibling, respectively.  (p->father can be replaced with
 751	 * p->real_parent->pid)
 752	 */
 753
 754	/* Real parent process: */
 755	struct task_struct __rcu	*real_parent;
 756
 757	/* Recipient of SIGCHLD, wait4() reports: */
 758	struct task_struct __rcu	*parent;
 759
 760	/*
 761	 * Children/sibling form the list of natural children:
 762	 */
 763	struct list_head		children;
 764	struct list_head		sibling;
 765	struct task_struct		*group_leader;
 766
 767	/*
 768	 * 'ptraced' is the list of tasks this task is using ptrace() on.
 769	 *
 770	 * This includes both natural children and PTRACE_ATTACH targets.
 771	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
 772	 */
 773	struct list_head		ptraced;
 774	struct list_head		ptrace_entry;
 775
 776	/* PID/PID hash table linkage. */
 777	struct pid_link			pids[PIDTYPE_MAX];
 778	struct list_head		thread_group;
 779	struct list_head		thread_node;
 780
 781	struct completion		*vfork_done;
 782
 783	/* CLONE_CHILD_SETTID: */
 784	int __user			*set_child_tid;
 785
 786	/* CLONE_CHILD_CLEARTID: */
 787	int __user			*clear_child_tid;
 788
 789	u64				utime;
 790	u64				stime;
 791#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 792	u64				utimescaled;
 793	u64				stimescaled;
 794#endif
 795	u64				gtime;
 796	struct prev_cputime		prev_cputime;
 797#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 798	struct vtime			vtime;
 799#endif
 800
 801#ifdef CONFIG_NO_HZ_FULL
 802	atomic_t			tick_dep_mask;
 803#endif
 804	/* Context switch counts: */
 805	unsigned long			nvcsw;
 806	unsigned long			nivcsw;
 807
 808	/* Monotonic time in nsecs: */
 809	u64				start_time;
 810
 811	/* Boot based time in nsecs: */
 812	u64				real_start_time;
 813
 814	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
 815	unsigned long			min_flt;
 816	unsigned long			maj_flt;
 817
 818#ifdef CONFIG_POSIX_TIMERS
 819	struct task_cputime		cputime_expires;
 820	struct list_head		cpu_timers[3];
 821#endif
 822
 823	/* Process credentials: */
 824
 825	/* Tracer's credentials at attach: */
 826	const struct cred __rcu		*ptracer_cred;
 827
 828	/* Objective and real subjective task credentials (COW): */
 829	const struct cred __rcu		*real_cred;
 830
 831	/* Effective (overridable) subjective task credentials (COW): */
 832	const struct cred __rcu		*cred;
 833
 834	/*
 835	 * executable name, excluding path.
 836	 *
 837	 * - normally initialized by setup_new_exec()
 838	 * - access it with [gs]et_task_comm()
 839	 * - lock it with task_lock()
 840	 */
 841	char				comm[TASK_COMM_LEN];
 842
 843	struct nameidata		*nameidata;
 844
 845#ifdef CONFIG_SYSVIPC
 846	struct sysv_sem			sysvsem;
 847	struct sysv_shm			sysvshm;
 848#endif
 849#ifdef CONFIG_DETECT_HUNG_TASK
 850	unsigned long			last_switch_count;
 851#endif
 852	/* Filesystem information: */
 853	struct fs_struct		*fs;
 854
 855	/* Open file information: */
 856	struct files_struct		*files;
 857
 858	/* Namespaces: */
 859	struct nsproxy			*nsproxy;
 860
 861	/* Signal handlers: */
 862	struct signal_struct		*signal;
 863	struct sighand_struct		*sighand;
 864	sigset_t			blocked;
 865	sigset_t			real_blocked;
 866	/* Restored if set_restore_sigmask() was used: */
 867	sigset_t			saved_sigmask;
 868	struct sigpending		pending;
 869	unsigned long			sas_ss_sp;
 870	size_t				sas_ss_size;
 871	unsigned int			sas_ss_flags;
 872
 873	struct callback_head		*task_works;
 874
 875	struct audit_context		*audit_context;
 876#ifdef CONFIG_AUDITSYSCALL
 877	kuid_t				loginuid;
 878	unsigned int			sessionid;
 879#endif
 880	struct seccomp			seccomp;
 881
 882	/* Thread group tracking: */
 883	u32				parent_exec_id;
 884	u32				self_exec_id;
 885
 886	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
 887	spinlock_t			alloc_lock;
 888
 889	/* Protection of the PI data structures: */
 890	raw_spinlock_t			pi_lock;
 891
 892	struct wake_q_node		wake_q;
 893
 894#ifdef CONFIG_RT_MUTEXES
 895	/* PI waiters blocked on a rt_mutex held by this task: */
 896	struct rb_root_cached		pi_waiters;
 897	/* Updated under owner's pi_lock and rq lock */
 898	struct task_struct		*pi_top_task;
 899	/* Deadlock detection and priority inheritance handling: */
 900	struct rt_mutex_waiter		*pi_blocked_on;
 901#endif
 902
 903#ifdef CONFIG_DEBUG_MUTEXES
 904	/* Mutex deadlock detection: */
 905	struct mutex_waiter		*blocked_on;
 906#endif
 907
 908#ifdef CONFIG_TRACE_IRQFLAGS
 909	unsigned int			irq_events;
 910	unsigned long			hardirq_enable_ip;
 911	unsigned long			hardirq_disable_ip;
 912	unsigned int			hardirq_enable_event;
 913	unsigned int			hardirq_disable_event;
 914	int				hardirqs_enabled;
 915	int				hardirq_context;
 916	unsigned long			softirq_disable_ip;
 917	unsigned long			softirq_enable_ip;
 918	unsigned int			softirq_disable_event;
 919	unsigned int			softirq_enable_event;
 920	int				softirqs_enabled;
 921	int				softirq_context;
 922#endif
 923
 924#ifdef CONFIG_LOCKDEP
 925# define MAX_LOCK_DEPTH			48UL
 926	u64				curr_chain_key;
 927	int				lockdep_depth;
 928	unsigned int			lockdep_recursion;
 929	struct held_lock		held_locks[MAX_LOCK_DEPTH];
 930#endif
 931
 932#ifdef CONFIG_UBSAN
 933	unsigned int			in_ubsan;
 934#endif
 935
 936	/* Journalling filesystem info: */
 937	void				*journal_info;
 938
 939	/* Stacked block device info: */
 940	struct bio_list			*bio_list;
 941
 942#ifdef CONFIG_BLOCK
 943	/* Stack plugging: */
 944	struct blk_plug			*plug;
 945#endif
 946
 947	/* VM state: */
 948	struct reclaim_state		*reclaim_state;
 949
 950	struct backing_dev_info		*backing_dev_info;
 951
 952	struct io_context		*io_context;
 953
 954	/* Ptrace state: */
 955	unsigned long			ptrace_message;
 956	siginfo_t			*last_siginfo;
 957
 958	struct task_io_accounting	ioac;
 959#ifdef CONFIG_TASK_XACCT
 960	/* Accumulated RSS usage: */
 961	u64				acct_rss_mem1;
 962	/* Accumulated virtual memory usage: */
 963	u64				acct_vm_mem1;
 964	/* stime + utime since last update: */
 965	u64				acct_timexpd;
 966#endif
 967#ifdef CONFIG_CPUSETS
 968	/* Protected by ->alloc_lock: */
 969	nodemask_t			mems_allowed;
 970	/* Sequence number to catch updates: */
 971	seqcount_t			mems_allowed_seq;
 972	int				cpuset_mem_spread_rotor;
 973	int				cpuset_slab_spread_rotor;
 974#endif
 975#ifdef CONFIG_CGROUPS
 976	/* Control Group info protected by css_set_lock: */
 977	struct css_set __rcu		*cgroups;
 978	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
 979	struct list_head		cg_list;
 980#endif
 981#ifdef CONFIG_INTEL_RDT
 982	u32				closid;
 983	u32				rmid;
 984#endif
 985#ifdef CONFIG_FUTEX
 986	struct robust_list_head __user	*robust_list;
 987#ifdef CONFIG_COMPAT
 988	struct compat_robust_list_head __user *compat_robust_list;
 989#endif
 990	struct list_head		pi_state_list;
 991	struct futex_pi_state		*pi_state_cache;
 992#endif
 993#ifdef CONFIG_PERF_EVENTS
 994	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
 995	struct mutex			perf_event_mutex;
 996	struct list_head		perf_event_list;
 997#endif
 998#ifdef CONFIG_DEBUG_PREEMPT
 999	unsigned long			preempt_disable_ip;
1000#endif
1001#ifdef CONFIG_NUMA
1002	/* Protected by alloc_lock: */
1003	struct mempolicy		*mempolicy;
1004	short				il_prev;
1005	short				pref_node_fork;
1006#endif
1007#ifdef CONFIG_NUMA_BALANCING
1008	int				numa_scan_seq;
1009	unsigned int			numa_scan_period;
1010	unsigned int			numa_scan_period_max;
1011	int				numa_preferred_nid;
1012	unsigned long			numa_migrate_retry;
1013	/* Migration stamp: */
1014	u64				node_stamp;
1015	u64				last_task_numa_placement;
1016	u64				last_sum_exec_runtime;
1017	struct callback_head		numa_work;
1018
1019	struct list_head		numa_entry;
1020	struct numa_group		*numa_group;
1021
1022	/*
1023	 * numa_faults is an array split into four regions:
1024	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1025	 * in this precise order.
1026	 *
1027	 * faults_memory: Exponential decaying average of faults on a per-node
1028	 * basis. Scheduling placement decisions are made based on these
1029	 * counts. The values remain static for the duration of a PTE scan.
1030	 * faults_cpu: Track the nodes the process was running on when a NUMA
1031	 * hinting fault was incurred.
1032	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1033	 * during the current scan window. When the scan completes, the counts
1034	 * in faults_memory and faults_cpu decay and these values are copied.
1035	 */
1036	unsigned long			*numa_faults;
1037	unsigned long			total_numa_faults;
1038
1039	/*
1040	 * numa_faults_locality tracks if faults recorded during the last
1041	 * scan window were remote/local or failed to migrate. The task scan
1042	 * period is adapted based on the locality of the faults with different
1043	 * weights depending on whether they were shared or private faults
1044	 */
1045	unsigned long			numa_faults_locality[3];
1046
1047	unsigned long			numa_pages_migrated;
1048#endif /* CONFIG_NUMA_BALANCING */
1049
1050	struct tlbflush_unmap_batch	tlb_ubc;
1051
1052	struct rcu_head			rcu;
1053
1054	/* Cache last used pipe for splice(): */
1055	struct pipe_inode_info		*splice_pipe;
1056
1057	struct page_frag		task_frag;
1058
1059#ifdef CONFIG_TASK_DELAY_ACCT
1060	struct task_delay_info		*delays;
1061#endif
1062
1063#ifdef CONFIG_FAULT_INJECTION
1064	int				make_it_fail;
1065	unsigned int			fail_nth;
1066#endif
1067	/*
1068	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1069	 * balance_dirty_pages() for a dirty throttling pause:
1070	 */
1071	int				nr_dirtied;
1072	int				nr_dirtied_pause;
1073	/* Start of a write-and-pause period: */
1074	unsigned long			dirty_paused_when;
1075
1076#ifdef CONFIG_LATENCYTOP
1077	int				latency_record_count;
1078	struct latency_record		latency_record[LT_SAVECOUNT];
1079#endif
1080	/*
1081	 * Time slack values; these are used to round up poll() and
1082	 * select() etc timeout values. These are in nanoseconds.
1083	 */
1084	u64				timer_slack_ns;
1085	u64				default_timer_slack_ns;
1086
1087#ifdef CONFIG_KASAN
1088	unsigned int			kasan_depth;
1089#endif
1090
1091#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1092	/* Index of current stored address in ret_stack: */
1093	int				curr_ret_stack;
1094
1095	/* Stack of return addresses for return function tracing: */
1096	struct ftrace_ret_stack		*ret_stack;
1097
1098	/* Timestamp for last schedule: */
1099	unsigned long long		ftrace_timestamp;
1100
1101	/*
1102	 * Number of functions that haven't been traced
1103	 * because of depth overrun:
1104	 */
1105	atomic_t			trace_overrun;
1106
1107	/* Pause tracing: */
1108	atomic_t			tracing_graph_pause;
1109#endif
1110
1111#ifdef CONFIG_TRACING
1112	/* State flags for use by tracers: */
1113	unsigned long			trace;
1114
1115	/* Bitmask and counter of trace recursion: */
1116	unsigned long			trace_recursion;
1117#endif /* CONFIG_TRACING */
1118
1119#ifdef CONFIG_KCOV
1120	/* Coverage collection mode enabled for this task (0 if disabled): */
1121	enum kcov_mode			kcov_mode;
1122
1123	/* Size of the kcov_area: */
1124	unsigned int			kcov_size;
1125
1126	/* Buffer for coverage collection: */
1127	void				*kcov_area;
1128
1129	/* KCOV descriptor wired with this task or NULL: */
1130	struct kcov			*kcov;
1131#endif
1132
1133#ifdef CONFIG_MEMCG
1134	struct mem_cgroup		*memcg_in_oom;
1135	gfp_t				memcg_oom_gfp_mask;
1136	int				memcg_oom_order;
1137
1138	/* Number of pages to reclaim on returning to userland: */
1139	unsigned int			memcg_nr_pages_over_high;
1140#endif
1141
1142#ifdef CONFIG_UPROBES
1143	struct uprobe_task		*utask;
1144#endif
1145#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1146	unsigned int			sequential_io;
1147	unsigned int			sequential_io_avg;
1148#endif
1149#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1150	unsigned long			task_state_change;
1151#endif
1152	int				pagefault_disabled;
1153#ifdef CONFIG_MMU
1154	struct task_struct		*oom_reaper_list;
1155#endif
1156#ifdef CONFIG_VMAP_STACK
1157	struct vm_struct		*stack_vm_area;
1158#endif
1159#ifdef CONFIG_THREAD_INFO_IN_TASK
1160	/* A live task holds one reference: */
1161	atomic_t			stack_refcount;
1162#endif
1163#ifdef CONFIG_LIVEPATCH
1164	int patch_state;
1165#endif
1166#ifdef CONFIG_SECURITY
1167	/* Used by LSM modules for access restriction: */
1168	void				*security;
1169#endif
1170
1171	/*
1172	 * New fields for task_struct should be added above here, so that
1173	 * they are included in the randomized portion of task_struct.
1174	 */
1175	randomized_struct_fields_end
1176
1177	/* CPU-specific state of this task: */
1178	struct thread_struct		thread;
1179
1180	/*
1181	 * WARNING: on x86, 'thread_struct' contains a variable-sized
1182	 * structure.  It *MUST* be at the end of 'task_struct'.
1183	 *
1184	 * Do not put anything below here!
1185	 */
1186};
1187
1188static inline struct pid *task_pid(struct task_struct *task)
1189{
1190	return task->pids[PIDTYPE_PID].pid;
1191}
1192
1193static inline struct pid *task_tgid(struct task_struct *task)
1194{
1195	return task->group_leader->pids[PIDTYPE_PID].pid;
1196}
1197
1198/*
1199 * Without tasklist or RCU lock it is not safe to dereference
1200 * the result of task_pgrp/task_session even if task == current,
1201 * we can race with another thread doing sys_setsid/sys_setpgid.
1202 */
1203static inline struct pid *task_pgrp(struct task_struct *task)
1204{
1205	return task->group_leader->pids[PIDTYPE_PGID].pid;
1206}
1207
1208static inline struct pid *task_session(struct task_struct *task)
1209{
1210	return task->group_leader->pids[PIDTYPE_SID].pid;
1211}
1212
1213/*
1214 * the helpers to get the task's different pids as they are seen
1215 * from various namespaces
1216 *
1217 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1218 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1219 *                     current.
1220 * task_xid_nr_ns()  : id seen from the ns specified;
1221 *
1222 * see also pid_nr() etc in include/linux/pid.h
1223 */
1224pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1225
1226static inline pid_t task_pid_nr(struct task_struct *tsk)
1227{
1228	return tsk->pid;
1229}
1230
1231static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1232{
1233	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1234}
1235
1236static inline pid_t task_pid_vnr(struct task_struct *tsk)
1237{
1238	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1239}
1240
1241
1242static inline pid_t task_tgid_nr(struct task_struct *tsk)
1243{
1244	return tsk->tgid;
1245}
1246
1247/**
1248 * pid_alive - check that a task structure is not stale
1249 * @p: Task structure to be checked.
1250 *
1251 * Test if a process is not yet dead (at most zombie state).
1252 * If pid_alive fails, then pointers within the task structure
1253 * can be stale and must not be dereferenced.
1254 *
1255 * Return: 1 if the process is alive. 0 otherwise.
1256 */
1257static inline int pid_alive(const struct task_struct *p)
1258{
1259	return p->pids[PIDTYPE_PID].pid != NULL;
1260}
1261
1262static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1263{
1264	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1265}
1266
1267static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1268{
1269	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1270}
1271
1272
1273static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1274{
1275	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1276}
1277
1278static inline pid_t task_session_vnr(struct task_struct *tsk)
1279{
1280	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1281}
1282
1283static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1284{
1285	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
1286}
1287
1288static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1289{
1290	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
1291}
1292
1293static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1294{
1295	pid_t pid = 0;
1296
1297	rcu_read_lock();
1298	if (pid_alive(tsk))
1299		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1300	rcu_read_unlock();
1301
1302	return pid;
1303}
1304
1305static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1306{
1307	return task_ppid_nr_ns(tsk, &init_pid_ns);
1308}
1309
1310/* Obsolete, do not use: */
1311static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1312{
1313	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1314}
1315
1316#define TASK_REPORT_IDLE	(TASK_REPORT + 1)
1317#define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
1318
1319static inline unsigned int task_state_index(struct task_struct *tsk)
1320{
1321	unsigned int tsk_state = READ_ONCE(tsk->state);
1322	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1323
1324	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1325
1326	if (tsk_state == TASK_IDLE)
1327		state = TASK_REPORT_IDLE;
1328
1329	return fls(state);
1330}
1331
1332static inline char task_index_to_char(unsigned int state)
1333{
1334	static const char state_char[] = "RSDTtXZPI";
1335
1336	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1337
1338	return state_char[state];
1339}
1340
1341static inline char task_state_to_char(struct task_struct *tsk)
1342{
1343	return task_index_to_char(task_state_index(tsk));
1344}
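/*
 * Worked example for the helpers above (no new behaviour): a task sleeping
 * in TASK_INTERRUPTIBLE has state 0x1, fls() maps that to index 1, and
 * task_index_to_char() returns 'S' from "RSDTtXZPI". A running task has no
 * report bit set and ends up at index 0 ('R'), while TASK_IDLE is reported
 * via TASK_REPORT_IDLE and comes out as 'I'.
 */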
1345
1346/**
1347 * is_global_init - check if a task structure is init. Since init
1348 * is free to have sub-threads, we need to check tgid.
1349 * @tsk: Task structure to be checked.
1350 *
1351 * Check if a task structure is the first user space task the kernel created.
1352 *
1353 * Return: 1 if the task structure is init. 0 otherwise.
1354 */
1355static inline int is_global_init(struct task_struct *tsk)
1356{
1357	return task_tgid_nr(tsk) == 1;
1358}
1359
1360extern struct pid *cad_pid;
1361
1362/*
1363 * Per process flags
1364 */
1365#define PF_IDLE			0x00000002	/* I am an IDLE thread */
1366#define PF_EXITING		0x00000004	/* Getting shut down */
1367#define PF_EXITPIDONE		0x00000008	/* PI exit done on shut down */
1368#define PF_VCPU			0x00000010	/* I'm a virtual CPU */
1369#define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
1370#define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
1371#define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
1372#define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
1373#define PF_DUMPCORE		0x00000200	/* Dumped core */
1374#define PF_SIGNALED		0x00000400	/* Killed by a signal */
1375#define PF_MEMALLOC		0x00000800	/* Allocating memory */
1376#define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
1377#define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
1378#define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
1379#define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
1380#define PF_FROZEN		0x00010000	/* Frozen for system suspend */
1381#define PF_KSWAPD		0x00020000	/* I am kswapd */
1382#define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
1383#define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
1384#define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
1385#define PF_KTHREAD		0x00200000	/* I am a kernel thread */
1386#define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
1387#define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
1388#define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_allowed */
1389#define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
1390#define PF_MUTEX_TESTER		0x20000000	/* Thread belongs to the rt mutex tester */
1391#define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
1392#define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
1393
1394/*
1395 * Only the _current_ task can read/write to tsk->flags, but other
1396 * tasks can access tsk->flags in readonly mode for example
1397 * with tsk_used_math (like during threaded core dumping).
1398 * There is however an exception to this rule during ptrace
1399 * or during fork: the ptracer task is allowed to write to the
1400 * child->flags of its traced child (same goes for fork, the parent
1401 * can write to the child->flags), because we're guaranteed the
1402 * child is not running and in turn not changing child->flags
1403 * at the same time the parent does it.
1404 */
1405#define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
1406#define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
1407#define clear_used_math()			clear_stopped_child_used_math(current)
1408#define set_used_math()				set_stopped_child_used_math(current)
1409
1410#define conditional_stopped_child_used_math(condition, child) \
1411	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1412
1413#define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)
1414
1415#define copy_to_stopped_child_used_math(child) \
1416	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1417
1418/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1419#define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
1420#define used_math()				tsk_used_math(current)
1421
1422static inline bool is_percpu_thread(void)
1423{
1424#ifdef CONFIG_SMP
1425	return (current->flags & PF_NO_SETAFFINITY) &&
1426		(current->nr_cpus_allowed  == 1);
1427#else
1428	return true;
1429#endif
1430}
1431
1432/* Per-process atomic flags. */
1433#define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
1434#define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
1435#define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
1436#define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
1437#define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled*/
1438
1439#define TASK_PFA_TEST(name, func)					\
1440	static inline bool task_##func(struct task_struct *p)		\
1441	{ return test_bit(PFA_##name, &p->atomic_flags); }
1442
1443#define TASK_PFA_SET(name, func)					\
1444	static inline void task_set_##func(struct task_struct *p)	\
1445	{ set_bit(PFA_##name, &p->atomic_flags); }
1446
1447#define TASK_PFA_CLEAR(name, func)					\
1448	static inline void task_clear_##func(struct task_struct *p)	\
1449	{ clear_bit(PFA_##name, &p->atomic_flags); }
1450
1451TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1452TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1453
1454TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1455TASK_PFA_SET(SPREAD_PAGE, spread_page)
1456TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1457
1458TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1459TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1460TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1461
1462TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1463TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1464TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1465
1466TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1467TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
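/*
 * Expansion sketch (illustrative): TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
 * above generates
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * and TASK_PFA_SET()/TASK_PFA_CLEAR() generate the matching task_set_*()
 * and task_clear_*() helpers. NO_NEW_PRIVS and SPEC_SSB_FORCE_DISABLE get
 * no CLEAR helper on purpose: once set, they stay set for the task's
 * lifetime.
 */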
1468
1469static inline void
1470current_restore_flags(unsigned long orig_flags, unsigned long flags)
1471{
1472	current->flags &= ~flags;
1473	current->flags |= orig_flags & flags;
1474}
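/*
 * Usage sketch (illustrative): current_restore_flags() supports
 * save/modify/restore sequences on current->flags, e.g. temporarily
 * entering a GFP_NOFS allocation context:
 *
 *	unsigned long pflags = current->flags & PF_MEMALLOC_NOFS;
 *
 *	current->flags |= PF_MEMALLOC_NOFS;
 *	... allocate ...
 *	current_restore_flags(pflags, PF_MEMALLOC_NOFS);
 *
 * Because only the saved bits are restored, the flag is cleared on exit
 * only if it was already clear on entry, so such sections nest safely.
 */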
1475
1476extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1477extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1478#ifdef CONFIG_SMP
1479extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1480extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1481#else
1482static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1483{
1484}
1485static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1486{
1487	if (!cpumask_test_cpu(0, new_mask))
1488		return -EINVAL;
1489	return 0;
1490}
1491#endif
1492
1493#ifndef cpu_relax_yield
1494#define cpu_relax_yield() cpu_relax()
1495#endif
1496
1497extern int yield_to(struct task_struct *p, bool preempt);
1498extern void set_user_nice(struct task_struct *p, long nice);
1499extern int task_prio(const struct task_struct *p);
1500
1501/**
1502 * task_nice - return the nice value of a given task.
1503 * @p: the task in question.
1504 *
1505 * Return: The nice value [ -20 ... 0 ... 19 ].
1506 */
1507static inline int task_nice(const struct task_struct *p)
1508{
1509	return PRIO_TO_NICE((p)->static_prio);
1510}
1511
1512extern int can_nice(const struct task_struct *p, const int nice);
1513extern int task_curr(const struct task_struct *p);
1514extern int idle_cpu(int cpu);
1515extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1516extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1517extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1518extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
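/*
 * Usage sketch (illustrative; p stands for the target task_struct):
 * in-kernel users that must not be starved typically promote a kthread to
 * an RT policy via the _nocheck variant, which skips the permission checks
 * applied to requests coming from userspace:
 *
 *	struct sched_param param = { .sched_priority = 1 };
 *
 *	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 */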
1519extern struct task_struct *idle_task(int cpu);
1520
1521/**
1522 * is_idle_task - is the specified task an idle task?
1523 * @p: the task in question.
1524 *
1525 * Return: 1 if @p is an idle task. 0 otherwise.
1526 */
1527static inline bool is_idle_task(const struct task_struct *p)
1528{
1529	return !!(p->flags & PF_IDLE);
1530}
1531
1532extern struct task_struct *curr_task(int cpu);
1533extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1534
1535void yield(void);
1536
1537union thread_union {
1538#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1539	struct task_struct task;
1540#endif
1541#ifndef CONFIG_THREAD_INFO_IN_TASK
1542	struct thread_info thread_info;
1543#endif
1544	unsigned long stack[THREAD_SIZE/sizeof(long)];
1545};
1546
1547#ifndef CONFIG_THREAD_INFO_IN_TASK
1548extern struct thread_info init_thread_info;
1549#endif
1550
1551extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1552
1553#ifdef CONFIG_THREAD_INFO_IN_TASK
1554static inline struct thread_info *task_thread_info(struct task_struct *task)
1555{
1556	return &task->thread_info;
1557}
1558#elif !defined(__HAVE_THREAD_FUNCTIONS)
1559# define task_thread_info(task)	((struct thread_info *)(task)->stack)
1560#endif
1561
1562/*
1563 * find a task by one of its numerical ids
1564 *
1565 * find_task_by_pid_ns():
1566 *      finds a task by its pid in the specified namespace
1567 * find_task_by_vpid():
1568 *      finds a task by its virtual pid
1569 *
1570 * see also find_vpid() etc in include/linux/pid.h
1571 */
1572
1573extern struct task_struct *find_task_by_vpid(pid_t nr);
1574extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1575
1576/*
1577 * find a task by its virtual pid and get the task struct
1578 */
1579extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1580
1581extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1582extern int wake_up_process(struct task_struct *tsk);
1583extern void wake_up_new_task(struct task_struct *tsk);
1584
1585#ifdef CONFIG_SMP
1586extern void kick_process(struct task_struct *tsk);
1587#else
1588static inline void kick_process(struct task_struct *tsk) { }
1589#endif
1590
1591extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1592
1593static inline void set_task_comm(struct task_struct *tsk, const char *from)
1594{
1595	__set_task_comm(tsk, from, false);
1596}
1597
1598extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1599#define get_task_comm(buf, tsk) ({			\
1600	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
1601	__get_task_comm(buf, sizeof(buf), tsk);		\
1602})
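/*
 * Usage sketch (illustrative): the buffer must literally be a
 * TASK_COMM_LEN-sized array, since the macro checks sizeof(buf):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_debug("comm is %s\n", comm);
 */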
1603
1604#ifdef CONFIG_SMP
1605void scheduler_ipi(void);
1606extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1607#else
1608static inline void scheduler_ipi(void) { }
1609static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1610{
1611	return 1;
1612}
1613#endif
1614
1615/*
1616 * Set thread flags in other task's structures.
1617 * See asm/thread_info.h for TIF_xxxx flags available:
1618 */
1619static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1620{
1621	set_ti_thread_flag(task_thread_info(tsk), flag);
1622}
1623
1624static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1625{
1626	clear_ti_thread_flag(task_thread_info(tsk), flag);
1627}
1628
1629static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1630{
1631	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1632}
1633
1634static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1635{
1636	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1637}
1638
1639static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1640{
1641	return test_ti_thread_flag(task_thread_info(tsk), flag);
1642}
1643
1644static inline void set_tsk_need_resched(struct task_struct *tsk)
1645{
1646	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1647}
1648
1649static inline void clear_tsk_need_resched(struct task_struct *tsk)
1650{
1651	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1652}
1653
1654static inline int test_tsk_need_resched(struct task_struct *tsk)
1655{
1656	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1657}
1658
1659/*
1660 * cond_resched() and cond_resched_lock(): latency reduction via
1661 * explicit rescheduling in places that are safe. The return
1662 * value indicates whether a reschedule was actually done.
1663 * cond_resched_lock() will drop the spinlock before scheduling,
1664 * cond_resched_softirq() will enable bhs before scheduling.
1665 */
1666#ifndef CONFIG_PREEMPT
1667extern int _cond_resched(void);
1668#else
1669static inline int _cond_resched(void) { return 0; }
1670#endif
1671
1672#define cond_resched() ({			\
1673	___might_sleep(__FILE__, __LINE__, 0);	\
1674	_cond_resched();			\
1675})
1676
1677extern int __cond_resched_lock(spinlock_t *lock);
1678
1679#define cond_resched_lock(lock) ({				\
1680	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1681	__cond_resched_lock(lock);				\
1682})
1683
1684extern int __cond_resched_softirq(void);
1685
1686#define cond_resched_softirq() ({					\
1687	___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
1688	__cond_resched_softirq();					\
1689})
1690
1691static inline void cond_resched_rcu(void)
1692{
1693#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1694	rcu_read_unlock();
1695	cond_resched();
1696	rcu_read_lock();
1697#endif
1698}
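/*
 * Usage sketch (illustrative; i, nr_items and process_one() are stand-ins
 * for the real work): long-running loops in process context sprinkle
 * cond_resched() so that, on non-preemptible kernels, other tasks still get
 * a chance to run:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_one(i);
 *		cond_resched();
 *	}
 */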
1699
1700/*
1701 * Does a critical section need to be broken due to another
1702 * task waiting? (This technically does not depend on CONFIG_PREEMPT,
1703 * but reflects a general need for low latency.)
1704 */
1705static inline int spin_needbreak(spinlock_t *lock)
1706{
1707#ifdef CONFIG_PREEMPT
1708	return spin_is_contended(lock);
1709#else
1710	return 0;
1711#endif
1712}
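/*
 * Usage sketch (illustrative; lock, head, pos and do_work() are stand-ins):
 * __cond_resched_lock() checks spin_needbreak() internally, so a long walk
 * under a spinlock is typically written as
 *
 *	spin_lock(&lock);
 *	list_for_each_entry(pos, &head, node) {
 *		do_work(pos);
 *		cond_resched_lock(&lock);
 *	}
 *	spin_unlock(&lock);
 *
 * bearing in mind that whenever the lock is dropped the list may change
 * underneath the walker.
 */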
1713
1714static __always_inline bool need_resched(void)
1715{
1716	return unlikely(tif_need_resched());
1717}
1718
1719/*
1720 * Wrappers for p->thread_info->cpu access. No-op on UP.
1721 */
1722#ifdef CONFIG_SMP
1723
1724static inline unsigned int task_cpu(const struct task_struct *p)
1725{
1726#ifdef CONFIG_THREAD_INFO_IN_TASK
1727	return p->cpu;
1728#else
1729	return task_thread_info(p)->cpu;
1730#endif
1731}
1732
1733extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1734
1735#else
1736
1737static inline unsigned int task_cpu(const struct task_struct *p)
1738{
1739	return 0;
1740}
1741
1742static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1743{
1744}
1745
1746#endif /* CONFIG_SMP */
1747
1748/*
1749 * In order to reduce various lock holder preemption latencies provide an
1750 * interface to see if a vCPU is currently running or not.
1751 *
1752 * This allows us to terminate optimistic spin loops and block, analogous to
1753 * the native optimistic spin heuristic of testing if the lock owner task is
1754 * running or not.
1755 */
1756#ifndef vcpu_is_preempted
1757# define vcpu_is_preempted(cpu)	false
1758#endif
1759
1760extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1761extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1762
1763#ifndef TASK_SIZE_OF
1764#define TASK_SIZE_OF(tsk)	TASK_SIZE
1765#endif
1766
1767#endif