   1/*
   2 *  kernel/cpuset.c
   3 *
   4 *  Processor and Memory placement constraints for sets of tasks.
   5 *
   6 *  Copyright (C) 2003 BULL SA.
   7 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
   8 *  Copyright (C) 2006 Google, Inc
   9 *
  10 *  Portions derived from Patrick Mochel's sysfs code.
  11 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
  12 *
  13 *  2003-10-10 Written by Simon Derr.
  14 *  2003-10-22 Updates by Stephen Hemminger.
  15 *  2004 May-July Rework by Paul Jackson.
  16 *  2006 Rework by Paul Menage to use generic cgroups
  17 *  2008 Rework of the scheduler domains and CPU hotplug handling
  18 *       by Max Krasnyansky
  19 *
  20 *  This file is subject to the terms and conditions of the GNU General Public
  21 *  License.  See the file COPYING in the main directory of the Linux
  22 *  distribution for more details.
  23 */
  24
  25#include <linux/cpu.h>
  26#include <linux/cpumask.h>
  27#include <linux/cpuset.h>
  28#include <linux/err.h>
  29#include <linux/errno.h>
  30#include <linux/file.h>
  31#include <linux/fs.h>
  32#include <linux/init.h>
  33#include <linux/interrupt.h>
  34#include <linux/kernel.h>
  35#include <linux/kmod.h>
  36#include <linux/list.h>
  37#include <linux/mempolicy.h>
  38#include <linux/mm.h>
  39#include <linux/memory.h>
  40#include <linux/module.h>
  41#include <linux/mount.h>
  42#include <linux/namei.h>
  43#include <linux/pagemap.h>
  44#include <linux/proc_fs.h>
  45#include <linux/rcupdate.h>
  46#include <linux/sched.h>
  47#include <linux/seq_file.h>
  48#include <linux/security.h>
  49#include <linux/slab.h>
  50#include <linux/spinlock.h>
  51#include <linux/stat.h>
  52#include <linux/string.h>
  53#include <linux/time.h>
  54#include <linux/backing-dev.h>
  55#include <linux/sort.h>
  56
  57#include <asm/uaccess.h>
  58#include <linux/atomic.h>
  59#include <linux/mutex.h>
  60#include <linux/workqueue.h>
  61#include <linux/cgroup.h>
  62
  63/*
  64 * Workqueue for cpuset related tasks.
  65 *
   66 * Using the shared kevent workqueue may cause a deadlock when
   67 * memory_migrate is set, so we create a separate workqueue for cpuset.
  68 */
  69static struct workqueue_struct *cpuset_wq;
  70
  71/*
   72 * Tracks how many cpusets are currently defined in the system.
  73 * When there is only one cpuset (the root cpuset) we can
  74 * short circuit some hooks.
  75 */
  76int number_of_cpusets __read_mostly;
  77
  78/* Forward declare cgroup structures */
  79struct cgroup_subsys cpuset_subsys;
  80struct cpuset;
  81
  82/* See "Frequency meter" comments, below. */
  83
  84struct fmeter {
  85	int cnt;		/* unprocessed events count */
  86	int val;		/* most recent output value */
  87	time_t time;		/* clock (secs) when val computed */
  88	spinlock_t lock;	/* guards read or write of above */
  89};
  90
  91struct cpuset {
  92	struct cgroup_subsys_state css;
  93
  94	unsigned long flags;		/* "unsigned long" so bitops work */
  95	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
  96	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
  97
  98	struct cpuset *parent;		/* my parent */
  99
 100	struct fmeter fmeter;		/* memory_pressure filter */
 101
 102	/* partition number for rebuild_sched_domains() */
 103	int pn;
 104
 105	/* for custom sched domain */
 106	int relax_domain_level;
 107
 108	/* used for walking a cpuset hierarchy */
 109	struct list_head stack_list;
 110};
 111
 112/* Retrieve the cpuset for a cgroup */
 113static inline struct cpuset *cgroup_cs(struct cgroup *cont)
 114{
 115	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
 116			    struct cpuset, css);
 117}
 118
 119/* Retrieve the cpuset for a task */
 120static inline struct cpuset *task_cs(struct task_struct *task)
 121{
 122	return container_of(task_subsys_state(task, cpuset_subsys_id),
 123			    struct cpuset, css);
 124}
 125
 126/* bits in struct cpuset flags field */
 127typedef enum {
 128	CS_CPU_EXCLUSIVE,
 129	CS_MEM_EXCLUSIVE,
 130	CS_MEM_HARDWALL,
 131	CS_MEMORY_MIGRATE,
 132	CS_SCHED_LOAD_BALANCE,
 133	CS_SPREAD_PAGE,
 134	CS_SPREAD_SLAB,
 135} cpuset_flagbits_t;
 136
 137/* convenient tests for these bits */
 138static inline int is_cpu_exclusive(const struct cpuset *cs)
 139{
 140	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
 141}
 142
 143static inline int is_mem_exclusive(const struct cpuset *cs)
 144{
 145	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 146}
 147
 148static inline int is_mem_hardwall(const struct cpuset *cs)
 149{
 150	return test_bit(CS_MEM_HARDWALL, &cs->flags);
 151}
 152
 153static inline int is_sched_load_balance(const struct cpuset *cs)
 154{
 155	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 156}
 157
 158static inline int is_memory_migrate(const struct cpuset *cs)
 159{
 160	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
 161}
 162
 163static inline int is_spread_page(const struct cpuset *cs)
 164{
 165	return test_bit(CS_SPREAD_PAGE, &cs->flags);
 166}
 167
 168static inline int is_spread_slab(const struct cpuset *cs)
 169{
 170	return test_bit(CS_SPREAD_SLAB, &cs->flags);
 171}
 172
 173static struct cpuset top_cpuset = {
 174	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
 175};
 176
 177/*
 178 * There are two global mutexes guarding cpuset structures.  The first
 179 * is the main control groups cgroup_mutex, accessed via
 180 * cgroup_lock()/cgroup_unlock().  The second is the cpuset-specific
 181 * callback_mutex, below. They can nest.  It is ok to first take
 182 * cgroup_mutex, then nest callback_mutex.  We also require taking
 183 * task_lock() when dereferencing a task's cpuset pointer.  See "The
 184 * task_lock() exception", at the end of this comment.
 185 *
 186 * A task must hold both mutexes to modify cpusets.  If a task
 187 * holds cgroup_mutex, then it blocks others wanting that mutex,
 188 * ensuring that it is the only task able to also acquire callback_mutex
 189 * and be able to modify cpusets.  It can perform various checks on
 190 * the cpuset structure first, knowing nothing will change.  It can
 191 * also allocate memory while just holding cgroup_mutex.  While it is
 192 * performing these checks, various callback routines can briefly
 193 * acquire callback_mutex to query cpusets.  Once it is ready to make
 194 * the changes, it takes callback_mutex, blocking everyone else.
 195 *
 196 * Calls to the kernel memory allocator can not be made while holding
 197 * callback_mutex, as that would risk double tripping on callback_mutex
 198 * from one of the callbacks into the cpuset code from within
 199 * __alloc_pages().
 200 *
 201 * If a task is only holding callback_mutex, then it has read-only
 202 * access to cpusets.
 203 *
  204 * The task_struct fields mems_allowed and mempolicy may be changed
  205 * by other tasks, so we use the alloc_lock in the task_struct to
  206 * protect them.
 207 *
 208 * The cpuset_common_file_read() handlers only hold callback_mutex across
 209 * small pieces of code, such as when reading out possibly multi-word
 210 * cpumasks and nodemasks.
 211 *
 212 * Accessing a task's cpuset should be done in accordance with the
 213 * guidelines for accessing subsystem state in kernel/cgroup.c
 214 */
 215
 216static DEFINE_MUTEX(callback_mutex);
 217
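/*
 * For illustration, the nesting described above as used by the update
 * paths later in this file (a sketch, not a verbatim excerpt):
 *
 *	cgroup_lock();
 *	... validate the change, allocate any needed memory ...
 *	mutex_lock(&callback_mutex);
 *	... publish the new cpus_allowed / mems_allowed ...
 *	mutex_unlock(&callback_mutex);
 *	cgroup_unlock();
 *
 * Readers that only need a stable snapshot take callback_mutex alone.
 */
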
 218/*
 219 * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
 220 * buffers.  They are statically allocated to prevent using excess stack
 221 * when calling cpuset_print_task_mems_allowed().
 222 */
 223#define CPUSET_NAME_LEN		(128)
 224#define	CPUSET_NODELIST_LEN	(256)
 225static char cpuset_name[CPUSET_NAME_LEN];
 226static char cpuset_nodelist[CPUSET_NODELIST_LEN];
 227static DEFINE_SPINLOCK(cpuset_buffer_lock);
 228
 229/*
 230 * This is ugly, but preserves the userspace API for existing cpuset
 231 * users. If someone tries to mount the "cpuset" filesystem, we
 232 * silently switch it to mount "cgroup" instead
 233 */
 234static struct dentry *cpuset_mount(struct file_system_type *fs_type,
 235			 int flags, const char *unused_dev_name, void *data)
 236{
 237	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
 238	struct dentry *ret = ERR_PTR(-ENODEV);
 239	if (cgroup_fs) {
 240		char mountopts[] =
 241			"cpuset,noprefix,"
 242			"release_agent=/sbin/cpuset_release_agent";
 243		ret = cgroup_fs->mount(cgroup_fs, flags,
 244					   unused_dev_name, mountopts);
 245		put_filesystem(cgroup_fs);
 246	}
 247	return ret;
 248}
 249
 250static struct file_system_type cpuset_fs_type = {
 251	.name = "cpuset",
 252	.mount = cpuset_mount,
 253};
 254
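/*
 * For illustration, with the redirection above a legacy mount such as
 * (the mount point is only an example):
 *
 *	mount -t cpuset none /dev/cpuset
 *
 * behaves like:
 *
 *	mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent none /dev/cpuset
 */
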
 255/*
  256 * Return in pmask the portion of a cpuset's cpus_allowed that
 257 * are online.  If none are online, walk up the cpuset hierarchy
 258 * until we find one that does have some online cpus.  If we get
 259 * all the way to the top and still haven't found any online cpus,
 260 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 261 * task, return cpu_online_map.
 262 *
 263 * One way or another, we guarantee to return some non-empty subset
 264 * of cpu_online_map.
 265 *
 266 * Call with callback_mutex held.
 267 */
 268
 269static void guarantee_online_cpus(const struct cpuset *cs,
 270				  struct cpumask *pmask)
 271{
 272	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
 273		cs = cs->parent;
 274	if (cs)
 275		cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
 276	else
 277		cpumask_copy(pmask, cpu_online_mask);
 278	BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
 279}
 280
 281/*
  282 * Return in *pmask the portion of a cpuset's mems_allowed that
 283 * are online, with memory.  If none are online with memory, walk
 284 * up the cpuset hierarchy until we find one that does have some
 285 * online mems.  If we get all the way to the top and still haven't
 286 * found any online mems, return node_states[N_HIGH_MEMORY].
 287 *
 288 * One way or another, we guarantee to return some non-empty subset
 289 * of node_states[N_HIGH_MEMORY].
 290 *
 291 * Call with callback_mutex held.
 292 */
 293
 294static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 295{
 296	while (cs && !nodes_intersects(cs->mems_allowed,
 297					node_states[N_HIGH_MEMORY]))
 298		cs = cs->parent;
 299	if (cs)
 300		nodes_and(*pmask, cs->mems_allowed,
 301					node_states[N_HIGH_MEMORY]);
 302	else
 303		*pmask = node_states[N_HIGH_MEMORY];
 304	BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
 305}
 306
 307/*
  308 * update the task's spread flags to match its cpuset's page/slab spread flags
 309 *
 310 * Called with callback_mutex/cgroup_mutex held
 311 */
 312static void cpuset_update_task_spread_flag(struct cpuset *cs,
 313					struct task_struct *tsk)
 314{
 315	if (is_spread_page(cs))
 316		tsk->flags |= PF_SPREAD_PAGE;
 317	else
 318		tsk->flags &= ~PF_SPREAD_PAGE;
 319	if (is_spread_slab(cs))
 320		tsk->flags |= PF_SPREAD_SLAB;
 321	else
 322		tsk->flags &= ~PF_SPREAD_SLAB;
 323}
 324
 325/*
 326 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 327 *
 328 * One cpuset is a subset of another if all its allowed CPUs and
 329 * Memory Nodes are a subset of the other, and its exclusive flags
 330 * are only set if the other's are set.  Call holding cgroup_mutex.
 331 */
 332
 333static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 334{
 335	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
 336		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 337		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 338		is_mem_exclusive(p) <= is_mem_exclusive(q);
 339}
 340
 341/**
 342 * alloc_trial_cpuset - allocate a trial cpuset
 343 * @cs: the cpuset that the trial cpuset duplicates
 344 */
 345static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
 346{
 347	struct cpuset *trial;
 348
 349	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
 350	if (!trial)
 351		return NULL;
 352
 353	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
 354		kfree(trial);
 355		return NULL;
 356	}
 357	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
 358
 359	return trial;
 360}
 361
 362/**
 363 * free_trial_cpuset - free the trial cpuset
 364 * @trial: the trial cpuset to be freed
 365 */
 366static void free_trial_cpuset(struct cpuset *trial)
 367{
 368	free_cpumask_var(trial->cpus_allowed);
 369	kfree(trial);
 370}
 371
 372/*
 373 * validate_change() - Used to validate that any proposed cpuset change
 374 *		       follows the structural rules for cpusets.
 375 *
 376 * If we replaced the flag and mask values of the current cpuset
 377 * (cur) with those values in the trial cpuset (trial), would
 378 * our various subset and exclusive rules still be valid?  Presumes
 379 * cgroup_mutex held.
 380 *
 381 * 'cur' is the address of an actual, in-use cpuset.  Operations
 382 * such as list traversal that depend on the actual address of the
 383 * cpuset in the list must use cur below, not trial.
 384 *
 385 * 'trial' is the address of bulk structure copy of cur, with
 386 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 387 * or flags changed to new, trial values.
 388 *
 389 * Return 0 if valid, -errno if not.
 390 */
 391
 392static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 393{
 394	struct cgroup *cont;
 395	struct cpuset *c, *par;
 396
 397	/* Each of our child cpusets must be a subset of us */
 398	list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
 399		if (!is_cpuset_subset(cgroup_cs(cont), trial))
 400			return -EBUSY;
 401	}
 402
 403	/* Remaining checks don't apply to root cpuset */
 404	if (cur == &top_cpuset)
 405		return 0;
 406
 407	par = cur->parent;
 408
 409	/* We must be a subset of our parent cpuset */
 410	if (!is_cpuset_subset(trial, par))
 411		return -EACCES;
 412
 413	/*
 414	 * If either I or some sibling (!= me) is exclusive, we can't
 415	 * overlap
 416	 */
 417	list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
 418		c = cgroup_cs(cont);
 419		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 420		    c != cur &&
 421		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
 422			return -EINVAL;
 423		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 424		    c != cur &&
 425		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
 426			return -EINVAL;
 427	}
 428
 429	/* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
 430	if (cgroup_task_count(cur->css.cgroup)) {
 431		if (cpumask_empty(trial->cpus_allowed) ||
 432		    nodes_empty(trial->mems_allowed)) {
 433			return -ENOSPC;
 434		}
 435	}
 436
 437	return 0;
 438}
 439
 440#ifdef CONFIG_SMP
 441/*
 442 * Helper routine for generate_sched_domains().
 443 * Do cpusets a, b have overlapping cpus_allowed masks?
 444 */
 445static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 446{
 447	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
 448}
 449
 450static void
 451update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 452{
 453	if (dattr->relax_domain_level < c->relax_domain_level)
 454		dattr->relax_domain_level = c->relax_domain_level;
 455	return;
 456}
 457
 458static void
 459update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
 460{
 461	LIST_HEAD(q);
 462
 463	list_add(&c->stack_list, &q);
 464	while (!list_empty(&q)) {
 465		struct cpuset *cp;
 466		struct cgroup *cont;
 467		struct cpuset *child;
 468
 469		cp = list_first_entry(&q, struct cpuset, stack_list);
 470		list_del(q.next);
 471
 472		if (cpumask_empty(cp->cpus_allowed))
 473			continue;
 474
 475		if (is_sched_load_balance(cp))
 476			update_domain_attr(dattr, cp);
 477
 478		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 479			child = cgroup_cs(cont);
 480			list_add_tail(&child->stack_list, &q);
 481		}
 482	}
 483}
 484
 485/*
 486 * generate_sched_domains()
 487 *
  488 * This function builds a partial partition of the system's CPUs.
 489 * A 'partial partition' is a set of non-overlapping subsets whose
 490 * union is a subset of that set.
 491 * The output of this function needs to be passed to kernel/sched.c
 492 * partition_sched_domains() routine, which will rebuild the scheduler's
 493 * load balancing domains (sched domains) as specified by that partial
 494 * partition.
 495 *
 496 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 497 * for a background explanation of this.
 498 *
 499 * Does not return errors, on the theory that the callers of this
 500 * routine would rather not worry about failures to rebuild sched
 501 * domains when operating in the severe memory shortage situations
 502 * that could cause allocation failures below.
 503 *
 504 * Must be called with cgroup_lock held.
 505 *
 506 * The three key local variables below are:
 507 *    q  - a linked-list queue of cpuset pointers, used to implement a
 508 *	   top-down scan of all cpusets.  This scan loads a pointer
 509 *	   to each cpuset marked is_sched_load_balance into the
  510 *	   array 'csa'.  For our purposes, rebuilding the scheduler's
 511 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 512 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 513 *	   that need to be load balanced, for convenient iterative
 514 *	   access by the subsequent code that finds the best partition,
  515 *	   i.e. the set of domains (subsets) of CPUs such that the
 516 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 517 *	   is a subset of one of these domains, while there are as
 518 *	   many such domains as possible, each as small as possible.
 519 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 520 *	   the kernel/sched.c routine partition_sched_domains() in a
 521 *	   convenient format, that can be easily compared to the prior
 522 *	   value to determine what partition elements (sched domains)
 523 *	   were changed (added or removed.)
 524 *
 525 * Finding the best partition (set of domains):
 526 *	The triple nested loops below over i, j, k scan over the
 527 *	load balanced cpusets (using the array of cpuset pointers in
 528 *	csa[]) looking for pairs of cpusets that have overlapping
  529 *	cpus_allowed but do not yet have the same 'pn' partition
  530 *	number, and merges them into the same partition.  It keeps
 531 *	looping on the 'restart' label until it can no longer find
 532 *	any such pairs.
 533 *
 534 *	The union of the cpus_allowed masks from the set of
 535 *	all cpusets having the same 'pn' value then form the one
 536 *	element of the partition (one sched domain) to be passed to
 537 *	partition_sched_domains().
 538 */
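/*
 * A small worked example (hypothetical cpusets, for illustration only):
 * suppose three load-balanced cpusets land in csa[] with cpus_allowed
 * A={0,1}, B={1,2} and C={4,5}.  A and B overlap, so B's 'pn' is
 * rewritten to A's and ndoms drops from 3 to 2; C overlaps neither.
 * The resulting doms[] holds {0-2} and {4-5}: two sched domains, each
 * the union of the cpus_allowed masks sharing one 'pn' value.
 */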
 539static int generate_sched_domains(cpumask_var_t **domains,
 540			struct sched_domain_attr **attributes)
 541{
 542	LIST_HEAD(q);		/* queue of cpusets to be scanned */
 543	struct cpuset *cp;	/* scans q */
 544	struct cpuset **csa;	/* array of all cpuset ptrs */
 545	int csn;		/* how many cpuset ptrs in csa so far */
 546	int i, j, k;		/* indices for partition finding loops */
 547	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
 548	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 549	int ndoms = 0;		/* number of sched domains in result */
 550	int nslot;		/* next empty doms[] struct cpumask slot */
 551
 552	doms = NULL;
 553	dattr = NULL;
 554	csa = NULL;
 555
 556	/* Special case for the 99% of systems with one, full, sched domain */
 557	if (is_sched_load_balance(&top_cpuset)) {
 558		ndoms = 1;
 559		doms = alloc_sched_domains(ndoms);
 560		if (!doms)
 561			goto done;
 562
 563		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 564		if (dattr) {
 565			*dattr = SD_ATTR_INIT;
 566			update_domain_attr_tree(dattr, &top_cpuset);
 567		}
 568		cpumask_copy(doms[0], top_cpuset.cpus_allowed);
 569
 570		goto done;
 571	}
 572
 573	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
 574	if (!csa)
 575		goto done;
 576	csn = 0;
 577
 578	list_add(&top_cpuset.stack_list, &q);
 579	while (!list_empty(&q)) {
 580		struct cgroup *cont;
 581		struct cpuset *child;   /* scans child cpusets of cp */
 582
 583		cp = list_first_entry(&q, struct cpuset, stack_list);
 584		list_del(q.next);
 585
 586		if (cpumask_empty(cp->cpus_allowed))
 587			continue;
 588
 589		/*
 590		 * All child cpusets contain a subset of the parent's cpus, so
 591		 * just skip them, and then we call update_domain_attr_tree()
 592		 * to calc relax_domain_level of the corresponding sched
 593		 * domain.
 594		 */
 595		if (is_sched_load_balance(cp)) {
 596			csa[csn++] = cp;
 597			continue;
 598		}
 599
 600		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 601			child = cgroup_cs(cont);
 602			list_add_tail(&child->stack_list, &q);
 603		}
  604	}
 605
 606	for (i = 0; i < csn; i++)
 607		csa[i]->pn = i;
 608	ndoms = csn;
 609
 610restart:
 611	/* Find the best partition (set of sched domains) */
 612	for (i = 0; i < csn; i++) {
 613		struct cpuset *a = csa[i];
 614		int apn = a->pn;
 615
 616		for (j = 0; j < csn; j++) {
 617			struct cpuset *b = csa[j];
 618			int bpn = b->pn;
 619
 620			if (apn != bpn && cpusets_overlap(a, b)) {
 621				for (k = 0; k < csn; k++) {
 622					struct cpuset *c = csa[k];
 623
 624					if (c->pn == bpn)
 625						c->pn = apn;
 626				}
 627				ndoms--;	/* one less element */
 628				goto restart;
 629			}
 630		}
 631	}
 632
 633	/*
 634	 * Now we know how many domains to create.
 635	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 636	 */
 637	doms = alloc_sched_domains(ndoms);
 638	if (!doms)
 639		goto done;
 640
 641	/*
 642	 * The rest of the code, including the scheduler, can deal with
 643	 * dattr==NULL case. No need to abort if alloc fails.
 644	 */
 645	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
 646
 647	for (nslot = 0, i = 0; i < csn; i++) {
 648		struct cpuset *a = csa[i];
 649		struct cpumask *dp;
 650		int apn = a->pn;
 651
 652		if (apn < 0) {
 653			/* Skip completed partitions */
 654			continue;
 655		}
 656
 657		dp = doms[nslot];
 658
 659		if (nslot == ndoms) {
 660			static int warnings = 10;
 661			if (warnings) {
 662				printk(KERN_WARNING
 663				 "rebuild_sched_domains confused:"
 664				  " nslot %d, ndoms %d, csn %d, i %d,"
 665				  " apn %d\n",
 666				  nslot, ndoms, csn, i, apn);
 667				warnings--;
 668			}
 669			continue;
 670		}
 671
 672		cpumask_clear(dp);
 673		if (dattr)
 674			*(dattr + nslot) = SD_ATTR_INIT;
 675		for (j = i; j < csn; j++) {
 676			struct cpuset *b = csa[j];
 677
 678			if (apn == b->pn) {
 679				cpumask_or(dp, dp, b->cpus_allowed);
 680				if (dattr)
 681					update_domain_attr_tree(dattr + nslot, b);
 682
 683				/* Done with this partition */
 684				b->pn = -1;
 685			}
 686		}
 687		nslot++;
 688	}
 689	BUG_ON(nslot != ndoms);
 690
 691done:
 692	kfree(csa);
 693
 694	/*
 695	 * Fallback to the default domain if kmalloc() failed.
 696	 * See comments in partition_sched_domains().
 697	 */
 698	if (doms == NULL)
 699		ndoms = 1;
 700
 701	*domains    = doms;
 702	*attributes = dattr;
 703	return ndoms;
 704}
 705
 706/*
 707 * Rebuild scheduler domains.
 708 *
 709 * Call with neither cgroup_mutex held nor within get_online_cpus().
 710 * Takes both cgroup_mutex and get_online_cpus().
 711 *
 712 * Cannot be directly called from cpuset code handling changes
 713 * to the cpuset pseudo-filesystem, because it cannot be called
 714 * from code that already holds cgroup_mutex.
 715 */
 716static void do_rebuild_sched_domains(struct work_struct *unused)
 717{
 718	struct sched_domain_attr *attr;
 719	cpumask_var_t *doms;
 720	int ndoms;
 721
 722	get_online_cpus();
 723
 724	/* Generate domain masks and attrs */
 725	cgroup_lock();
 726	ndoms = generate_sched_domains(&doms, &attr);
 727	cgroup_unlock();
 728
 729	/* Have scheduler rebuild the domains */
 730	partition_sched_domains(ndoms, doms, attr);
 731
 732	put_online_cpus();
 733}
 734#else /* !CONFIG_SMP */
 735static void do_rebuild_sched_domains(struct work_struct *unused)
 736{
 737}
 738
 739static int generate_sched_domains(cpumask_var_t **domains,
 740			struct sched_domain_attr **attributes)
 741{
 742	*domains = NULL;
 743	return 1;
 744}
 745#endif /* CONFIG_SMP */
 746
 747static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
 748
 749/*
 750 * Rebuild scheduler domains, asynchronously via workqueue.
 751 *
 752 * If the flag 'sched_load_balance' of any cpuset with non-empty
 753 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 754 * which has that flag enabled, or if any cpuset with a non-empty
 755 * 'cpus' is removed, then call this routine to rebuild the
 756 * scheduler's dynamic sched domains.
 757 *
 758 * The rebuild_sched_domains() and partition_sched_domains()
 759 * routines must nest cgroup_lock() inside get_online_cpus(),
  760 * but the cpuset changes handled here must nest the locks the
  761 * other way, holding cgroup_lock() for much of the code.
 762 *
 763 * So in order to avoid an ABBA deadlock, the cpuset code handling
 764 * these user changes delegates the actual sched domain rebuilding
 765 * to a separate workqueue thread, which ends up processing the
 766 * above do_rebuild_sched_domains() function.
 767 */
 768static void async_rebuild_sched_domains(void)
 769{
 770	queue_work(cpuset_wq, &rebuild_sched_domains_work);
 771}
 772
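/*
 * Sketch of the ordering being avoided (restating the comment above):
 * the rebuild path takes get_online_cpus() and then cgroup_lock(), while
 * a write to a cpuset file already holds cgroup_lock() and would then
 * need get_online_cpus().  Deferring to cpuset_wq means neither path
 * takes the two locks in the conflicting order.
 */
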
 773/*
 774 * Accomplishes the same scheduler domain rebuild as the above
 775 * async_rebuild_sched_domains(), however it directly calls the
 776 * rebuild routine synchronously rather than calling it via an
 777 * asynchronous work thread.
 778 *
 779 * This can only be called from code that is not holding
 780 * cgroup_mutex (not nested in a cgroup_lock() call.)
 781 */
 782void rebuild_sched_domains(void)
 783{
 784	do_rebuild_sched_domains(NULL);
 785}
 786
 787/**
 788 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
 789 * @tsk: task to test
 790 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
 791 *
 792 * Call with cgroup_mutex held.  May take callback_mutex during call.
 793 * Called for each task in a cgroup by cgroup_scan_tasks().
  794 * Return nonzero if this task's cpus_allowed mask should be changed (in other
 795 * words, if its mask is not equal to its cpuset's mask).
 796 */
 797static int cpuset_test_cpumask(struct task_struct *tsk,
 798			       struct cgroup_scanner *scan)
 799{
 800	return !cpumask_equal(&tsk->cpus_allowed,
 801			(cgroup_cs(scan->cg))->cpus_allowed);
 802}
 803
 804/**
 805 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 806 * @tsk: task to test
 807 * @scan: struct cgroup_scanner containing the cgroup of the task
 808 *
 809 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 810 * cpus_allowed mask needs to be changed.
 811 *
 812 * We don't need to re-check for the cgroup/cpuset membership, since we're
 813 * holding cgroup_lock() at this point.
 814 */
 815static void cpuset_change_cpumask(struct task_struct *tsk,
 816				  struct cgroup_scanner *scan)
 817{
 818	set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
 819}
 820
 821/**
 822 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 823 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 824 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 825 *
 826 * Called with cgroup_mutex held
 827 *
 828 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 829 * calling callback functions for each.
 830 *
 831 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 832 * if @heap != NULL.
 833 */
 834static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 835{
 836	struct cgroup_scanner scan;
 837
 838	scan.cg = cs->css.cgroup;
 839	scan.test_task = cpuset_test_cpumask;
 840	scan.process_task = cpuset_change_cpumask;
 841	scan.heap = heap;
 842	cgroup_scan_tasks(&scan);
 843}
 844
 845/**
 846 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 847 * @cs: the cpuset to consider
 848 * @buf: buffer of cpu numbers written to this cpuset
 849 */
 850static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 851			  const char *buf)
 852{
 853	struct ptr_heap heap;
 854	int retval;
 855	int is_load_balanced;
 856
 857	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
 858	if (cs == &top_cpuset)
 859		return -EACCES;
 860
 861	/*
 862	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
 863	 * Since cpulist_parse() fails on an empty mask, we special case
 864	 * that parsing.  The validate_change() call ensures that cpusets
 865	 * with tasks have cpus.
 866	 */
 867	if (!*buf) {
 868		cpumask_clear(trialcs->cpus_allowed);
 869	} else {
 870		retval = cpulist_parse(buf, trialcs->cpus_allowed);
 871		if (retval < 0)
 872			return retval;
 873
 874		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
 875			return -EINVAL;
 876	}
 877	retval = validate_change(cs, trialcs);
 878	if (retval < 0)
 879		return retval;
 880
 881	/* Nothing to do if the cpus didn't change */
 882	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
 883		return 0;
 884
 885	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
 886	if (retval)
 887		return retval;
 888
 889	is_load_balanced = is_sched_load_balance(trialcs);
 890
 891	mutex_lock(&callback_mutex);
 892	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 893	mutex_unlock(&callback_mutex);
 894
 895	/*
 896	 * Scan tasks in the cpuset, and update the cpumasks of any
 897	 * that need an update.
 898	 */
 899	update_tasks_cpumask(cs, &heap);
 900
 901	heap_free(&heap);
 902
 903	if (is_load_balanced)
 904		async_rebuild_sched_domains();
 905	return 0;
 906}
 907
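/*
 * For illustration, a userspace write such as (the mount point is only an
 * example; the file is "cpus" with noprefix, "cpuset.cpus" otherwise):
 *
 *	echo 0-3,8 > /dev/cpuset/mygroup/cpus
 *
 * reaches update_cpumask() via cpuset_write_resmask().  An empty write is
 * accepted only when the cpuset has no tasks, and writing to the root
 * cpuset fails with -EACCES.
 */
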
 908/*
 909 * cpuset_migrate_mm
 910 *
 911 *    Migrate memory region from one set of nodes to another.
 912 *
  913 *    Temporarily set the task's mems_allowed to the target nodes of migration,
 914 *    so that the migration code can allocate pages on these nodes.
 915 *
 916 *    Call holding cgroup_mutex, so current's cpuset won't change
  917 *    during this call, as cgroup_mutex holds off any cpuset_attach()
 918 *    calls.  Therefore we don't need to take task_lock around the
 919 *    call to guarantee_online_mems(), as we know no one is changing
 920 *    our task's cpuset.
 921 *
 922 *    While the mm_struct we are migrating is typically from some
 923 *    other task, the task_struct mems_allowed that we are hacking
 924 *    is for our current task, which must allocate new pages for that
 925 *    migrating memory region.
 926 */
 927
 928static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 929							const nodemask_t *to)
 930{
 931	struct task_struct *tsk = current;
 932
 933	tsk->mems_allowed = *to;
 934
 935	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 936
  937	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
 938}
 939
 940/*
 941 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 942 * @tsk: the task to change
  943 * @newmems: the new nodes the task will be allowed to use
 944 *
 945 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 946 * we structure updates as setting all new allowed nodes, then clearing newly
 947 * disallowed ones.
 948 */
 949static void cpuset_change_task_nodemask(struct task_struct *tsk,
 950					nodemask_t *newmems)
 951{
 952repeat:
 953	/*
 954	 * Allow tasks that have access to memory reserves because they have
 955	 * been OOM killed to get memory anywhere.
 956	 */
 957	if (unlikely(test_thread_flag(TIF_MEMDIE)))
 958		return;
 959	if (current->flags & PF_EXITING) /* Let dying task have memory */
 960		return;
 961
 962	task_lock(tsk);
 963	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 964	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 965
 966
  967	/*
  968	 * Ensure ->mems_allowed_change_disable is checked only after all the
  969	 * new allowed nodes have been set.
  970	 *
  971	 * The read-side task may then see a nodemask containing both new and
  972	 * old allowed nodes, and if it allocates a page while the cpuset is
  973	 * clearing the newly disallowed ones, it still sees the new allowed bits.
  974	 *
  975	 * If setting all the new allowed nodes came after this check, the set
  976	 * and the clear of the newly disallowed ones would run back to back,
  977	 * and the read-side task might find no node to allocate from.
  978	 */
 979	smp_mb();
 980
 981	/*
  982	 * Memory allocation is very fast, so we needn't sleep while waiting
  983	 * for the read side.
 984	 */
 985	while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 986		task_unlock(tsk);
 987		if (!task_curr(tsk))
 988			yield();
 989		goto repeat;
 990	}
 991
 992	/*
  993	 * Ensure ->mems_allowed_change_disable is checked before clearing the
  994	 * newly disallowed nodes.
  995	 *
  996	 * If the newly disallowed bits were cleared before that check, the
  997	 * read-side task might find no node to allocate from.
 998	 */
 999	smp_mb();
1000
1001	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
1002	tsk->mems_allowed = *newmems;
1003	task_unlock(tsk);
1004}
1005
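/*
 * For illustration, with hypothetical values: if the old mems_allowed is
 * {0} and *newmems is {1}, cpuset_change_task_nodemask() first publishes
 * the union {0,1} and only then narrows to {1}, so a concurrent page
 * allocation never observes an empty mems_allowed.
 */
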
1006/*
 1007 * Update the task's mems_allowed and rebind its mempolicy and its vmas'
 1008 * mempolicies to the cpuset's new mems_allowed, and migrate pages to new nodes if
1009 * memory_migrate flag is set. Called with cgroup_mutex held.
1010 */
1011static void cpuset_change_nodemask(struct task_struct *p,
1012				   struct cgroup_scanner *scan)
1013{
1014	struct mm_struct *mm;
1015	struct cpuset *cs;
1016	int migrate;
1017	const nodemask_t *oldmem = scan->data;
1018	static nodemask_t newmems;	/* protected by cgroup_mutex */
1019
1020	cs = cgroup_cs(scan->cg);
1021	guarantee_online_mems(cs, &newmems);
1022
1023	cpuset_change_task_nodemask(p, &newmems);
1024
1025	mm = get_task_mm(p);
1026	if (!mm)
1027		return;
1028
1029	migrate = is_memory_migrate(cs);
1030
1031	mpol_rebind_mm(mm, &cs->mems_allowed);
1032	if (migrate)
1033		cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1034	mmput(mm);
1035}
1036
1037static void *cpuset_being_rebound;
1038
1039/**
1040 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1041 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1042 * @oldmem: old mems_allowed of cpuset cs
1043 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1044 *
1045 * Called with cgroup_mutex held
1046 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1047 * if @heap != NULL.
1048 */
1049static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1050				 struct ptr_heap *heap)
1051{
1052	struct cgroup_scanner scan;
1053
1054	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
1055
1056	scan.cg = cs->css.cgroup;
1057	scan.test_task = NULL;
1058	scan.process_task = cpuset_change_nodemask;
1059	scan.heap = heap;
1060	scan.data = (nodemask_t *)oldmem;
1061
1062	/*
1063	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1064	 * take while holding tasklist_lock.  Forks can happen - the
1065	 * mpol_dup() cpuset_being_rebound check will catch such forks,
1066	 * and rebind their vma mempolicies too.  Because we still hold
1067	 * the global cgroup_mutex, we know that no other rebind effort
1068	 * will be contending for the global variable cpuset_being_rebound.
1069	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1070	 * is idempotent.  Also migrate pages in each mm to new nodes.
1071	 */
1072	cgroup_scan_tasks(&scan);
1073
1074	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
1075	cpuset_being_rebound = NULL;
1076}
1077
1078/*
1079 * Handle user request to change the 'mems' memory placement
1080 * of a cpuset.  Needs to validate the request, update the
 1081 * cpuset's mems_allowed, and for each task in the cpuset,
 1082 * update mems_allowed and rebind the task's mempolicy and any vma
 1083 * mempolicies, and if the cpuset is marked 'memory_migrate',
 1084 * migrate the task's pages to the new memory.
1085 *
1086 * Call with cgroup_mutex held.  May take callback_mutex during call.
1087 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 1088 * lock each such task's mm->mmap_sem, scan its vmas and rebind
 1089 * their mempolicies to the cpuset's new mems_allowed.
1090 */
1091static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1092			   const char *buf)
1093{
1094	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
1095	int retval;
1096	struct ptr_heap heap;
1097
1098	if (!oldmem)
1099		return -ENOMEM;
1100
1101	/*
 1102	 * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
1103	 * it's read-only
1104	 */
1105	if (cs == &top_cpuset) {
1106		retval = -EACCES;
1107		goto done;
1108	}
1109
1110	/*
1111	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1112	 * Since nodelist_parse() fails on an empty mask, we special case
1113	 * that parsing.  The validate_change() call ensures that cpusets
1114	 * with tasks have memory.
1115	 */
1116	if (!*buf) {
1117		nodes_clear(trialcs->mems_allowed);
1118	} else {
1119		retval = nodelist_parse(buf, trialcs->mems_allowed);
1120		if (retval < 0)
1121			goto done;
1122
1123		if (!nodes_subset(trialcs->mems_allowed,
1124				node_states[N_HIGH_MEMORY])) {
 1125			retval = -EINVAL;
1126			goto done;
1127		}
1128	}
1129	*oldmem = cs->mems_allowed;
1130	if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
1131		retval = 0;		/* Too easy - nothing to do */
1132		goto done;
1133	}
1134	retval = validate_change(cs, trialcs);
1135	if (retval < 0)
1136		goto done;
1137
1138	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1139	if (retval < 0)
1140		goto done;
1141
1142	mutex_lock(&callback_mutex);
1143	cs->mems_allowed = trialcs->mems_allowed;
1144	mutex_unlock(&callback_mutex);
1145
1146	update_tasks_nodemask(cs, oldmem, &heap);
1147
1148	heap_free(&heap);
1149done:
1150	NODEMASK_FREE(oldmem);
1151	return retval;
1152}
1153
1154int current_cpuset_is_being_rebound(void)
1155{
1156	return task_cs(current) == cpuset_being_rebound;
1157}
1158
1159static int update_relax_domain_level(struct cpuset *cs, s64 val)
1160{
1161#ifdef CONFIG_SMP
1162	if (val < -1 || val >= sched_domain_level_max)
1163		return -EINVAL;
1164#endif
1165
1166	if (val != cs->relax_domain_level) {
1167		cs->relax_domain_level = val;
1168		if (!cpumask_empty(cs->cpus_allowed) &&
1169		    is_sched_load_balance(cs))
1170			async_rebuild_sched_domains();
1171	}
1172
1173	return 0;
1174}
1175
1176/*
1177 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
1178 * @tsk: task to be updated
1179 * @scan: struct cgroup_scanner containing the cgroup of the task
1180 *
1181 * Called by cgroup_scan_tasks() for each task in a cgroup.
1182 *
1183 * We don't need to re-check for the cgroup/cpuset membership, since we're
1184 * holding cgroup_lock() at this point.
1185 */
1186static void cpuset_change_flag(struct task_struct *tsk,
1187				struct cgroup_scanner *scan)
1188{
1189	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
1190}
1191
1192/*
1193 * update_tasks_flags - update the spread flags of tasks in the cpuset.
 1194 * @cs: the cpuset in which each task's spread flags need to be changed
1195 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1196 *
1197 * Called with cgroup_mutex held
1198 *
1199 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1200 * calling callback functions for each.
1201 *
1202 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1203 * if @heap != NULL.
1204 */
1205static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1206{
1207	struct cgroup_scanner scan;
1208
1209	scan.cg = cs->css.cgroup;
1210	scan.test_task = NULL;
1211	scan.process_task = cpuset_change_flag;
1212	scan.heap = heap;
1213	cgroup_scan_tasks(&scan);
1214}
1215
1216/*
1217 * update_flag - read a 0 or a 1 in a file and update associated flag
1218 * bit:		the bit to update (see cpuset_flagbits_t)
1219 * cs:		the cpuset to update
1220 * turning_on: 	whether the flag is being set or cleared
1221 *
1222 * Call with cgroup_mutex held.
1223 */
1224
1225static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1226		       int turning_on)
1227{
1228	struct cpuset *trialcs;
1229	int balance_flag_changed;
1230	int spread_flag_changed;
1231	struct ptr_heap heap;
1232	int err;
1233
1234	trialcs = alloc_trial_cpuset(cs);
1235	if (!trialcs)
1236		return -ENOMEM;
1237
1238	if (turning_on)
1239		set_bit(bit, &trialcs->flags);
1240	else
1241		clear_bit(bit, &trialcs->flags);
1242
1243	err = validate_change(cs, trialcs);
1244	if (err < 0)
1245		goto out;
1246
1247	err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1248	if (err < 0)
1249		goto out;
1250
1251	balance_flag_changed = (is_sched_load_balance(cs) !=
1252				is_sched_load_balance(trialcs));
1253
1254	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1255			|| (is_spread_page(cs) != is_spread_page(trialcs)));
1256
1257	mutex_lock(&callback_mutex);
1258	cs->flags = trialcs->flags;
1259	mutex_unlock(&callback_mutex);
1260
1261	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1262		async_rebuild_sched_domains();
1263
1264	if (spread_flag_changed)
1265		update_tasks_flags(cs, &heap);
1266	heap_free(&heap);
1267out:
1268	free_trial_cpuset(trialcs);
1269	return err;
1270}
1271
1272/*
1273 * Frequency meter - How fast is some event occurring?
1274 *
1275 * These routines manage a digitally filtered, constant time based,
1276 * event frequency meter.  There are four routines:
1277 *   fmeter_init() - initialize a frequency meter.
1278 *   fmeter_markevent() - called each time the event happens.
1279 *   fmeter_getrate() - returns the recent rate of such events.
1280 *   fmeter_update() - internal routine used to update fmeter.
1281 *
1282 * A common data structure is passed to each of these routines,
1283 * which is used to keep track of the state required to manage the
1284 * frequency meter and its digital filter.
1285 *
1286 * The filter works on the number of events marked per unit time.
1287 * The filter is single-pole low-pass recursive (IIR).  The time unit
1288 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1289 * simulate 3 decimal digits of precision (multiplied by 1000).
1290 *
1291 * With an FM_COEF of 933, and a time base of 1 second, the filter
1292 * has a half-life of 10 seconds, meaning that if the events quit
1293 * happening, then the rate returned from the fmeter_getrate()
1294 * will be cut in half each 10 seconds, until it converges to zero.
1295 *
1296 * It is not worth doing a real infinitely recursive filter.  If more
1297 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1298 * just compute FM_MAXTICKS ticks worth, by which point the level
1299 * will be stable.
1300 *
1301 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1302 * arithmetic overflow in the fmeter_update() routine.
1303 *
1304 * Given the simple 32 bit integer arithmetic used, this meter works
1305 * best for reporting rates between one per millisecond (msec) and
1306 * one per 32 (approx) seconds.  At constant rates faster than one
1307 * per msec it maxes out at values just under 1,000,000.  At constant
1308 * rates between one per msec, and one per second it will stabilize
1309 * to a value N*1000, where N is the rate of events per second.
1310 * At constant rates between one per second and one per 32 seconds,
1311 * it will be choppy, moving up on the seconds that have an event,
1312 * and then decaying until the next event.  At rates slower than
1313 * about one in 32 seconds, it decays all the way back to zero between
1314 * each event.
1315 */
1316
1317#define FM_COEF 933		/* coefficient for half-life of 10 secs */
1318#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1319#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
1320#define FM_SCALE 1000		/* faux fixed point scale */
1321
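/*
 * Worked example using the values above: one fmeter_markevent() call adds
 * FM_SCALE to cnt, and the next fmeter_update() folds in
 * (FM_SCALE - FM_COEF) * cnt / FM_SCALE = 67.  With no further events the
 * value then decays by 933/1000 per second, i.e. to about half after 10
 * seconds (0.933^10 ~= 0.5).  At a steady N events per second the value
 * converges to roughly N * 1000, as noted above.
 */
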
1322/* Initialize a frequency meter */
1323static void fmeter_init(struct fmeter *fmp)
1324{
1325	fmp->cnt = 0;
1326	fmp->val = 0;
1327	fmp->time = 0;
1328	spin_lock_init(&fmp->lock);
1329}
1330
1331/* Internal meter update - process cnt events and update value */
1332static void fmeter_update(struct fmeter *fmp)
1333{
1334	time_t now = get_seconds();
1335	time_t ticks = now - fmp->time;
1336
1337	if (ticks == 0)
1338		return;
1339
1340	ticks = min(FM_MAXTICKS, ticks);
1341	while (ticks-- > 0)
1342		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1343	fmp->time = now;
1344
1345	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1346	fmp->cnt = 0;
1347}
1348
1349/* Process any previous ticks, then bump cnt by one (times scale). */
1350static void fmeter_markevent(struct fmeter *fmp)
1351{
1352	spin_lock(&fmp->lock);
1353	fmeter_update(fmp);
1354	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1355	spin_unlock(&fmp->lock);
1356}
1357
1358/* Process any previous ticks, then return current value. */
1359static int fmeter_getrate(struct fmeter *fmp)
1360{
1361	int val;
1362
1363	spin_lock(&fmp->lock);
1364	fmeter_update(fmp);
1365	val = fmp->val;
1366	spin_unlock(&fmp->lock);
1367	return val;
1368}
1369
1370/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1371static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1372			     struct task_struct *tsk)
1373{
1374	struct cpuset *cs = cgroup_cs(cont);
1375
1376	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1377		return -ENOSPC;
1378
1379	/*
1380	 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
1381	 * cannot change their cpu affinity and isolating such threads by their
1382	 * set of allowed nodes is unnecessary.  Thus, cpusets are not
1383	 * applicable for such threads.  This prevents checking for success of
1384	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
1385	 * be changed.
1386	 */
1387	if (tsk->flags & PF_THREAD_BOUND)
1388		return -EINVAL;
1389
1390	return 0;
1391}
1392
1393static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
1394{
1395	return security_task_setscheduler(task);
1396}
1397
1398/*
1399 * Protected by cgroup_lock. The nodemasks must be stored globally because
1400 * dynamically allocating them is not allowed in pre_attach, and they must
1401 * persist among pre_attach, attach_task, and attach.
1402 */
1403static cpumask_var_t cpus_attach;
1404static nodemask_t cpuset_attach_nodemask_from;
1405static nodemask_t cpuset_attach_nodemask_to;
1406
1407/* Set-up work for before attaching each task. */
1408static void cpuset_pre_attach(struct cgroup *cont)
1409{
1410	struct cpuset *cs = cgroup_cs(cont);
1411
1412	if (cs == &top_cpuset)
1413		cpumask_copy(cpus_attach, cpu_possible_mask);
1414	else
1415		guarantee_online_cpus(cs, cpus_attach);
1416
1417	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1418}
1419
1420/* Per-thread attachment work. */
1421static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
1422{
1423	int err;
1424	struct cpuset *cs = cgroup_cs(cont);
1425
1426	/*
1427	 * can_attach beforehand should guarantee that this doesn't fail.
1428	 * TODO: have a better way to handle failure here
1429	 */
1430	err = set_cpus_allowed_ptr(tsk, cpus_attach);
1431	WARN_ON_ONCE(err);
1432
1433	cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
1434	cpuset_update_task_spread_flag(cs, tsk);
1435}
1436
1437static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1438			  struct cgroup *oldcont, struct task_struct *tsk)
1439{
1440	struct mm_struct *mm;
1441	struct cpuset *cs = cgroup_cs(cont);
1442	struct cpuset *oldcs = cgroup_cs(oldcont);
1443
1444	/*
1445	 * Change mm, possibly for multiple threads in a threadgroup. This is
1446	 * expensive and may sleep.
1447	 */
1448	cpuset_attach_nodemask_from = oldcs->mems_allowed;
1449	cpuset_attach_nodemask_to = cs->mems_allowed;
1450	mm = get_task_mm(tsk);
1451	if (mm) {
1452		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1453		if (is_memory_migrate(cs))
1454			cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
1455					  &cpuset_attach_nodemask_to);
1456		mmput(mm);
1457	}
1458}
1459
1460/* The various types of files and directories in a cpuset file system */
1461
1462typedef enum {
1463	FILE_MEMORY_MIGRATE,
1464	FILE_CPULIST,
1465	FILE_MEMLIST,
1466	FILE_CPU_EXCLUSIVE,
1467	FILE_MEM_EXCLUSIVE,
1468	FILE_MEM_HARDWALL,
1469	FILE_SCHED_LOAD_BALANCE,
1470	FILE_SCHED_RELAX_DOMAIN_LEVEL,
1471	FILE_MEMORY_PRESSURE_ENABLED,
1472	FILE_MEMORY_PRESSURE,
1473	FILE_SPREAD_PAGE,
1474	FILE_SPREAD_SLAB,
1475} cpuset_filetype_t;
1476
1477static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1478{
1479	int retval = 0;
1480	struct cpuset *cs = cgroup_cs(cgrp);
1481	cpuset_filetype_t type = cft->private;
1482
1483	if (!cgroup_lock_live_group(cgrp))
1484		return -ENODEV;
1485
1486	switch (type) {
1487	case FILE_CPU_EXCLUSIVE:
1488		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1489		break;
1490	case FILE_MEM_EXCLUSIVE:
1491		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1492		break;
1493	case FILE_MEM_HARDWALL:
1494		retval = update_flag(CS_MEM_HARDWALL, cs, val);
1495		break;
1496	case FILE_SCHED_LOAD_BALANCE:
1497		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1498		break;
1499	case FILE_MEMORY_MIGRATE:
1500		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1501		break;
1502	case FILE_MEMORY_PRESSURE_ENABLED:
1503		cpuset_memory_pressure_enabled = !!val;
1504		break;
1505	case FILE_MEMORY_PRESSURE:
1506		retval = -EACCES;
1507		break;
1508	case FILE_SPREAD_PAGE:
1509		retval = update_flag(CS_SPREAD_PAGE, cs, val);
1510		break;
1511	case FILE_SPREAD_SLAB:
1512		retval = update_flag(CS_SPREAD_SLAB, cs, val);
1513		break;
1514	default:
1515		retval = -EINVAL;
1516		break;
1517	}
1518	cgroup_unlock();
1519	return retval;
1520}
1521
1522static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1523{
1524	int retval = 0;
1525	struct cpuset *cs = cgroup_cs(cgrp);
1526	cpuset_filetype_t type = cft->private;
1527
1528	if (!cgroup_lock_live_group(cgrp))
1529		return -ENODEV;
1530
1531	switch (type) {
1532	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1533		retval = update_relax_domain_level(cs, val);
1534		break;
1535	default:
1536		retval = -EINVAL;
1537		break;
1538	}
1539	cgroup_unlock();
1540	return retval;
1541}
1542
1543/*
1544 * Common handling for a write to a "cpus" or "mems" file.
1545 */
1546static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1547				const char *buf)
1548{
1549	int retval = 0;
1550	struct cpuset *cs = cgroup_cs(cgrp);
1551	struct cpuset *trialcs;
1552
1553	if (!cgroup_lock_live_group(cgrp))
1554		return -ENODEV;
1555
1556	trialcs = alloc_trial_cpuset(cs);
1557	if (!trialcs) {
1558		retval = -ENOMEM;
1559		goto out;
1560	}
1561
1562	switch (cft->private) {
1563	case FILE_CPULIST:
1564		retval = update_cpumask(cs, trialcs, buf);
1565		break;
1566	case FILE_MEMLIST:
1567		retval = update_nodemask(cs, trialcs, buf);
1568		break;
1569	default:
1570		retval = -EINVAL;
1571		break;
1572	}
1573
1574	free_trial_cpuset(trialcs);
1575out:
1576	cgroup_unlock();
1577	return retval;
1578}
1579
1580/*
1581 * These ascii lists should be read in a single call, by using a user
1582 * buffer large enough to hold the entire map.  If read in smaller
1583 * chunks, there is no guarantee of atomicity.  Since the display format
1584 * used, list of ranges of sequential numbers, is variable length,
1585 * and since these maps can change value dynamically, one could read
1586 * gibberish by doing partial reads while a list was changing.
1587 * A single large read to a buffer that crosses a page boundary is
1588 * ok, because the result being copied to user land is not recomputed
1589 * across a page fault.
1590 */
1591
1592static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1593{
1594	size_t count;
1595
1596	mutex_lock(&callback_mutex);
1597	count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
1598	mutex_unlock(&callback_mutex);
1599
1600	return count;
1601}
1602
1603static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1604{
1605	size_t count;
1606
1607	mutex_lock(&callback_mutex);
1608	count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
1609	mutex_unlock(&callback_mutex);
1610
1611	return count;
1612}
1613
1614static ssize_t cpuset_common_file_read(struct cgroup *cont,
1615				       struct cftype *cft,
1616				       struct file *file,
1617				       char __user *buf,
1618				       size_t nbytes, loff_t *ppos)
1619{
1620	struct cpuset *cs = cgroup_cs(cont);
1621	cpuset_filetype_t type = cft->private;
1622	char *page;
1623	ssize_t retval = 0;
1624	char *s;
1625
1626	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
1627		return -ENOMEM;
1628
1629	s = page;
1630
1631	switch (type) {
1632	case FILE_CPULIST:
1633		s += cpuset_sprintf_cpulist(s, cs);
1634		break;
1635	case FILE_MEMLIST:
1636		s += cpuset_sprintf_memlist(s, cs);
1637		break;
1638	default:
1639		retval = -EINVAL;
1640		goto out;
1641	}
1642	*s++ = '\n';
1643
1644	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1645out:
1646	free_page((unsigned long)page);
1647	return retval;
1648}
1649
1650static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1651{
1652	struct cpuset *cs = cgroup_cs(cont);
1653	cpuset_filetype_t type = cft->private;
1654	switch (type) {
1655	case FILE_CPU_EXCLUSIVE:
1656		return is_cpu_exclusive(cs);
1657	case FILE_MEM_EXCLUSIVE:
1658		return is_mem_exclusive(cs);
1659	case FILE_MEM_HARDWALL:
1660		return is_mem_hardwall(cs);
1661	case FILE_SCHED_LOAD_BALANCE:
1662		return is_sched_load_balance(cs);
1663	case FILE_MEMORY_MIGRATE:
1664		return is_memory_migrate(cs);
1665	case FILE_MEMORY_PRESSURE_ENABLED:
1666		return cpuset_memory_pressure_enabled;
1667	case FILE_MEMORY_PRESSURE:
1668		return fmeter_getrate(&cs->fmeter);
1669	case FILE_SPREAD_PAGE:
1670		return is_spread_page(cs);
1671	case FILE_SPREAD_SLAB:
1672		return is_spread_slab(cs);
1673	default:
1674		BUG();
1675	}
1676
1677	/* Unreachable but makes gcc happy */
1678	return 0;
1679}
1680
1681static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
1682{
1683	struct cpuset *cs = cgroup_cs(cont);
1684	cpuset_filetype_t type = cft->private;
1685	switch (type) {
1686	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1687		return cs->relax_domain_level;
1688	default:
1689		BUG();
1690	}
1691
 1692	/* Unreachable but makes gcc happy */
1693	return 0;
1694}
1695
1696
1697/*
1698 * for the common functions, 'private' gives the type of file
1699 */
1700
1701static struct cftype files[] = {
1702	{
1703		.name = "cpus",
1704		.read = cpuset_common_file_read,
1705		.write_string = cpuset_write_resmask,
1706		.max_write_len = (100U + 6 * NR_CPUS),
1707		.private = FILE_CPULIST,
1708	},
1709
1710	{
1711		.name = "mems",
1712		.read = cpuset_common_file_read,
1713		.write_string = cpuset_write_resmask,
1714		.max_write_len = (100U + 6 * MAX_NUMNODES),
1715		.private = FILE_MEMLIST,
1716	},
1717
1718	{
1719		.name = "cpu_exclusive",
1720		.read_u64 = cpuset_read_u64,
1721		.write_u64 = cpuset_write_u64,
1722		.private = FILE_CPU_EXCLUSIVE,
1723	},
1724
1725	{
1726		.name = "mem_exclusive",
1727		.read_u64 = cpuset_read_u64,
1728		.write_u64 = cpuset_write_u64,
1729		.private = FILE_MEM_EXCLUSIVE,
1730	},
1731
1732	{
1733		.name = "mem_hardwall",
1734		.read_u64 = cpuset_read_u64,
1735		.write_u64 = cpuset_write_u64,
1736		.private = FILE_MEM_HARDWALL,
1737	},
1738
1739	{
1740		.name = "sched_load_balance",
1741		.read_u64 = cpuset_read_u64,
1742		.write_u64 = cpuset_write_u64,
1743		.private = FILE_SCHED_LOAD_BALANCE,
1744	},
1745
1746	{
1747		.name = "sched_relax_domain_level",
1748		.read_s64 = cpuset_read_s64,
1749		.write_s64 = cpuset_write_s64,
1750		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1751	},
1752
1753	{
1754		.name = "memory_migrate",
1755		.read_u64 = cpuset_read_u64,
1756		.write_u64 = cpuset_write_u64,
1757		.private = FILE_MEMORY_MIGRATE,
1758	},
1759
1760	{
1761		.name = "memory_pressure",
1762		.read_u64 = cpuset_read_u64,
1763		.write_u64 = cpuset_write_u64,
1764		.private = FILE_MEMORY_PRESSURE,
1765		.mode = S_IRUGO,
1766	},
1767
1768	{
1769		.name = "memory_spread_page",
1770		.read_u64 = cpuset_read_u64,
1771		.write_u64 = cpuset_write_u64,
1772		.private = FILE_SPREAD_PAGE,
1773	},
1774
1775	{
1776		.name = "memory_spread_slab",
1777		.read_u64 = cpuset_read_u64,
1778		.write_u64 = cpuset_write_u64,
1779		.private = FILE_SPREAD_SLAB,
1780	},
1781};
1782
1783static struct cftype cft_memory_pressure_enabled = {
1784	.name = "memory_pressure_enabled",
1785	.read_u64 = cpuset_read_u64,
1786	.write_u64 = cpuset_write_u64,
1787	.private = FILE_MEMORY_PRESSURE_ENABLED,
1788};
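/*
 * Illustrative usage sketch: the cftype entries above appear as per-cpuset
 * control files once the cpuset hierarchy is mounted.  Assuming the legacy
 * mount point /dev/cpuset (any path works) and a hypothetical child cpuset
 * "mygroup", configuration from userspace typically looks like:
 *
 *	mount -t cpuset cpuset /dev/cpuset
 *	mkdir /dev/cpuset/mygroup
 *	echo 0-3 > /dev/cpuset/mygroup/cpus	(handled as FILE_CPULIST)
 *	echo 0   > /dev/cpuset/mygroup/mems	(handled as FILE_MEMLIST)
 *	echo $$  > /dev/cpuset/mygroup/tasks
 *
 * The group name and the "0-3"/"0" values are hypothetical examples.
 */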
1789
1790static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
1791{
1792	int err;
1793
1794	err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
1795	if (err)
1796		return err;
1797	/* memory_pressure_enabled is in root cpuset only */
1798	if (!cont->parent)
1799		err = cgroup_add_file(cont, ss,
1800				      &cft_memory_pressure_enabled);
1801	return err;
1802}
1803
1804/*
1805 * post_clone() is called during cgroup_create() when the
1806 * clone_children mount argument was specified.  The cgroup
1807 * can not yet have any tasks.
1808 *
1809 * Currently we refuse to set up the cgroup - thereby
1810 * refusing to let the task be entered, and as a result refusing
1811 * the sys_unshare() or clone() which initiated it - if any
1812 * sibling cpusets have exclusive cpus or mem.
1813 *
1814 * If this becomes a problem for some users who wish to
1815 * allow that scenario, then cpuset_post_clone() could be
1816 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
1817 * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
1818 * held.
1819 */
1820static void cpuset_post_clone(struct cgroup_subsys *ss,
1821			      struct cgroup *cgroup)
1822{
1823	struct cgroup *parent, *child;
1824	struct cpuset *cs, *parent_cs;
1825
1826	parent = cgroup->parent;
1827	list_for_each_entry(child, &parent->children, sibling) {
1828		cs = cgroup_cs(child);
1829		if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
1830			return;
1831	}
1832	cs = cgroup_cs(cgroup);
1833	parent_cs = cgroup_cs(parent);
1834
1835	mutex_lock(&callback_mutex);
1836	cs->mems_allowed = parent_cs->mems_allowed;
1837	cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
1838	mutex_unlock(&callback_mutex);
1839	return;
1840}
1841
1842/*
1843 *	cpuset_create - create a cpuset
1844 *	ss:	cpuset cgroup subsystem
1845 *	cont:	control group that the new cpuset will be part of
1846 */
1847
1848static struct cgroup_subsys_state *cpuset_create(
1849	struct cgroup_subsys *ss,
1850	struct cgroup *cont)
1851{
1852	struct cpuset *cs;
1853	struct cpuset *parent;
1854
1855	if (!cont->parent) {
1856		return &top_cpuset.css;
1857	}
1858	parent = cgroup_cs(cont->parent);
1859	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
1860	if (!cs)
1861		return ERR_PTR(-ENOMEM);
1862	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1863		kfree(cs);
1864		return ERR_PTR(-ENOMEM);
1865	}
1866
1867	cs->flags = 0;
1868	if (is_spread_page(parent))
1869		set_bit(CS_SPREAD_PAGE, &cs->flags);
1870	if (is_spread_slab(parent))
1871		set_bit(CS_SPREAD_SLAB, &cs->flags);
1872	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1873	cpumask_clear(cs->cpus_allowed);
1874	nodes_clear(cs->mems_allowed);
1875	fmeter_init(&cs->fmeter);
1876	cs->relax_domain_level = -1;
1877
1878	cs->parent = parent;
1879	number_of_cpusets++;
1880	return &cs->css;
1881}
1882
1883/*
1884 * If the cpuset being removed has its flag 'sched_load_balance'
1885 * enabled, then simulate turning sched_load_balance off, which
1886 * will call async_rebuild_sched_domains().
1887 */
1888
1889static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
1890{
1891	struct cpuset *cs = cgroup_cs(cont);
1892
1893	if (is_sched_load_balance(cs))
1894		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1895
1896	number_of_cpusets--;
1897	free_cpumask_var(cs->cpus_allowed);
1898	kfree(cs);
1899}
1900
1901struct cgroup_subsys cpuset_subsys = {
1902	.name = "cpuset",
1903	.create = cpuset_create,
1904	.destroy = cpuset_destroy,
1905	.can_attach = cpuset_can_attach,
1906	.can_attach_task = cpuset_can_attach_task,
1907	.pre_attach = cpuset_pre_attach,
1908	.attach_task = cpuset_attach_task,
1909	.attach = cpuset_attach,
1910	.populate = cpuset_populate,
1911	.post_clone = cpuset_post_clone,
1912	.subsys_id = cpuset_subsys_id,
1913	.early_init = 1,
1914};
1915
1916/**
1917 * cpuset_init - initialize cpusets at system boot
1918 *
1919 * Description: Initialize top_cpuset and the cpuset internal file system,
1920 **/
1921
1922int __init cpuset_init(void)
1923{
1924	int err = 0;
1925
1926	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
1927		BUG();
1928
1929	cpumask_setall(top_cpuset.cpus_allowed);
1930	nodes_setall(top_cpuset.mems_allowed);
1931
1932	fmeter_init(&top_cpuset.fmeter);
1933	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
1934	top_cpuset.relax_domain_level = -1;
1935
1936	err = register_filesystem(&cpuset_fs_type);
1937	if (err < 0)
1938		return err;
1939
1940	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
1941		BUG();
1942
1943	number_of_cpusets = 1;
1944	return 0;
1945}
1946
1947/**
1948 * cpuset_do_move_task - move a given task to another cpuset
1949 * @tsk: pointer to the task_struct of the task to move
1950 * @scan: struct cgroup_scanner whose ->data holds the destination cgroup
1951 *
1952 * Called by cgroup_scan_tasks() for each task in a cgroup.
1953 * It simply attaches @tsk to the cgroup supplied in @scan->data.
1954 */
1955static void cpuset_do_move_task(struct task_struct *tsk,
1956				struct cgroup_scanner *scan)
1957{
1958	struct cgroup *new_cgroup = scan->data;
1959
1960	cgroup_attach_task(new_cgroup, tsk);
1961}
1962
1963/**
1964 * move_member_tasks_to_cpuset - move tasks from one cpuset to another
1965 * @from: cpuset in which the tasks currently reside
1966 * @to: cpuset to which the tasks will be moved
1967 *
1968 * Called with cgroup_mutex held
1969 * callback_mutex must not be held, as cpuset_attach() will take it.
1970 *
1971 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1972 * calling callback functions for each.
1973 */
1974static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1975{
1976	struct cgroup_scanner scan;
1977
1978	scan.cg = from->css.cgroup;
1979	scan.test_task = NULL; /* select all tasks in cgroup */
1980	scan.process_task = cpuset_do_move_task;
1981	scan.heap = NULL;
1982	scan.data = to->css.cgroup;
1983
1984	if (cgroup_scan_tasks(&scan))
1985		printk(KERN_ERR "move_member_tasks_to_cpuset: "
1986				"cgroup_scan_tasks failed\n");
1987}
1988
1989/*
1990 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
1991 * or memory nodes, we need to walk over the cpuset hierarchy,
1992 * removing that CPU or node from all cpusets.  If this removes the
1993 * last CPU or node from a cpuset, then move the tasks in the empty
1994 * cpuset to its next-highest non-empty parent.
1995 *
1996 * Called with cgroup_mutex held
1997 * callback_mutex must not be held, as cpuset_attach() will take it.
1998 */
1999static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2000{
2001	struct cpuset *parent;
2002
2003	/*
2004	 * The cgroup's css_sets list is in use if there are tasks
2005	 * in the cpuset; the list is empty if there are none;
2006	 * the cs->css.refcnt always seems to be 0.
2007	 */
2008	if (list_empty(&cs->css.cgroup->css_sets))
2009		return;
2010
2011	/*
2012	 * Find its next-highest non-empty parent (the top cpuset
2013	 * has online cpus, so it can't be empty).
2014	 */
2015	parent = cs->parent;
2016	while (cpumask_empty(parent->cpus_allowed) ||
2017			nodes_empty(parent->mems_allowed))
2018		parent = parent->parent;
2019
2020	move_member_tasks_to_cpuset(cs, parent);
2021}
2022
2023/*
2024 * Walk the specified cpuset subtree and look for empty cpusets.
2025 * The tasks of such a cpuset must be moved to a parent cpuset.
2026 *
2027 * Called with cgroup_mutex held.  We take callback_mutex to modify
2028 * cpus_allowed and mems_allowed.
2029 *
2030 * This walk processes the tree from top to bottom, completing one layer
2031 * before dropping down to the next.  It always processes a node before
2032 * any of its children.
2033 *
2034 * For now, since we lack memory hot unplug, we'll never see a cpuset
2035 * that has tasks along with an empty 'mems'.  But if we did see such
2036 * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
2037 */
2038static void scan_for_empty_cpusets(struct cpuset *root)
2039{
2040	LIST_HEAD(queue);
2041	struct cpuset *cp;	/* scans cpusets being updated */
2042	struct cpuset *child;	/* scans child cpusets of cp */
2043	struct cgroup *cont;
2044	static nodemask_t oldmems;	/* protected by cgroup_mutex */
2045
2046	list_add_tail((struct list_head *)&root->stack_list, &queue);
2047
2048	while (!list_empty(&queue)) {
2049		cp = list_first_entry(&queue, struct cpuset, stack_list);
2050		list_del(queue.next);
2051		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
2052			child = cgroup_cs(cont);
2053			list_add_tail(&child->stack_list, &queue);
2054		}
2055
2056		/* Continue past cpusets with all cpus, mems online */
2057		if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
2058		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
2059			continue;
2060
2061		oldmems = cp->mems_allowed;
2062
2063		/* Remove offline cpus and mems from this cpuset. */
2064		mutex_lock(&callback_mutex);
2065		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
2066			    cpu_active_mask);
2067		nodes_and(cp->mems_allowed, cp->mems_allowed,
2068						node_states[N_HIGH_MEMORY]);
2069		mutex_unlock(&callback_mutex);
2070
2071		/* Move tasks from the empty cpuset to a parent */
2072		if (cpumask_empty(cp->cpus_allowed) ||
2073		     nodes_empty(cp->mems_allowed))
2074			remove_tasks_in_empty_cpuset(cp);
2075		else {
2076			update_tasks_cpumask(cp, NULL);
2077			update_tasks_nodemask(cp, &oldmems, NULL);
2078		}
2079	}
2080}
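/*
 * Worked example: suppose a cpuset was configured with cpus="3" and CPU 3
 * is then hot-unplugged.  The walk above removes CPU 3 from that cpuset's
 * cpus_allowed, finds the cpuset empty, and remove_tasks_in_empty_cpuset()
 * migrates its tasks to the nearest ancestor that still has online cpus
 * and mems (ultimately top_cpuset).  The value "3" is a hypothetical
 * example chosen for illustration.
 */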
2081
2082/*
2083 * The top_cpuset tracks what CPUs and Memory Nodes are online,
2084 * period.  This is necessary in order to make cpusets transparent
2085 * (of no effect) on systems that are actively using CPU hotplug
2086 * but making no active use of cpusets.
2087 *
2088 * This routine ensures that top_cpuset.cpus_allowed tracks
2089 * cpu_active_mask on each CPU hotplug (cpuhp) event.
2090 *
2091 * Called within get_online_cpus().  Needs to call cgroup_lock()
2092 * before calling generate_sched_domains().
2093 */
2094void cpuset_update_active_cpus(void)
2095{
2096	struct sched_domain_attr *attr;
2097	cpumask_var_t *doms;
2098	int ndoms;
2099
2100	cgroup_lock();
2101	mutex_lock(&callback_mutex);
2102	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2103	mutex_unlock(&callback_mutex);
2104	scan_for_empty_cpusets(&top_cpuset);
2105	ndoms = generate_sched_domains(&doms, &attr);
2106	cgroup_unlock();
2107
2108	/* Have scheduler rebuild the domains */
2109	partition_sched_domains(ndoms, doms, attr);
2110}
2111
2112#ifdef CONFIG_MEMORY_HOTPLUG
2113/*
2114 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
2115 * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
2116 * See also the previous routine, cpuset_update_active_cpus().
2117 */
2118static int cpuset_track_online_nodes(struct notifier_block *self,
2119				unsigned long action, void *arg)
2120{
2121	static nodemask_t oldmems;	/* protected by cgroup_mutex */
2122
2123	cgroup_lock();
2124	switch (action) {
2125	case MEM_ONLINE:
2126		oldmems = top_cpuset.mems_allowed;
2127		mutex_lock(&callback_mutex);
2128		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2129		mutex_unlock(&callback_mutex);
2130		update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
2131		break;
2132	case MEM_OFFLINE:
2133		/*
2134		 * needn't update top_cpuset.mems_allowed explicitly because
2135		 * scan_for_empty_cpusets() will update it.
2136		 */
2137		scan_for_empty_cpusets(&top_cpuset);
2138		break;
2139	default:
2140		break;
2141	}
2142	cgroup_unlock();
2143
2144	return NOTIFY_OK;
2145}
2146#endif
2147
2148/**
2149 * cpuset_init_smp - initialize cpus_allowed
2150 *
2151 * Description: Finish initializing the top cpuset after the cpu and node maps are initialized
2152 **/
2153
2154void __init cpuset_init_smp(void)
2155{
2156	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2157	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2158
2159	hotplug_memory_notifier(cpuset_track_online_nodes, 10);
2160
2161	cpuset_wq = create_singlethread_workqueue("cpuset");
2162	BUG_ON(!cpuset_wq);
2163}
2164
2165/**
2166 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
2167 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2168 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2169 *
2170 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2171 * attached to the specified @tsk.  Guaranteed to return some non-empty
2172 * subset of cpu_online_map, even if this means going outside the
2173 * task's cpuset.
2174 **/
2175
2176void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2177{
2178	mutex_lock(&callback_mutex);
2179	task_lock(tsk);
2180	guarantee_online_cpus(task_cs(tsk), pmask);
2181	task_unlock(tsk);
2182	mutex_unlock(&callback_mutex);
2183}
2184
2185int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2186{
2187	const struct cpuset *cs;
2188	int cpu;
2189
2190	rcu_read_lock();
2191	cs = task_cs(tsk);
2192	if (cs)
2193		do_set_cpus_allowed(tsk, cs->cpus_allowed);
2194	rcu_read_unlock();
2195
2196	/*
2197	 * We own tsk->cpus_allowed, nobody can change it under us.
2198	 *
2199	 * But we used cs && cs->cpus_allowed lockless and thus can
2200	 * race with cgroup_attach_task() or update_cpumask() and get
2201	 * the wrong tsk->cpus_allowed. However, both cases imply the
2202	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2203	 * which takes task_rq_lock().
2204	 *
2205	 * If we are called after it dropped the lock we must see all
2206	 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
2207	 * set any mask even if it is not right from task_cs() pov,
2208	 * the pending set_cpus_allowed_ptr() will fix things.
2209	 */
2210
2211	cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
2212	if (cpu >= nr_cpu_ids) {
2213		/*
2214		 * Either tsk->cpus_allowed is wrong (see above) or it
2215		 * is actually empty. The latter case is only possible
2216		 * if we are racing with remove_tasks_in_empty_cpuset().
2217		 * Like above we can temporarily set any mask and rely on
2218		 * set_cpus_allowed_ptr() as synchronization point.
2219		 */
2220		do_set_cpus_allowed(tsk, cpu_possible_mask);
2221		cpu = cpumask_any(cpu_active_mask);
2222	}
2223
2224	return cpu;
2225}
2226
2227void cpuset_init_current_mems_allowed(void)
2228{
2229	nodes_setall(current->mems_allowed);
2230}
2231
2232/**
2233 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2234 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2235 *
2236 * Description: Returns the nodemask_t mems_allowed of the cpuset
2237 * attached to the specified @tsk.  Guaranteed to return some non-empty
2238 * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
2239 * task's cpuset.
2240 **/
2241
2242nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2243{
2244	nodemask_t mask;
2245
2246	mutex_lock(&callback_mutex);
2247	task_lock(tsk);
2248	guarantee_online_mems(task_cs(tsk), &mask);
2249	task_unlock(tsk);
2250	mutex_unlock(&callback_mutex);
2251
2252	return mask;
2253}
2254
2255/**
2256 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2257 * @nodemask: the nodemask to be checked
2258 *
2259 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2260 */
2261int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2262{
2263	return nodes_intersects(*nodemask, current->mems_allowed);
2264}
2265
2266/*
2267 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2268 * mem_hardwall ancestor to the specified cpuset.  Call holding
2269 * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
2270 * (an unusual configuration), then returns the root cpuset.
2271 */
2272static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2273{
2274	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
2275		cs = cs->parent;
2276	return cs;
2277}
2278
2279/**
2280 * cpuset_node_allowed_softwall - Can we allocate on a memory node?
2281 * @node: is this an allowed node?
2282 * @gfp_mask: memory allocation flags
2283 *
2284 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2285 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2286 * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
2287 * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
2288 * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
2289 * flag, yes.
2290 * Otherwise, no.
2291 *
2292 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2293 * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
2294 * might sleep, and might allow a node from an enclosing cpuset.
2295 *
2296 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2297 * cpusets, and never sleeps.
2298 *
2299 * The __GFP_THISNODE placement logic is really handled elsewhere,
2300 * by forcibly using a zonelist starting at a specified node, and by
2301 * (in get_page_from_freelist()) refusing to consider the zones for
2302 * any node on the zonelist except the first.  By the time any such
2303 * calls get to this routine, we should just shut up and say 'yes'.
2304 *
2305 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2306 * and do not allow allocations outside the current task's cpuset
2307 * unless the task has been OOM killed and is marked TIF_MEMDIE.
2308 * GFP_KERNEL allocations are not so marked, so can escape to the
2309 * nearest enclosing hardwalled ancestor cpuset.
2310 *
2311 * Scanning up parent cpusets requires callback_mutex.  The
2312 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2313 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2314 * current task's mems_allowed came up empty on the first pass over
2315 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
2316 * cpuset are short of memory, might require taking the
2317 * callback_mutex.
2318 *
2319 * The first call here from mm/page_alloc:get_page_from_freelist()
2320 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2321 * so no allocation on a node outside the cpuset is allowed (unless
2322 * in interrupt, of course).
2323 *
2324 * The second pass through get_page_from_freelist() doesn't even call
2325 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2326 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2327 * in alloc_flags.  That logic and the checks below have the combined
2328 * effect that:
2329 *	in_interrupt - any node ok (current task context irrelevant)
2330 *	GFP_ATOMIC   - any node ok
2331 *	TIF_MEMDIE   - any node ok
2332 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
2333 *	GFP_USER     - only nodes in current task's mems_allowed ok.
2334 *
2335 * Rule:
2336 *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
2337 *    pass in the __GFP_HARDWALL flag set in gfp_mask, which disables
2338 *    the code that might scan up ancestor cpusets and sleep.
2339 */
2340int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2341{
2342	const struct cpuset *cs;	/* current cpuset ancestors */
2343	int allowed;			/* is allocation on this node allowed? */
2344
2345	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2346		return 1;
2347	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2348	if (node_isset(node, current->mems_allowed))
2349		return 1;
2350	/*
2351	 * Allow tasks that have access to memory reserves because they have
2352	 * been OOM killed to get memory anywhere.
2353	 */
2354	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2355		return 1;
2356	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
2357		return 0;
2358
2359	if (current->flags & PF_EXITING) /* Let dying task have memory */
2360		return 1;
2361
2362	/* Not hardwall and node outside mems_allowed: scan up cpusets */
2363	mutex_lock(&callback_mutex);
2364
2365	task_lock(current);
2366	cs = nearest_hardwall_ancestor(task_cs(current));
2367	task_unlock(current);
2368
2369	allowed = node_isset(node, cs->mems_allowed);
2370	mutex_unlock(&callback_mutex);
2371	return allowed;
2372}
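/*
 * Worked example: consider a hierarchy where cpuset A is mem_hardwall and
 * its child B allows only node 1, and a task in B asks for memory while
 * node 1 is short.  A GFP_USER allocation carries __GFP_HARDWALL, so only
 * node 1 is acceptable; a GFP_KERNEL allocation does not, so the code
 * above scans up to A (the nearest hardwall ancestor) and any node in
 * A->mems_allowed is acceptable.  The names A, B and node 1 are
 * hypothetical values chosen for illustration.
 */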
2373
2374/*
2375 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2376 * @node: is this an allowed node?
2377 * @gfp_mask: memory allocation flags
2378 *
2379 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2380 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2381 * yes.  If the task has been OOM killed and has access to memory reserves as
2382 * specified by the TIF_MEMDIE flag, yes.
2383 * Otherwise, no.
2384 *
2385 * The __GFP_THISNODE placement logic is really handled elsewhere,
2386 * by forcibly using a zonelist starting at a specified node, and by
2387 * (in get_page_from_freelist()) refusing to consider the zones for
2388 * any node on the zonelist except the first.  By the time any such
2389 * calls get to this routine, we should just shut up and say 'yes'.
2390 *
2391 * Unlike the cpuset_node_allowed_softwall() variant, above,
2392 * this variant requires that the node be in the current task's
2393 * mems_allowed or that we're in interrupt.  It does not scan up the
2394 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2395 * It never sleeps.
2396 */
2397int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2398{
2399	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2400		return 1;
2401	if (node_isset(node, current->mems_allowed))
2402		return 1;
2403	/*
2404	 * Allow tasks that have access to memory reserves because they have
2405	 * been OOM killed to get memory anywhere.
2406	 */
2407	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2408		return 1;
2409	return 0;
2410}
2411
2412/**
2413 * cpuset_unlock - release lock on cpuset changes
2414 *
2415 * Undo the lock taken in a previous cpuset_lock() call.
2416 */
2417
2418void cpuset_unlock(void)
2419{
2420	mutex_unlock(&callback_mutex);
2421}
2422
2423/**
2424 * cpuset_mem_spread_node() - On which node to begin search for a file page
2425 * cpuset_slab_spread_node() - On which node to begin search for a slab page
2426 *
2427 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2428 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2429 * and if the memory allocation used cpuset_mem_spread_node()
2430 * to determine on which node to start looking, as it will for
2431 * certain page cache or slab cache pages such as used for file
2432 * system buffers and inode caches, then instead of starting the
2433 * search for a free page on the local node, the starting node is
2434 * spread over the task's mems_allowed nodes.
2435 *
2436 * We don't have to worry about the returned node being offline
2437 * because "it can't happen", and even if it did, it would be ok.
2438 *
2439 * The routines calling guarantee_online_mems() are careful to
2440 * only set nodes in task->mems_allowed that are online.  So it
2441 * should not be possible for the following code to return an
2442 * offline node.  But if it did, that would be ok, as this routine
2443 * is not returning the node where the allocation must be, only
2444 * the node where the search should start.  The zonelist passed to
2445 * __alloc_pages() will include all nodes.  If the slab allocator
2446 * is passed an offline node, it will fall back to the local node.
2447 * See kmem_cache_alloc_node().
2448 */
2449
2450static int cpuset_spread_node(int *rotor)
2451{
2452	int node;
2453
2454	node = next_node(*rotor, current->mems_allowed);
2455	if (node == MAX_NUMNODES)
2456		node = first_node(current->mems_allowed);
2457	*rotor = node;
2458	return node;
2459}
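/*
 * Worked example: if current->mems_allowed contains nodes {0, 2, 5} and
 * the rotor currently holds 2, next_node() returns 5; on the following
 * call next_node() runs past the last allowed node and returns
 * MAX_NUMNODES, so the rotor wraps back to first_node(), i.e. 0.
 * Successive calls therefore cycle 0 -> 2 -> 5 -> 0 -> ..., spreading
 * allocations round-robin over the allowed nodes.  The node numbers are
 * hypothetical.
 */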
2460
2461int cpuset_mem_spread_node(void)
2462{
2463	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2464		current->cpuset_mem_spread_rotor =
2465			node_random(&current->mems_allowed);
2466
2467	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2468}
2469
2470int cpuset_slab_spread_node(void)
2471{
2472	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2473		current->cpuset_slab_spread_rotor =
2474			node_random(&current->mems_allowed);
2475
2476	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2477}
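/*
 * Illustrative sketch: a kernel-side caller that wants cpuset-directed
 * spreading for a slab object might pick its starting node like this,
 * where "my_cachep" is a hypothetical kmem_cache pointer:
 *
 *	int nid = cpuset_slab_spread_node();
 *	void *obj = kmem_cache_alloc_node(my_cachep, GFP_KERNEL, nid);
 *
 * As noted above, an offline nid is harmless; kmem_cache_alloc_node()
 * falls back to the local node.
 */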
2478
2479EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2480
2481/**
2482 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2483 * @tsk1: pointer to task_struct of some task.
2484 * @tsk2: pointer to task_struct of some other task.
2485 *
2486 * Description: Return true if @tsk1's mems_allowed intersects the
2487 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
2488 * one of the task's memory usage might impact the memory available
2489 * to the other.
2490 **/
2491
2492int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2493				   const struct task_struct *tsk2)
2494{
2495	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2496}
2497
2498/**
2499 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2500 * @tsk: pointer to task_struct of some task.
2501 *
2502 * Description: Prints @tsk's name, cpuset name, and cached copy of its
2503 * mems_allowed to the kernel log.  Must hold task_lock(tsk) to allow
2504 * dereferencing task_cs(tsk).
2505 */
2506void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2507{
2508	struct dentry *dentry;
2509
2510	dentry = task_cs(tsk)->css.cgroup->dentry;
2511	spin_lock(&cpuset_buffer_lock);
2512	snprintf(cpuset_name, CPUSET_NAME_LEN,
2513		 dentry ? (const char *)dentry->d_name.name : "/");
2514	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2515			   tsk->mems_allowed);
2516	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2517	       tsk->comm, cpuset_name, cpuset_nodelist);
2518	spin_unlock(&cpuset_buffer_lock);
2519}
2520
2521/*
2522 * Collection of memory_pressure is suppressed unless
2523 * this flag is enabled by writing "1" to the special
2524 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2525 */
2526
2527int cpuset_memory_pressure_enabled __read_mostly;
2528
2529/**
2530 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2531 *
2532 * Keep a running average of the rate of synchronous (direct)
2533 * page reclaim efforts initiated by tasks in each cpuset.
2534 *
2535 * This represents the rate at which some task in the cpuset
2536 * ran low on memory on all nodes it was allowed to use, and
2537 * had to enter the kernel's page reclaim code in an effort to
2538 * create more free memory by tossing clean pages or swapping
2539 * or writing dirty pages.
2540 *
2541 * Displayed to user space in the per-cpuset read-only file
2542 * "memory_pressure".  The value displayed is an integer
2543 * representing the recent rate of entry into the synchronous
2544 * (direct) page reclaim by any task attached to the cpuset.
2545 **/
2546
2547void __cpuset_memory_pressure_bump(void)
2548{
2549	task_lock(current);
2550	fmeter_markevent(&task_cs(current)->fmeter);
2551	task_unlock(current);
2552}
2553
2554#ifdef CONFIG_PROC_PID_CPUSET
2555/*
2556 * proc_cpuset_show()
2557 *  - Print the task's cpuset path into the seq_file.
2558 *  - Used for /proc/<pid>/cpuset.
2559 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2560 *    doesn't really matter if tsk->cpuset changes after we read it,
2561 *    and we take cgroup_mutex, keeping cpuset_attach() from changing it
2562 *    anyway.
2563 */
2564static int proc_cpuset_show(struct seq_file *m, void *unused_v)
2565{
2566	struct pid *pid;
2567	struct task_struct *tsk;
2568	char *buf;
2569	struct cgroup_subsys_state *css;
2570	int retval;
2571
2572	retval = -ENOMEM;
2573	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2574	if (!buf)
2575		goto out;
2576
2577	retval = -ESRCH;
2578	pid = m->private;
2579	tsk = get_pid_task(pid, PIDTYPE_PID);
2580	if (!tsk)
2581		goto out_free;
2582
2583	retval = -EINVAL;
2584	cgroup_lock();
2585	css = task_subsys_state(tsk, cpuset_subsys_id);
2586	retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
2587	if (retval < 0)
2588		goto out_unlock;
2589	seq_puts(m, buf);
2590	seq_putc(m, '\n');
2591out_unlock:
2592	cgroup_unlock();
2593	put_task_struct(tsk);
2594out_free:
2595	kfree(buf);
2596out:
2597	return retval;
2598}
2599
2600static int cpuset_open(struct inode *inode, struct file *file)
2601{
2602	struct pid *pid = PROC_I(inode)->pid;
2603	return single_open(file, proc_cpuset_show, pid);
2604}
2605
2606const struct file_operations proc_cpuset_operations = {
2607	.open		= cpuset_open,
2608	.read		= seq_read,
2609	.llseek		= seq_lseek,
2610	.release	= single_release,
2611};
2612#endif /* CONFIG_PROC_PID_CPUSET */
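/*
 * Illustrative example: with CONFIG_PROC_PID_CPUSET enabled, the file
 * above reports a task's cpuset path relative to the cgroup mount, e.g.
 * (hypothetical output for a task in a cpuset named "mygroup"):
 *
 *	$ cat /proc/self/cpuset
 *	/mygroup
 */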
2613
2614/* Display task mems_allowed in /proc/<pid>/status file. */
2615void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2616{
2617	seq_printf(m, "Mems_allowed:\t");
2618	seq_nodemask(m, &task->mems_allowed);
2619	seq_printf(m, "\n");
2620	seq_printf(m, "Mems_allowed_list:\t");
2621	seq_nodemask_list(m, &task->mems_allowed);
2622	seq_printf(m, "\n");
2623}