   1/*
   2 *  kernel/cpuset.c
   3 *
   4 *  Processor and Memory placement constraints for sets of tasks.
   5 *
   6 *  Copyright (C) 2003 BULL SA.
   7 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
   8 *  Copyright (C) 2006 Google, Inc
   9 *
  10 *  Portions derived from Patrick Mochel's sysfs code.
  11 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
  12 *
  13 *  2003-10-10 Written by Simon Derr.
  14 *  2003-10-22 Updates by Stephen Hemminger.
  15 *  2004 May-July Rework by Paul Jackson.
  16 *  2006 Rework by Paul Menage to use generic cgroups
  17 *  2008 Rework of the scheduler domains and CPU hotplug handling
  18 *       by Max Krasnyansky
  19 *
  20 *  This file is subject to the terms and conditions of the GNU General Public
  21 *  License.  See the file COPYING in the main directory of the Linux
  22 *  distribution for more details.
  23 */
  24
  25#include <linux/cpu.h>
  26#include <linux/cpumask.h>
  27#include <linux/cpuset.h>
  28#include <linux/err.h>
  29#include <linux/errno.h>
  30#include <linux/file.h>
  31#include <linux/fs.h>
  32#include <linux/init.h>
  33#include <linux/interrupt.h>
  34#include <linux/kernel.h>
  35#include <linux/kmod.h>
  36#include <linux/list.h>
  37#include <linux/mempolicy.h>
  38#include <linux/mm.h>
  39#include <linux/memory.h>
  40#include <linux/export.h>
  41#include <linux/mount.h>
  42#include <linux/namei.h>
  43#include <linux/pagemap.h>
  44#include <linux/proc_fs.h>
  45#include <linux/rcupdate.h>
  46#include <linux/sched.h>
  47#include <linux/seq_file.h>
  48#include <linux/security.h>
  49#include <linux/slab.h>
  50#include <linux/spinlock.h>
  51#include <linux/stat.h>
  52#include <linux/string.h>
  53#include <linux/time.h>
  54#include <linux/backing-dev.h>
  55#include <linux/sort.h>
  56
  57#include <asm/uaccess.h>
  58#include <linux/atomic.h>
  59#include <linux/mutex.h>
  60#include <linux/workqueue.h>
  61#include <linux/cgroup.h>
  62#include <linux/wait.h>
  63
  64/*
  65 * Tracks how many cpusets are currently defined in the system.
  66 * When there is only one cpuset (the root cpuset) we can
  67 * short circuit some hooks.
  68 */
  69int number_of_cpusets __read_mostly;
  70
  71/* See "Frequency meter" comments, below. */
  72
  73struct fmeter {
  74	int cnt;		/* unprocessed events count */
  75	int val;		/* most recent output value */
  76	time_t time;		/* clock (secs) when val computed */
  77	spinlock_t lock;	/* guards read or write of above */
  78};
  79
  80struct cpuset {
  81	struct cgroup_subsys_state css;
  82
  83	unsigned long flags;		/* "unsigned long" so bitops work */
  84	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
  85	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
  86
  87	/*
  88	 * This is the old set of Memory Nodes that tasks took on.
  89	 *
  90	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
  91	 * - A new cpuset's old_mems_allowed is initialized when some
  92	 *   task is moved into it.
  93	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
  94	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
  95	 *   then old_mems_allowed is updated to mems_allowed.
  96	 */
  97	nodemask_t old_mems_allowed;
  98
  99	struct fmeter fmeter;		/* memory_pressure filter */
 100
 101	/*
 102	 * Tasks are being attached to this cpuset.  Used to prevent
 103	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
 104	 */
 105	int attach_in_progress;
 106
 107	/* partition number for rebuild_sched_domains() */
 108	int pn;
 109
 110	/* for custom sched domain */
 111	int relax_domain_level;
 112};
 113
 114static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
 115{
 116	return css ? container_of(css, struct cpuset, css) : NULL;
 117}
 118
 119/* Retrieve the cpuset for a task */
 120static inline struct cpuset *task_cs(struct task_struct *task)
 121{
 122	return css_cs(task_css(task, cpuset_cgrp_id));
 123}
 124
 125static inline struct cpuset *parent_cs(struct cpuset *cs)
 126{
 127	return css_cs(css_parent(&cs->css));
 128}
 129
 130#ifdef CONFIG_NUMA
 131static inline bool task_has_mempolicy(struct task_struct *task)
 132{
 133	return task->mempolicy;
 134}
 135#else
 136static inline bool task_has_mempolicy(struct task_struct *task)
 137{
 138	return false;
 139}
 140#endif
 141
 142
 143/* bits in struct cpuset flags field */
 144typedef enum {
 145	CS_ONLINE,
 146	CS_CPU_EXCLUSIVE,
 147	CS_MEM_EXCLUSIVE,
 148	CS_MEM_HARDWALL,
 149	CS_MEMORY_MIGRATE,
 150	CS_SCHED_LOAD_BALANCE,
 151	CS_SPREAD_PAGE,
 152	CS_SPREAD_SLAB,
 153} cpuset_flagbits_t;
 154
 155/* convenient tests for these bits */
 156static inline bool is_cpuset_online(const struct cpuset *cs)
 157{
 158	return test_bit(CS_ONLINE, &cs->flags);
 159}
 160
 161static inline int is_cpu_exclusive(const struct cpuset *cs)
 162{
 163	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
 164}
 165
 166static inline int is_mem_exclusive(const struct cpuset *cs)
 167{
 168	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 169}
 170
 171static inline int is_mem_hardwall(const struct cpuset *cs)
 172{
 173	return test_bit(CS_MEM_HARDWALL, &cs->flags);
 174}
 175
 176static inline int is_sched_load_balance(const struct cpuset *cs)
 177{
 178	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 179}
 180
 181static inline int is_memory_migrate(const struct cpuset *cs)
 182{
 183	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
 184}
 185
 186static inline int is_spread_page(const struct cpuset *cs)
 187{
 188	return test_bit(CS_SPREAD_PAGE, &cs->flags);
 189}
 190
 191static inline int is_spread_slab(const struct cpuset *cs)
 192{
 193	return test_bit(CS_SPREAD_SLAB, &cs->flags);
 194}
 195
 196static struct cpuset top_cpuset = {
 197	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
 198		  (1 << CS_MEM_EXCLUSIVE)),
 199};
 200
 201/**
 202 * cpuset_for_each_child - traverse online children of a cpuset
 203 * @child_cs: loop cursor pointing to the current child
 204 * @pos_css: used for iteration
 205 * @parent_cs: target cpuset to walk children of
 206 *
 207 * Walk @child_cs through the online children of @parent_cs.  Must be used
 208 * with RCU read locked.
 209 */
 210#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
 211	css_for_each_child((pos_css), &(parent_cs)->css)		\
 212		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
 213
 214/**
 215 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 216 * @des_cs: loop cursor pointing to the current descendant
 217 * @pos_css: used for iteration
 218 * @root_cs: target cpuset to walk descendants of
 219 *
 220 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 221 * with RCU read locked.  The caller may modify @pos_css by calling
 222 * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
 223 * iteration and is the first node to be visited.
 224 */
 225#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
 226	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
 227		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
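/*
 * Illustrative sketch, not part of the original file: how a caller might use
 * cpuset_for_each_child() under the RCU requirement stated above.  The
 * helper name count_online_children() is hypothetical and exists only for
 * this example.
 */
#if 0	/* example only */
static int count_online_children(struct cpuset *parent)
{
	struct cpuset *child;
	struct cgroup_subsys_state *pos_css;
	int n = 0;

	rcu_read_lock();
	cpuset_for_each_child(child, pos_css, parent)
		n++;			/* each iteration sees one online child */
	rcu_read_unlock();

	return n;
}
#endif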
 228
 229/*
 230 * There are two global mutexes guarding cpuset structures - cpuset_mutex
 231 * and callback_mutex.  The latter may nest inside the former.  We also
 232 * require taking task_lock() when dereferencing a task's cpuset pointer.
 233 * See "The task_lock() exception", at the end of this comment.
 234 *
 235 * A task must hold both mutexes to modify cpusets.  If a task holds
 236 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 237 * is the only task able to also acquire callback_mutex and be able to
 238 * modify cpusets.  It can perform various checks on the cpuset structure
 239 * first, knowing nothing will change.  It can also allocate memory while
 240 * just holding cpuset_mutex.  While it is performing these checks, various
 241 * callback routines can briefly acquire callback_mutex to query cpusets.
 242 * Once it is ready to make the changes, it takes callback_mutex, blocking
 243 * everyone else.
 244 *
 245 * Calls to the kernel memory allocator can not be made while holding
 246 * callback_mutex, as that would risk double tripping on callback_mutex
 247 * from one of the callbacks into the cpuset code from within
 248 * __alloc_pages().
 249 *
 250 * If a task is only holding callback_mutex, then it has read-only
 251 * access to cpusets.
 252 *
 253 * The task_struct fields mems_allowed and mempolicy may be changed by
 254 * another task, so we use alloc_lock in the task_struct to protect
 255 * them.
 256 *
 257 * The cpuset_common_file_read() handlers only hold callback_mutex across
 258 * small pieces of code, such as when reading out possibly multi-word
 259 * cpumasks and nodemasks.
 260 *
 261 * Accessing a task's cpuset should be done in accordance with the
 262 * guidelines for accessing subsystem state in kernel/cgroup.c
 263 */
 264
 265static DEFINE_MUTEX(cpuset_mutex);
 266static DEFINE_MUTEX(callback_mutex);
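/*
 * Illustrative sketch, not part of the original file: the modifier pattern
 * implied by the locking comment above.  A writer holds cpuset_mutex for the
 * whole operation (validation, allocation) and only briefly nests
 * callback_mutex around the point where the new values are published.  The
 * function and its arguments are hypothetical.
 */
#if 0	/* example only */
static void example_publish_flags(struct cpuset *cs, unsigned long new_flags)
{
	mutex_lock(&cpuset_mutex);	/* outer lock: check, allocate, decide */

	mutex_lock(&callback_mutex);	/* inner lock: publish the change */
	cs->flags = new_flags;
	mutex_unlock(&callback_mutex);

	mutex_unlock(&cpuset_mutex);	/* readers may query again */
}
#endif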
 267
 268/*
 269 * CPU / memory hotplug is handled asynchronously.
 270 */
 271static void cpuset_hotplug_workfn(struct work_struct *work);
 272static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
 273
 274static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
 275
 276/*
 277 * This is ugly, but preserves the userspace API for existing cpuset
 278 * users. If someone tries to mount the "cpuset" filesystem, we
 279 * silently switch it to mount "cgroup" instead.
 280 */
 281static struct dentry *cpuset_mount(struct file_system_type *fs_type,
 282			 int flags, const char *unused_dev_name, void *data)
 283{
 284	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
 285	struct dentry *ret = ERR_PTR(-ENODEV);
 286	if (cgroup_fs) {
 287		char mountopts[] =
 288			"cpuset,noprefix,"
 289			"release_agent=/sbin/cpuset_release_agent";
 290		ret = cgroup_fs->mount(cgroup_fs, flags,
 291					   unused_dev_name, mountopts);
 292		put_filesystem(cgroup_fs);
 293	}
 294	return ret;
 295}
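/*
 * In effect, "mount -t cpuset" behaves like mounting the cgroup filesystem
 * with "-o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent", which
 * is exactly the option string built above.
 */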
 296
 297static struct file_system_type cpuset_fs_type = {
 298	.name = "cpuset",
 299	.mount = cpuset_mount,
 300};
 301
 302/*
 303 * Return in *pmask the portion of a cpuset's cpus_allowed that
 304 * are online.  If none are online, walk up the cpuset hierarchy
 305 * until we find one that does have some online cpus.  The top
 306 * cpuset always has some cpus online.
 307 *
 308 * One way or another, we guarantee to return some non-empty subset
 309 * of cpu_online_mask.
 310 *
 311 * Call with callback_mutex held.
 312 */
 313static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
 314{
 315	while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
 316		cs = parent_cs(cs);
 317	cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
 318}
 319
 320/*
 321 * Return in *pmask the portion of a cpuset's mems_allowed that
 322 * are online, with memory.  If none are online with memory, walk
 323 * up the cpuset hierarchy until we find one that does have some
 324 * online mems.  The top cpuset always has some mems online.
 325 *
 326 * One way or another, we guarantee to return some non-empty subset
 327 * of node_states[N_MEMORY].
 328 *
 329 * Call with callback_mutex held.
 330 */
 331static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 332{
 333	while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
 334		cs = parent_cs(cs);
 335	nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
 336}
 337
 338/*
 339 * update task's spread flag if cpuset's page/slab spread flag is set
 340 *
 341 * Called with callback_mutex/cpuset_mutex held
 342 */
 343static void cpuset_update_task_spread_flag(struct cpuset *cs,
 344					struct task_struct *tsk)
 345{
 346	if (is_spread_page(cs))
 347		tsk->flags |= PF_SPREAD_PAGE;
 348	else
 349		tsk->flags &= ~PF_SPREAD_PAGE;
 350	if (is_spread_slab(cs))
 351		tsk->flags |= PF_SPREAD_SLAB;
 352	else
 353		tsk->flags &= ~PF_SPREAD_SLAB;
 354}
 355
 356/*
 357 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 358 *
 359 * One cpuset is a subset of another if all its allowed CPUs and
 360 * Memory Nodes are a subset of the other, and its exclusive flags
 361 * are only set if the other's are set.  Call holding cpuset_mutex.
 362 */
 363
 364static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 365{
 366	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
 367		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 368		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 369		is_mem_exclusive(p) <= is_mem_exclusive(q);
 370}
 371
 372/**
 373 * alloc_trial_cpuset - allocate a trial cpuset
 374 * @cs: the cpuset that the trial cpuset duplicates
 375 */
 376static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 377{
 378	struct cpuset *trial;
 379
 380	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
 381	if (!trial)
 382		return NULL;
 383
 384	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
 385		kfree(trial);
 386		return NULL;
 387	}
 388	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
 389
 390	return trial;
 391}
 392
 393/**
 394 * free_trial_cpuset - free the trial cpuset
 395 * @trial: the trial cpuset to be freed
 396 */
 397static void free_trial_cpuset(struct cpuset *trial)
 398{
 399	free_cpumask_var(trial->cpus_allowed);
 400	kfree(trial);
 401}
 402
 403/*
 404 * validate_change() - Used to validate that any proposed cpuset change
 405 *		       follows the structural rules for cpusets.
 406 *
 407 * If we replaced the flag and mask values of the current cpuset
 408 * (cur) with those values in the trial cpuset (trial), would
 409 * our various subset and exclusive rules still be valid?  Presumes
 410 * cpuset_mutex held.
 411 *
 412 * 'cur' is the address of an actual, in-use cpuset.  Operations
 413 * such as list traversal that depend on the actual address of the
 414 * cpuset in the list must use cur below, not trial.
 415 *
 416 * 'trial' is the address of bulk structure copy of cur, with
 417 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 418 * or flags changed to new, trial values.
 419 *
 420 * Return 0 if valid, -errno if not.
 421 */
 422
 423static int validate_change(struct cpuset *cur, struct cpuset *trial)
 424{
 425	struct cgroup_subsys_state *css;
 426	struct cpuset *c, *par;
 427	int ret;
 428
 429	rcu_read_lock();
 430
 431	/* Each of our child cpusets must be a subset of us */
 432	ret = -EBUSY;
 433	cpuset_for_each_child(c, css, cur)
 434		if (!is_cpuset_subset(c, trial))
 435			goto out;
 436
 437	/* Remaining checks don't apply to root cpuset */
 438	ret = 0;
 439	if (cur == &top_cpuset)
 440		goto out;
 441
 442	par = parent_cs(cur);
 443
 444	/* We must be a subset of our parent cpuset */
 445	ret = -EACCES;
 446	if (!is_cpuset_subset(trial, par))
 447		goto out;
 448
 449	/*
 450	 * If either I or some sibling (!= me) is exclusive, we can't
 451	 * overlap
 452	 */
 453	ret = -EINVAL;
 454	cpuset_for_each_child(c, css, par) {
 455		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 456		    c != cur &&
 457		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
 458			goto out;
 459		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 460		    c != cur &&
 461		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
 462			goto out;
 463	}
 464
 465	/*
 466	 * Cpusets with tasks - existing or newly being attached - can't
 467	 * be changed to have empty cpus_allowed or mems_allowed.
 468	 */
 469	ret = -ENOSPC;
 470	if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) {
 471		if (!cpumask_empty(cur->cpus_allowed) &&
 472		    cpumask_empty(trial->cpus_allowed))
 473			goto out;
 474		if (!nodes_empty(cur->mems_allowed) &&
 475		    nodes_empty(trial->mems_allowed))
 476			goto out;
 477	}
 478
 479	ret = 0;
 480out:
 481	rcu_read_unlock();
 482	return ret;
 483}
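/*
 * Illustrative worked example, not part of the original file: if the parent
 * owns cpus 0-5, a sibling A is marked cpu_exclusive with cpus 0-3, and the
 * trial cpuset asks for cpus 2-5, the sibling loop above notices that A is
 * exclusive and that the two masks intersect, so validate_change() returns
 * -EINVAL and the change is rejected.
 */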
 484
 485#ifdef CONFIG_SMP
 486/*
 487 * Helper routine for generate_sched_domains().
 488 * Do cpusets a, b have overlapping cpus_allowed masks?
 489 */
 490static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 491{
 492	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
 493}
 494
 495static void
 496update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 497{
 498	if (dattr->relax_domain_level < c->relax_domain_level)
 499		dattr->relax_domain_level = c->relax_domain_level;
 500	return;
 501}
 502
 503static void update_domain_attr_tree(struct sched_domain_attr *dattr,
 504				    struct cpuset *root_cs)
 505{
 506	struct cpuset *cp;
 507	struct cgroup_subsys_state *pos_css;
 508
 509	rcu_read_lock();
 510	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
 511		if (cp == root_cs)
 512			continue;
 513
 514		/* skip the whole subtree if @cp doesn't have any CPU */
 515		if (cpumask_empty(cp->cpus_allowed)) {
 516			pos_css = css_rightmost_descendant(pos_css);
 517			continue;
 518		}
 519
 520		if (is_sched_load_balance(cp))
 521			update_domain_attr(dattr, cp);
 522	}
 523	rcu_read_unlock();
 524}
 525
 526/*
 527 * generate_sched_domains()
 528 *
 529 * This function builds a partial partition of the system's CPUs.
 530 * A 'partial partition' is a set of non-overlapping subsets whose
 531 * union is a subset of that set.
 532 * The output of this function needs to be passed to kernel/sched/core.c
 533 * partition_sched_domains() routine, which will rebuild the scheduler's
 534 * load balancing domains (sched domains) as specified by that partial
 535 * partition.
 536 *
 537 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 538 * for a background explanation of this.
 539 *
 540 * Does not return errors, on the theory that the callers of this
 541 * routine would rather not worry about failures to rebuild sched
 542 * domains when operating in the severe memory shortage situations
 543 * that could cause allocation failures below.
 544 *
 545 * Must be called with cpuset_mutex held.
 546 *
 547 * The three key local variables below are:
 548 *    q  - a linked-list queue of cpuset pointers, used to implement a
 549 *	   top-down scan of all cpusets.  This scan loads a pointer
 550 *	   to each cpuset marked is_sched_load_balance into the
 551 *	   array 'csa'.  For our purposes, rebuilding the schedulers
 552 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 553 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 554 *	   that need to be load balanced, for convenient iterative
 555 *	   access by the subsequent code that finds the best partition,
 556 *	   i.e the set of domains (subsets) of CPUs such that the
 557 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 558 *	   is a subset of one of these domains, while there are as
 559 *	   many such domains as possible, each as small as possible.
 560 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 561 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 562 *	   convenient format, that can be easily compared to the prior
 563 *	   value to determine what partition elements (sched domains)
 564 *	   were changed (added or removed.)
 565 *
 566 * Finding the best partition (set of domains):
 567 *	The triple nested loops below over i, j, k scan over the
 568 *	load balanced cpusets (using the array of cpuset pointers in
 569 *	csa[]) looking for pairs of cpusets that have overlapping
 570 *	cpus_allowed, but which don't have the same 'pn' partition
 571 *	number, and merges them into the same partition number.  It keeps
 572 *	looping on the 'restart' label until it can no longer find
 573 *	any such pairs.
 574 *
 575 *	The union of the cpus_allowed masks from the set of
 576 *	all cpusets having the same 'pn' value then form the one
 577 *	element of the partition (one sched domain) to be passed to
 578 *	partition_sched_domains().
 579 */
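/*
 * Illustrative worked example, not part of the original file: suppose csa[]
 * ends up holding three load-balanced cpusets A, B and C with initial pn
 * values 0, 1 and 2.  If A and B have overlapping cpus_allowed, the restart
 * loop below rewrites every pn equal to B's into A's value and drops ndoms
 * from 3 to 2.  If C overlaps neither, the final partition has two sched
 * domains: one for the union of A's and B's cpus, one for C's cpus.
 */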
 580static int generate_sched_domains(cpumask_var_t **domains,
 581			struct sched_domain_attr **attributes)
 582{
 583	struct cpuset *cp;	/* scans q */
 584	struct cpuset **csa;	/* array of all cpuset ptrs */
 585	int csn;		/* how many cpuset ptrs in csa so far */
 586	int i, j, k;		/* indices for partition finding loops */
 587	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
 588	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 589	int ndoms = 0;		/* number of sched domains in result */
 590	int nslot;		/* next empty doms[] struct cpumask slot */
 591	struct cgroup_subsys_state *pos_css;
 592
 593	doms = NULL;
 594	dattr = NULL;
 595	csa = NULL;
 596
 597	/* Special case for the 99% of systems with one, full, sched domain */
 598	if (is_sched_load_balance(&top_cpuset)) {
 599		ndoms = 1;
 600		doms = alloc_sched_domains(ndoms);
 601		if (!doms)
 602			goto done;
 603
 604		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 605		if (dattr) {
 606			*dattr = SD_ATTR_INIT;
 607			update_domain_attr_tree(dattr, &top_cpuset);
 608		}
 609		cpumask_copy(doms[0], top_cpuset.cpus_allowed);
 610
 611		goto done;
 612	}
 613
 614	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
 615	if (!csa)
 616		goto done;
 617	csn = 0;
 618
 619	rcu_read_lock();
 620	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
 621		if (cp == &top_cpuset)
 622			continue;
 623		/*
 624		 * Continue traversing beyond @cp iff @cp has some CPUs and
 625		 * isn't load balancing.  The former is obvious.  The
 626		 * latter: All child cpusets contain a subset of the
 627		 * parent's cpus, so just skip them, and then we call
 628		 * update_domain_attr_tree() to calc relax_domain_level of
 629		 * the corresponding sched domain.
 630		 */
 631		if (!cpumask_empty(cp->cpus_allowed) &&
 632		    !is_sched_load_balance(cp))
 633			continue;
 634
 635		if (is_sched_load_balance(cp))
 636			csa[csn++] = cp;
 637
 638		/* skip @cp's subtree */
 639		pos_css = css_rightmost_descendant(pos_css);
 640	}
 641	rcu_read_unlock();
 642
 643	for (i = 0; i < csn; i++)
 644		csa[i]->pn = i;
 645	ndoms = csn;
 646
 647restart:
 648	/* Find the best partition (set of sched domains) */
 649	for (i = 0; i < csn; i++) {
 650		struct cpuset *a = csa[i];
 651		int apn = a->pn;
 652
 653		for (j = 0; j < csn; j++) {
 654			struct cpuset *b = csa[j];
 655			int bpn = b->pn;
 656
 657			if (apn != bpn && cpusets_overlap(a, b)) {
 658				for (k = 0; k < csn; k++) {
 659					struct cpuset *c = csa[k];
 660
 661					if (c->pn == bpn)
 662						c->pn = apn;
 663				}
 664				ndoms--;	/* one less element */
 665				goto restart;
 666			}
 667		}
 668	}
 669
 670	/*
 671	 * Now we know how many domains to create.
 672	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 673	 */
 674	doms = alloc_sched_domains(ndoms);
 675	if (!doms)
 676		goto done;
 677
 678	/*
 679	 * The rest of the code, including the scheduler, can deal with
 680	 * dattr==NULL case. No need to abort if alloc fails.
 681	 */
 682	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
 683
 684	for (nslot = 0, i = 0; i < csn; i++) {
 685		struct cpuset *a = csa[i];
 686		struct cpumask *dp;
 687		int apn = a->pn;
 688
 689		if (apn < 0) {
 690			/* Skip completed partitions */
 691			continue;
 692		}
 693
 694		dp = doms[nslot];
 695
 696		if (nslot == ndoms) {
 697			static int warnings = 10;
 698			if (warnings) {
 699				printk(KERN_WARNING
 700				 "rebuild_sched_domains confused:"
 701				  " nslot %d, ndoms %d, csn %d, i %d,"
 702				  " apn %d\n",
 703				  nslot, ndoms, csn, i, apn);
 704				warnings--;
 705			}
 706			continue;
 707		}
 708
 709		cpumask_clear(dp);
 710		if (dattr)
 711			*(dattr + nslot) = SD_ATTR_INIT;
 712		for (j = i; j < csn; j++) {
 713			struct cpuset *b = csa[j];
 714
 715			if (apn == b->pn) {
 716				cpumask_or(dp, dp, b->cpus_allowed);
 717				if (dattr)
 718					update_domain_attr_tree(dattr + nslot, b);
 719
 720				/* Done with this partition */
 721				b->pn = -1;
 722			}
 723		}
 724		nslot++;
 725	}
 726	BUG_ON(nslot != ndoms);
 727
 728done:
 729	kfree(csa);
 730
 731	/*
 732	 * Fallback to the default domain if kmalloc() failed.
 733	 * See comments in partition_sched_domains().
 734	 */
 735	if (doms == NULL)
 736		ndoms = 1;
 737
 738	*domains    = doms;
 739	*attributes = dattr;
 740	return ndoms;
 741}
 742
 743/*
 744 * Rebuild scheduler domains.
 745 *
 746 * If the flag 'sched_load_balance' of any cpuset with non-empty
 747 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 748 * which has that flag enabled, or if any cpuset with a non-empty
 749 * 'cpus' is removed, then call this routine to rebuild the
 750 * scheduler's dynamic sched domains.
 751 *
 752 * Call with cpuset_mutex held.  Takes get_online_cpus().
 753 */
 754static void rebuild_sched_domains_locked(void)
 755{
 756	struct sched_domain_attr *attr;
 757	cpumask_var_t *doms;
 758	int ndoms;
 759
 760	lockdep_assert_held(&cpuset_mutex);
 761	get_online_cpus();
 762
 763	/*
 764	 * We have raced with CPU hotplug. Don't do anything to avoid
 765	 * passing doms with offlined cpu to partition_sched_domains().
 766	 * Anyway, the hotplug work item will rebuild the sched domains.
 767	 */
 768	if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
 769		goto out;
 770
 771	/* Generate domain masks and attrs */
 772	ndoms = generate_sched_domains(&doms, &attr);
 773
 774	/* Have scheduler rebuild the domains */
 775	partition_sched_domains(ndoms, doms, attr);
 776out:
 777	put_online_cpus();
 778}
 779#else /* !CONFIG_SMP */
 780static void rebuild_sched_domains_locked(void)
 781{
 782}
 783#endif /* CONFIG_SMP */
 784
 785void rebuild_sched_domains(void)
 786{
 787	mutex_lock(&cpuset_mutex);
 788	rebuild_sched_domains_locked();
 789	mutex_unlock(&cpuset_mutex);
 790}
 791
 792/*
 793 * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
 794 * @cs: the cpuset in interest
 795 *
 796 * A cpuset's effective cpumask is the cpumask of the nearest ancestor
 797 * with non-empty cpus. We use effective cpumask whenever:
 798 * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
 799 *   if the cpuset they reside in has no cpus)
 800 * - we want to retrieve task_cs(tsk)'s cpus_allowed.
 801 *
 802 * Called with cpuset_mutex held. cpuset_cpus_allowed_fallback() is an
 803 * exception. See comments there.
 804 */
 805static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
 806{
 807	while (cpumask_empty(cs->cpus_allowed))
 808		cs = parent_cs(cs);
 809	return cs;
 810}
 811
 812/*
 813 * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
 814 * @cs: the cpuset in interest
 815 *
 816 * A cpuset's effective nodemask is the nodemask of the nearest ancestor
 817 * with non-empty mems. We use effective nodemask whenever:
 818 * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
 819 *   if the cpuset they reside in has no mems)
 820 * - we want to retrieve task_cs(tsk)'s mems_allowed.
 821 *
 822 * Called with cpuset_mutex held.
 823 */
 824static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
 825{
 826	while (nodes_empty(cs->mems_allowed))
 827		cs = parent_cs(cs);
 828	return cs;
 829}
 830
 831/**
 832 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 833 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 834 *
 835 * Iterate through each task of @cs updating its cpus_allowed to the
 836 * effective cpuset's.  As this function is called with cpuset_mutex held,
 837 * cpuset membership stays stable.
 838 */
 839static void update_tasks_cpumask(struct cpuset *cs)
 840{
 841	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
 842	struct css_task_iter it;
 843	struct task_struct *task;
 844
 845	css_task_iter_start(&cs->css, &it);
 846	while ((task = css_task_iter_next(&it)))
 847		set_cpus_allowed_ptr(task, cpus_cs->cpus_allowed);
 848	css_task_iter_end(&it);
 849}
 850
 851/*
 852 * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
 853 * @root_cs: the root cpuset of the hierarchy
 854 * @update_root: update root cpuset or not?
 855 *
 856 * This will update the cpumasks of tasks in @root_cs and in all empty
 857 * descendant cpusets, which take on the cpumask of @root_cs.
 858 *
 859 * Called with cpuset_mutex held
 860 */
 861static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root)
 862{
 863	struct cpuset *cp;
 864	struct cgroup_subsys_state *pos_css;
 865
 866	rcu_read_lock();
 867	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
 868		if (cp == root_cs) {
 869			if (!update_root)
 870				continue;
 871		} else {
 872			/* skip the whole subtree if @cp has some CPUs */
 873			if (!cpumask_empty(cp->cpus_allowed)) {
 874				pos_css = css_rightmost_descendant(pos_css);
 875				continue;
 876			}
 877		}
 878		if (!css_tryget(&cp->css))
 879			continue;
 880		rcu_read_unlock();
 881
 882		update_tasks_cpumask(cp);
 883
 884		rcu_read_lock();
 885		css_put(&cp->css);
 886	}
 887	rcu_read_unlock();
 888}
 889
 890/**
 891 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 892 * @cs: the cpuset to consider
 893 * @buf: buffer of cpu numbers written to this cpuset
 894 */
 895static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 896			  const char *buf)
 897{
 898	int retval;
 899	int is_load_balanced;
 900
 901	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
 902	if (cs == &top_cpuset)
 903		return -EACCES;
 904
 905	/*
 906	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
 907	 * Since cpulist_parse() fails on an empty mask, we special case
 908	 * that parsing.  The validate_change() call ensures that cpusets
 909	 * with tasks have cpus.
 910	 */
 911	if (!*buf) {
 912		cpumask_clear(trialcs->cpus_allowed);
 913	} else {
 914		retval = cpulist_parse(buf, trialcs->cpus_allowed);
 915		if (retval < 0)
 916			return retval;
 917
 918		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
 919			return -EINVAL;
 920	}
 921
 922	/* Nothing to do if the cpus didn't change */
 923	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
 924		return 0;
 925
 926	retval = validate_change(cs, trialcs);
 927	if (retval < 0)
 928		return retval;
 929
 930	is_load_balanced = is_sched_load_balance(trialcs);
 931
 932	mutex_lock(&callback_mutex);
 933	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 934	mutex_unlock(&callback_mutex);
 935
 936	update_tasks_cpumask_hier(cs, true);
 937
 938	if (is_load_balanced)
 939		rebuild_sched_domains_locked();
 940	return 0;
 941}
 942
 943/*
 944 * cpuset_migrate_mm
 945 *
 946 *    Migrate memory region from one set of nodes to another.
 947 *
 948 *    Temporarily set the task's mems_allowed to the target nodes of migration,
 949 *    so that the migration code can allocate pages on these nodes.
 950 *
 951 *    While the mm_struct we are migrating is typically from some
 952 *    other task, the task_struct mems_allowed that we are hacking
 953 *    is for our current task, which must allocate new pages for that
 954 *    migrating memory region.
 955 */
 956
 957static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 958							const nodemask_t *to)
 959{
 960	struct task_struct *tsk = current;
 961	struct cpuset *mems_cs;
 962
 963	tsk->mems_allowed = *to;
 964
 965	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 966
 967	rcu_read_lock();
 968	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
 969	guarantee_online_mems(mems_cs, &tsk->mems_allowed);
 970	rcu_read_unlock();
 971}
 972
 973/*
 974 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 975 * @tsk: the task to change
 976 * @newmems: new nodes that will be set for the task
 977 *
 978 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 979 * we structure updates as setting all new allowed nodes, then clearing newly
 980 * disallowed ones.
 981 */
 982static void cpuset_change_task_nodemask(struct task_struct *tsk,
 983					nodemask_t *newmems)
 984{
 985	bool need_loop;
 986
 987	/*
 988	 * Allow tasks that have access to memory reserves because they have
 989	 * been OOM killed to get memory anywhere.
 990	 */
 991	if (unlikely(test_thread_flag(TIF_MEMDIE)))
 992		return;
 993	if (current->flags & PF_EXITING) /* Let dying task have memory */
 994		return;
 995
 996	task_lock(tsk);
 997	/*
 998	 * Determine if a loop is necessary if another thread is doing
 999	 * read_mems_allowed_begin().  If at least one node remains unchanged and
1000	 * tsk does not have a mempolicy, then an empty nodemask will not be
1001	 * possible when mems_allowed is larger than a word.
1002	 */
1003	need_loop = task_has_mempolicy(tsk) ||
1004			!nodes_intersects(*newmems, tsk->mems_allowed);
1005
1006	if (need_loop) {
1007		local_irq_disable();
1008		write_seqcount_begin(&tsk->mems_allowed_seq);
1009	}
1010
1011	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1012	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
1013
1014	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
1015	tsk->mems_allowed = *newmems;
1016
1017	if (need_loop) {
1018		write_seqcount_end(&tsk->mems_allowed_seq);
1019		local_irq_enable();
1020	}
1021
1022	task_unlock(tsk);
1023}
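/*
 * Illustrative worked example, not part of the original file: moving a task
 * from mems_allowed = {0} to newmems = {1}.  The function above first ORs in
 * the new nodes, so the task briefly sees {0,1}, and then stores *newmems,
 * leaving {1}.  The visible nodemask is never empty, which is the property
 * the need_loop/seqcount handling relies on.
 */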
1024
1025static void *cpuset_being_rebound;
1026
1027/**
1028 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1029 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1030 *
1031 * Iterate through each task of @cs updating its mems_allowed to the
1032 * effective cpuset's.  As this function is called with cpuset_mutex held,
1033 * cpuset membership stays stable.
1034 */
1035static void update_tasks_nodemask(struct cpuset *cs)
1036{
1037	static nodemask_t newmems;	/* protected by cpuset_mutex */
1038	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
1039	struct css_task_iter it;
1040	struct task_struct *task;
1041
1042	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
1043
1044	guarantee_online_mems(mems_cs, &newmems);
1045
1046	/*
1047	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1048	 * take while holding tasklist_lock.  Forks can happen - the
1049	 * mpol_dup() cpuset_being_rebound check will catch such forks,
1050	 * and rebind their vma mempolicies too.  Because we still hold
1051	 * the global cpuset_mutex, we know that no other rebind effort
1052	 * will be contending for the global variable cpuset_being_rebound.
1053	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1054	 * is idempotent.  Also migrate pages in each mm to new nodes.
1055	 */
1056	css_task_iter_start(&cs->css, &it);
1057	while ((task = css_task_iter_next(&it))) {
1058		struct mm_struct *mm;
1059		bool migrate;
1060
1061		cpuset_change_task_nodemask(task, &newmems);
1062
1063		mm = get_task_mm(task);
1064		if (!mm)
1065			continue;
1066
1067		migrate = is_memory_migrate(cs);
1068
1069		mpol_rebind_mm(mm, &cs->mems_allowed);
1070		if (migrate)
1071			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1072		mmput(mm);
1073	}
1074	css_task_iter_end(&it);
1075
1076	/*
1077	 * All the tasks' nodemasks have been updated, update
1078	 * cs->old_mems_allowed.
1079	 */
1080	cs->old_mems_allowed = newmems;
1081
1082	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
1083	cpuset_being_rebound = NULL;
1084}
1085
1086/*
1087 * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
1088 * @root_cs: the root cpuset of the hierarchy
1089 * @update_root: update the root cpuset or not?
1090 *
1091 * This will update the nodemasks of tasks in @root_cs and in all empty
1092 * descendant cpusets, which take on the nodemask of @root_cs.
1093 *
1094 * Called with cpuset_mutex held
1095 */
1096static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root)
1097{
1098	struct cpuset *cp;
1099	struct cgroup_subsys_state *pos_css;
1100
1101	rcu_read_lock();
1102	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
1103		if (cp == root_cs) {
1104			if (!update_root)
1105				continue;
1106		} else {
1107			/* skip the whole subtree if @cp has some mems */
1108			if (!nodes_empty(cp->mems_allowed)) {
1109				pos_css = css_rightmost_descendant(pos_css);
1110				continue;
1111			}
1112		}
1113		if (!css_tryget(&cp->css))
1114			continue;
1115		rcu_read_unlock();
1116
1117		update_tasks_nodemask(cp);
1118
1119		rcu_read_lock();
1120		css_put(&cp->css);
1121	}
1122	rcu_read_unlock();
1123}
1124
1125/*
1126 * Handle user request to change the 'mems' memory placement
1127 * of a cpuset.  Needs to validate the request, update the
1128 * cpusets mems_allowed, and for each task in the cpuset,
1129 * update mems_allowed and rebind task's mempolicy and any vma
1130 * mempolicies and if the cpuset is marked 'memory_migrate',
1131 * migrate the task's pages to the new memory.
1132 *
1133 * Call with cpuset_mutex held.  May take callback_mutex during call.
1134 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1135 * lock each such task's mm->mmap_sem, scan its vmas and rebind
1136 * their mempolicies to the cpuset's new mems_allowed.
1137 */
1138static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1139			   const char *buf)
1140{
1141	int retval;
1142
1143	/*
1144	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
1145	 * it's read-only
1146	 */
1147	if (cs == &top_cpuset) {
1148		retval = -EACCES;
1149		goto done;
1150	}
1151
1152	/*
1153	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1154	 * Since nodelist_parse() fails on an empty mask, we special case
1155	 * that parsing.  The validate_change() call ensures that cpusets
1156	 * with tasks have memory.
1157	 */
1158	if (!*buf) {
1159		nodes_clear(trialcs->mems_allowed);
1160	} else {
1161		retval = nodelist_parse(buf, trialcs->mems_allowed);
1162		if (retval < 0)
1163			goto done;
1164
1165		if (!nodes_subset(trialcs->mems_allowed,
1166				node_states[N_MEMORY])) {
1167			retval = -EINVAL;
1168			goto done;
1169		}
1170	}
1171
1172	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1173		retval = 0;		/* Too easy - nothing to do */
1174		goto done;
1175	}
1176	retval = validate_change(cs, trialcs);
1177	if (retval < 0)
1178		goto done;
1179
1180	mutex_lock(&callback_mutex);
1181	cs->mems_allowed = trialcs->mems_allowed;
1182	mutex_unlock(&callback_mutex);
1183
1184	update_tasks_nodemask_hier(cs, true);
1185done:
1186	return retval;
1187}
1188
1189int current_cpuset_is_being_rebound(void)
1190{
1191	return task_cs(current) == cpuset_being_rebound;
1192}
1193
1194static int update_relax_domain_level(struct cpuset *cs, s64 val)
1195{
1196#ifdef CONFIG_SMP
1197	if (val < -1 || val >= sched_domain_level_max)
1198		return -EINVAL;
1199#endif
1200
1201	if (val != cs->relax_domain_level) {
1202		cs->relax_domain_level = val;
1203		if (!cpumask_empty(cs->cpus_allowed) &&
1204		    is_sched_load_balance(cs))
1205			rebuild_sched_domains_locked();
1206	}
1207
1208	return 0;
1209}
1210
1211/**
1212 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1213 * @cs: the cpuset in which each task's spread flags needs to be changed
1214 *
1215 * Iterate through each task of @cs updating its spread flags.  As this
1216 * function is called with cpuset_mutex held, cpuset membership stays
1217 * stable.
1218 */
1219static void update_tasks_flags(struct cpuset *cs)
1220{
1221	struct css_task_iter it;
1222	struct task_struct *task;
1223
1224	css_task_iter_start(&cs->css, &it);
1225	while ((task = css_task_iter_next(&it)))
1226		cpuset_update_task_spread_flag(cs, task);
1227	css_task_iter_end(&it);
1228}
1229
1230/*
1231 * update_flag - read a 0 or a 1 in a file and update associated flag
1232 * bit:		the bit to update (see cpuset_flagbits_t)
1233 * cs:		the cpuset to update
1234 * turning_on: 	whether the flag is being set or cleared
1235 *
1236 * Call with cpuset_mutex held.
1237 */
1238
1239static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1240		       int turning_on)
1241{
1242	struct cpuset *trialcs;
1243	int balance_flag_changed;
1244	int spread_flag_changed;
1245	int err;
1246
1247	trialcs = alloc_trial_cpuset(cs);
1248	if (!trialcs)
1249		return -ENOMEM;
1250
1251	if (turning_on)
1252		set_bit(bit, &trialcs->flags);
1253	else
1254		clear_bit(bit, &trialcs->flags);
1255
1256	err = validate_change(cs, trialcs);
1257	if (err < 0)
1258		goto out;
1259
1260	balance_flag_changed = (is_sched_load_balance(cs) !=
1261				is_sched_load_balance(trialcs));
1262
1263	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1264			|| (is_spread_page(cs) != is_spread_page(trialcs)));
1265
1266	mutex_lock(&callback_mutex);
1267	cs->flags = trialcs->flags;
1268	mutex_unlock(&callback_mutex);
1269
1270	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1271		rebuild_sched_domains_locked();
1272
1273	if (spread_flag_changed)
1274		update_tasks_flags(cs);
1275out:
1276	free_trial_cpuset(trialcs);
1277	return err;
1278}
1279
1280/*
1281 * Frequency meter - How fast is some event occurring?
1282 *
1283 * These routines manage a digitally filtered, constant time based,
1284 * event frequency meter.  There are four routines:
1285 *   fmeter_init() - initialize a frequency meter.
1286 *   fmeter_markevent() - called each time the event happens.
1287 *   fmeter_getrate() - returns the recent rate of such events.
1288 *   fmeter_update() - internal routine used to update fmeter.
1289 *
1290 * A common data structure is passed to each of these routines,
1291 * which is used to keep track of the state required to manage the
1292 * frequency meter and its digital filter.
1293 *
1294 * The filter works on the number of events marked per unit time.
1295 * The filter is single-pole low-pass recursive (IIR).  The time unit
1296 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1297 * simulate 3 decimal digits of precision (multiplied by 1000).
1298 *
1299 * With an FM_COEF of 933, and a time base of 1 second, the filter
1300 * has a half-life of 10 seconds, meaning that if the events quit
1301 * happening, then the rate returned from the fmeter_getrate()
1302 * will be cut in half each 10 seconds, until it converges to zero.
1303 *
1304 * It is not worth doing a real infinitely recursive filter.  If more
1305 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1306 * just compute FM_MAXTICKS ticks worth, by which point the level
1307 * will be stable.
1308 *
1309 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1310 * arithmetic overflow in the fmeter_update() routine.
1311 *
1312 * Given the simple 32 bit integer arithmetic used, this meter works
1313 * best for reporting rates between one per millisecond (msec) and
1314 * one per 32 (approx) seconds.  At constant rates faster than one
1315 * per msec it maxes out at values just under 1,000,000.  At constant
1316 * rates between one per msec, and one per second it will stabilize
1317 * to a value N*1000, where N is the rate of events per second.
1318 * At constant rates between one per second and one per 32 seconds,
1319 * it will be choppy, moving up on the seconds that have an event,
1320 * and then decaying until the next event.  At rates slower than
1321 * about one in 32 seconds, it decays all the way back to zero between
1322 * each event.
1323 */
1324
1325#define FM_COEF 933		/* coefficient for half-life of 10 secs */
1326#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1327#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
1328#define FM_SCALE 1000		/* faux fixed point scale */
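/*
 * Illustrative arithmetic, not part of the original file: with FM_COEF 933
 * and FM_SCALE 1000, each one-second tick multiplies the filtered value by
 * 0.933, and 0.933^10 is roughly 0.5 - the "half-life of 10 seconds" noted
 * above.  A steady rate of one event per second feeds cnt = FM_SCALE per
 * tick and converges on val ~= 1000, matching the N * 1000 rule of thumb.
 */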
1329
1330/* Initialize a frequency meter */
1331static void fmeter_init(struct fmeter *fmp)
1332{
1333	fmp->cnt = 0;
1334	fmp->val = 0;
1335	fmp->time = 0;
1336	spin_lock_init(&fmp->lock);
1337}
1338
1339/* Internal meter update - process cnt events and update value */
1340static void fmeter_update(struct fmeter *fmp)
1341{
1342	time_t now = get_seconds();
1343	time_t ticks = now - fmp->time;
1344
1345	if (ticks == 0)
1346		return;
1347
1348	ticks = min(FM_MAXTICKS, ticks);
1349	while (ticks-- > 0)
1350		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1351	fmp->time = now;
1352
1353	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1354	fmp->cnt = 0;
1355}
1356
1357/* Process any previous ticks, then bump cnt by one (times scale). */
1358static void fmeter_markevent(struct fmeter *fmp)
1359{
1360	spin_lock(&fmp->lock);
1361	fmeter_update(fmp);
1362	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1363	spin_unlock(&fmp->lock);
1364}
1365
1366/* Process any previous ticks, then return current value. */
1367static int fmeter_getrate(struct fmeter *fmp)
1368{
1369	int val;
1370
1371	spin_lock(&fmp->lock);
1372	fmeter_update(fmp);
1373	val = fmp->val;
1374	spin_unlock(&fmp->lock);
1375	return val;
1376}
1377
1378static struct cpuset *cpuset_attach_old_cs;
1379
1380/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1381static int cpuset_can_attach(struct cgroup_subsys_state *css,
1382			     struct cgroup_taskset *tset)
1383{
1384	struct cpuset *cs = css_cs(css);
1385	struct task_struct *task;
1386	int ret;
1387
1388	/* used later by cpuset_attach() */
1389	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));
1390
1391	mutex_lock(&cpuset_mutex);
1392
1393	/*
1394	 * We allow tasks to be moved into an empty cpuset if the sane_behavior
1395	 * flag is set.
1396	 */
1397	ret = -ENOSPC;
1398	if (!cgroup_sane_behavior(css->cgroup) &&
1399	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1400		goto out_unlock;
1401
1402	cgroup_taskset_for_each(task, tset) {
1403		/*
1404		 * Kthreads which disallow setaffinity shouldn't be moved
1405		 * to a new cpuset; we don't want to change their cpu
1406		 * affinity and isolating such threads by their set of
1407		 * allowed nodes is unnecessary.  Thus, cpusets are not
1408		 * applicable for such threads.  This prevents checking for
1409		 * success of set_cpus_allowed_ptr() on all attached tasks
1410		 * before cpus_allowed may be changed.
1411		 */
1412		ret = -EINVAL;
1413		if (task->flags & PF_NO_SETAFFINITY)
1414			goto out_unlock;
1415		ret = security_task_setscheduler(task);
1416		if (ret)
1417			goto out_unlock;
1418	}
1419
1420	/*
1421	 * Mark attach is in progress.  This makes validate_change() fail
1422	 * changes which zero cpus/mems_allowed.
1423	 */
1424	cs->attach_in_progress++;
1425	ret = 0;
1426out_unlock:
1427	mutex_unlock(&cpuset_mutex);
1428	return ret;
1429}
1430
1431static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
1432				 struct cgroup_taskset *tset)
1433{
1434	mutex_lock(&cpuset_mutex);
1435	css_cs(css)->attach_in_progress--;
1436	mutex_unlock(&cpuset_mutex);
1437}
1438
1439/*
1440 * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
1441 * but we can't allocate it dynamically there.  Define it global and
1442 * allocate from cpuset_init().
1443 */
1444static cpumask_var_t cpus_attach;
1445
1446static void cpuset_attach(struct cgroup_subsys_state *css,
1447			  struct cgroup_taskset *tset)
1448{
1449	/* static buf protected by cpuset_mutex */
1450	static nodemask_t cpuset_attach_nodemask_to;
1451	struct mm_struct *mm;
1452	struct task_struct *task;
1453	struct task_struct *leader = cgroup_taskset_first(tset);
1454	struct cpuset *cs = css_cs(css);
1455	struct cpuset *oldcs = cpuset_attach_old_cs;
1456	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
1457	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
1458
1459	mutex_lock(&cpuset_mutex);
1460
1461	/* prepare for attach */
1462	if (cs == &top_cpuset)
1463		cpumask_copy(cpus_attach, cpu_possible_mask);
1464	else
1465		guarantee_online_cpus(cpus_cs, cpus_attach);
1466
1467	guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
1468
1469	cgroup_taskset_for_each(task, tset) {
1470		/*
1471		 * can_attach beforehand should guarantee that this doesn't
1472		 * fail.  TODO: have a better way to handle failure here
1473		 */
1474		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1475
1476		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1477		cpuset_update_task_spread_flag(cs, task);
1478	}
1479
1480	/*
1481	 * Change mm, possibly for multiple threads in a threadgroup. This is
1482	 * expensive and may sleep.
1483	 */
1484	cpuset_attach_nodemask_to = cs->mems_allowed;
1485	mm = get_task_mm(leader);
1486	if (mm) {
1487		struct cpuset *mems_oldcs = effective_nodemask_cpuset(oldcs);
1488
1489		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1490
1491		/*
1492		 * old_mems_allowed is the same as mems_allowed here, except
1493		 * if this task is being moved automatically due to hotplug.
1494		 * In that case @mems_allowed has been updated and is empty,
1495		 * so @old_mems_allowed is the right nodemask that we migrate
1496		 * mm from.
1497		 */
1498		if (is_memory_migrate(cs)) {
1499			cpuset_migrate_mm(mm, &mems_oldcs->old_mems_allowed,
1500					  &cpuset_attach_nodemask_to);
1501		}
1502		mmput(mm);
1503	}
1504
1505	cs->old_mems_allowed = cpuset_attach_nodemask_to;
1506
1507	cs->attach_in_progress--;
1508	if (!cs->attach_in_progress)
1509		wake_up(&cpuset_attach_wq);
1510
1511	mutex_unlock(&cpuset_mutex);
1512}
1513
1514/* The various types of files and directories in a cpuset file system */
1515
1516typedef enum {
1517	FILE_MEMORY_MIGRATE,
1518	FILE_CPULIST,
1519	FILE_MEMLIST,
1520	FILE_CPU_EXCLUSIVE,
1521	FILE_MEM_EXCLUSIVE,
1522	FILE_MEM_HARDWALL,
1523	FILE_SCHED_LOAD_BALANCE,
1524	FILE_SCHED_RELAX_DOMAIN_LEVEL,
1525	FILE_MEMORY_PRESSURE_ENABLED,
1526	FILE_MEMORY_PRESSURE,
1527	FILE_SPREAD_PAGE,
1528	FILE_SPREAD_SLAB,
1529} cpuset_filetype_t;
1530
1531static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
1532			    u64 val)
1533{
1534	struct cpuset *cs = css_cs(css);
1535	cpuset_filetype_t type = cft->private;
1536	int retval = 0;
1537
1538	mutex_lock(&cpuset_mutex);
1539	if (!is_cpuset_online(cs)) {
1540		retval = -ENODEV;
1541		goto out_unlock;
1542	}
1543
1544	switch (type) {
1545	case FILE_CPU_EXCLUSIVE:
1546		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1547		break;
1548	case FILE_MEM_EXCLUSIVE:
1549		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1550		break;
1551	case FILE_MEM_HARDWALL:
1552		retval = update_flag(CS_MEM_HARDWALL, cs, val);
1553		break;
1554	case FILE_SCHED_LOAD_BALANCE:
1555		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1556		break;
1557	case FILE_MEMORY_MIGRATE:
1558		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1559		break;
1560	case FILE_MEMORY_PRESSURE_ENABLED:
1561		cpuset_memory_pressure_enabled = !!val;
1562		break;
1563	case FILE_MEMORY_PRESSURE:
1564		retval = -EACCES;
1565		break;
1566	case FILE_SPREAD_PAGE:
1567		retval = update_flag(CS_SPREAD_PAGE, cs, val);
1568		break;
1569	case FILE_SPREAD_SLAB:
1570		retval = update_flag(CS_SPREAD_SLAB, cs, val);
1571		break;
1572	default:
1573		retval = -EINVAL;
1574		break;
1575	}
1576out_unlock:
1577	mutex_unlock(&cpuset_mutex);
1578	return retval;
1579}
1580
1581static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
1582			    s64 val)
1583{
1584	struct cpuset *cs = css_cs(css);
1585	cpuset_filetype_t type = cft->private;
1586	int retval = -ENODEV;
1587
1588	mutex_lock(&cpuset_mutex);
1589	if (!is_cpuset_online(cs))
1590		goto out_unlock;
1591
1592	switch (type) {
1593	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1594		retval = update_relax_domain_level(cs, val);
1595		break;
1596	default:
1597		retval = -EINVAL;
1598		break;
1599	}
1600out_unlock:
1601	mutex_unlock(&cpuset_mutex);
1602	return retval;
1603}
1604
1605/*
1606 * Common handling for a write to a "cpus" or "mems" file.
1607 */
1608static int cpuset_write_resmask(struct cgroup_subsys_state *css,
1609				struct cftype *cft, char *buf)
1610{
1611	struct cpuset *cs = css_cs(css);
1612	struct cpuset *trialcs;
1613	int retval = -ENODEV;
1614
1615	/*
1616	 * CPU or memory hotunplug may leave @cs w/o any execution
1617	 * resources, in which case the hotplug code asynchronously updates
1618	 * configuration and transfers all tasks to the nearest ancestor
1619	 * which can execute.
1620	 *
1621	 * As writes to "cpus" or "mems" may restore @cs's execution
1622	 * resources, wait for the previously scheduled operations before
1623	 * proceeding, so that we don't keep removing tasks added
1624	 * after execution capability is restored.
1625	 */
1626	flush_work(&cpuset_hotplug_work);
1627
1628	mutex_lock(&cpuset_mutex);
1629	if (!is_cpuset_online(cs))
1630		goto out_unlock;
1631
1632	trialcs = alloc_trial_cpuset(cs);
1633	if (!trialcs) {
1634		retval = -ENOMEM;
1635		goto out_unlock;
1636	}
1637
1638	switch (cft->private) {
1639	case FILE_CPULIST:
1640		retval = update_cpumask(cs, trialcs, buf);
1641		break;
1642	case FILE_MEMLIST:
1643		retval = update_nodemask(cs, trialcs, buf);
1644		break;
1645	default:
1646		retval = -EINVAL;
1647		break;
1648	}
1649
1650	free_trial_cpuset(trialcs);
1651out_unlock:
1652	mutex_unlock(&cpuset_mutex);
1653	return retval;
1654}
1655
1656/*
1657 * These ascii lists should be read in a single call, by using a user
1658 * buffer large enough to hold the entire map.  If read in smaller
1659 * chunks, there is no guarantee of atomicity.  Since the display format
1660 * used, list of ranges of sequential numbers, is variable length,
1661 * and since these maps can change value dynamically, one could read
1662 * gibberish by doing partial reads while a list was changing.
1663 */
1664static int cpuset_common_seq_show(struct seq_file *sf, void *v)
1665{
1666	struct cpuset *cs = css_cs(seq_css(sf));
1667	cpuset_filetype_t type = seq_cft(sf)->private;
1668	ssize_t count;
1669	char *buf, *s;
1670	int ret = 0;
1671
1672	count = seq_get_buf(sf, &buf);
1673	s = buf;
1674
1675	mutex_lock(&callback_mutex);
1676
1677	switch (type) {
1678	case FILE_CPULIST:
1679		s += cpulist_scnprintf(s, count, cs->cpus_allowed);
1680		break;
1681	case FILE_MEMLIST:
1682		s += nodelist_scnprintf(s, count, cs->mems_allowed);
1683		break;
1684	default:
1685		ret = -EINVAL;
1686		goto out_unlock;
1687	}
1688
1689	if (s < buf + count - 1) {
1690		*s++ = '\n';
1691		seq_commit(sf, s - buf);
1692	} else {
1693		seq_commit(sf, -1);
1694	}
1695out_unlock:
1696	mutex_unlock(&callback_mutex);
1697	return ret;
1698}
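/*
 * Illustrative userspace sketch, not part of the original file: reading a
 * cpuset map in one call, as the comment above recommends.  The cgroup mount
 * point and group name are assumptions for the example.
 */
#if 0	/* example only - userspace program, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];		/* large enough to hold the whole list */
	int fd = open("/sys/fs/cgroup/cpuset/mygroup/cpuset.cpus", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* single read: consistent snapshot */
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);
	}
	close(fd);
	return 0;
}
#endif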
1699
1700static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
1701{
1702	struct cpuset *cs = css_cs(css);
1703	cpuset_filetype_t type = cft->private;
1704	switch (type) {
1705	case FILE_CPU_EXCLUSIVE:
1706		return is_cpu_exclusive(cs);
1707	case FILE_MEM_EXCLUSIVE:
1708		return is_mem_exclusive(cs);
1709	case FILE_MEM_HARDWALL:
1710		return is_mem_hardwall(cs);
1711	case FILE_SCHED_LOAD_BALANCE:
1712		return is_sched_load_balance(cs);
1713	case FILE_MEMORY_MIGRATE:
1714		return is_memory_migrate(cs);
1715	case FILE_MEMORY_PRESSURE_ENABLED:
1716		return cpuset_memory_pressure_enabled;
1717	case FILE_MEMORY_PRESSURE:
1718		return fmeter_getrate(&cs->fmeter);
1719	case FILE_SPREAD_PAGE:
1720		return is_spread_page(cs);
1721	case FILE_SPREAD_SLAB:
1722		return is_spread_slab(cs);
1723	default:
1724		BUG();
1725	}
1726
1727	/* Unreachable but makes gcc happy */
1728	return 0;
1729}
1730
1731static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
1732{
1733	struct cpuset *cs = css_cs(css);
1734	cpuset_filetype_t type = cft->private;
1735	switch (type) {
1736	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1737		return cs->relax_domain_level;
1738	default:
1739		BUG();
1740	}
1741
1742	/* Unreachable but makes gcc happy */
1743	return 0;
1744}
1745
1746
1747/*
1748 * for the common functions, 'private' gives the type of file
1749 */
1750
1751static struct cftype files[] = {
1752	{
1753		.name = "cpus",
1754		.seq_show = cpuset_common_seq_show,
1755		.write_string = cpuset_write_resmask,
1756		.max_write_len = (100U + 6 * NR_CPUS),
1757		.private = FILE_CPULIST,
1758	},
1759
1760	{
1761		.name = "mems",
1762		.seq_show = cpuset_common_seq_show,
1763		.write_string = cpuset_write_resmask,
1764		.max_write_len = (100U + 6 * MAX_NUMNODES),
1765		.private = FILE_MEMLIST,
1766	},
1767
1768	{
1769		.name = "cpu_exclusive",
1770		.read_u64 = cpuset_read_u64,
1771		.write_u64 = cpuset_write_u64,
1772		.private = FILE_CPU_EXCLUSIVE,
1773	},
1774
1775	{
1776		.name = "mem_exclusive",
1777		.read_u64 = cpuset_read_u64,
1778		.write_u64 = cpuset_write_u64,
1779		.private = FILE_MEM_EXCLUSIVE,
1780	},
1781
1782	{
1783		.name = "mem_hardwall",
1784		.read_u64 = cpuset_read_u64,
1785		.write_u64 = cpuset_write_u64,
1786		.private = FILE_MEM_HARDWALL,
1787	},
1788
1789	{
1790		.name = "sched_load_balance",
1791		.read_u64 = cpuset_read_u64,
1792		.write_u64 = cpuset_write_u64,
1793		.private = FILE_SCHED_LOAD_BALANCE,
1794	},
1795
1796	{
1797		.name = "sched_relax_domain_level",
1798		.read_s64 = cpuset_read_s64,
1799		.write_s64 = cpuset_write_s64,
1800		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1801	},
1802
1803	{
1804		.name = "memory_migrate",
1805		.read_u64 = cpuset_read_u64,
1806		.write_u64 = cpuset_write_u64,
1807		.private = FILE_MEMORY_MIGRATE,
1808	},
1809
1810	{
1811		.name = "memory_pressure",
1812		.read_u64 = cpuset_read_u64,
1813		.write_u64 = cpuset_write_u64,
1814		.private = FILE_MEMORY_PRESSURE,
1815		.mode = S_IRUGO,
1816	},
1817
1818	{
1819		.name = "memory_spread_page",
1820		.read_u64 = cpuset_read_u64,
1821		.write_u64 = cpuset_write_u64,
1822		.private = FILE_SPREAD_PAGE,
1823	},
1824
1825	{
1826		.name = "memory_spread_slab",
1827		.read_u64 = cpuset_read_u64,
1828		.write_u64 = cpuset_write_u64,
1829		.private = FILE_SPREAD_SLAB,
1830	},
 
1831
1832	{
1833		.name = "memory_pressure_enabled",
1834		.flags = CFTYPE_ONLY_ON_ROOT,
1835		.read_u64 = cpuset_read_u64,
1836		.write_u64 = cpuset_write_u64,
1837		.private = FILE_MEMORY_PRESSURE_ENABLED,
1838	},
1839
1840	{ }	/* terminate */
1841};
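/*
 * Usage sketch (illustrative; the path depends on where the hierarchy is
 * mounted and is an assumption here): each cftype entry above becomes a
 * per-cpuset file.  "cpus" and "mems" take list strings and end up in
 * cpuset_write_resmask(), the boolean files take 0/1 via cpuset_write_u64().
 *
 *	int fd = open("/sys/fs/cgroup/cpuset/mygrp/cpuset.cpus", O_WRONLY);
 *	write(fd, "0-3", 3);		/* dispatched to update_cpumask() */
 *	close(fd);
 */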
1842
1843/*
1844 *	cpuset_css_alloc - allocate a cpuset css
1845 *	cgrp:	control group that the new cpuset will be part of
1846 */
1847
1848static struct cgroup_subsys_state *
1849cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 
1850{
1851	struct cpuset *cs;
 
1852
1853	if (!parent_css)
1854		return &top_cpuset.css;
1855
1856	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
 
1857	if (!cs)
1858		return ERR_PTR(-ENOMEM);
1859	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1860		kfree(cs);
1861		return ERR_PTR(-ENOMEM);
1862	}
1863
1864	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1865	cpumask_clear(cs->cpus_allowed);
1866	nodes_clear(cs->mems_allowed);
1867	fmeter_init(&cs->fmeter);
1868	cs->relax_domain_level = -1;
1869
1870	return &cs->css;
1871}
1872
1873static int cpuset_css_online(struct cgroup_subsys_state *css)
1874{
1875	struct cpuset *cs = css_cs(css);
1876	struct cpuset *parent = parent_cs(cs);
1877	struct cpuset *tmp_cs;
1878	struct cgroup_subsys_state *pos_css;
1879
1880	if (!parent)
1881		return 0;
1882
1883	mutex_lock(&cpuset_mutex);
1884
1885	set_bit(CS_ONLINE, &cs->flags);
1886	if (is_spread_page(parent))
1887		set_bit(CS_SPREAD_PAGE, &cs->flags);
1888	if (is_spread_slab(parent))
1889		set_bit(CS_SPREAD_SLAB, &cs->flags);
1890
1891	number_of_cpusets++;
1892
1893	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
1894		goto out_unlock;
1895
1896	/*
1897	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
1898	 * set.  This flag handling is implemented in cgroup core for
1899	 * historical reasons - the flag may be specified during mount.
1900	 *
1901	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
1902	 * refuse to clone the configuration - thereby refusing to let the
1903	 * task enter, and as a result failing the sys_unshare() or
1904	 * clone() which initiated it.  If this becomes a problem for some
1905	 * users who wish to allow that scenario, then this could be
1906	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
1907	 * (and likewise for mems) to the new cgroup.
1908	 */
1909	rcu_read_lock();
1910	cpuset_for_each_child(tmp_cs, pos_css, parent) {
1911		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
1912			rcu_read_unlock();
1913			goto out_unlock;
1914		}
1915	}
1916	rcu_read_unlock();
1917
1918	mutex_lock(&callback_mutex);
1919	cs->mems_allowed = parent->mems_allowed;
1920	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
1921	mutex_unlock(&callback_mutex);
1922out_unlock:
1923	mutex_unlock(&cpuset_mutex);
1924	return 0;
1925}
1926
1927/*
1928 * If the cpuset being removed has its flag 'sched_load_balance'
1929 * enabled, then simulate turning sched_load_balance off, which
1930 * will call rebuild_sched_domains_locked().
1931 */
1932
1933static void cpuset_css_offline(struct cgroup_subsys_state *css)
1934{
1935	struct cpuset *cs = css_cs(css);
1936
1937	mutex_lock(&cpuset_mutex);
1938
1939	if (is_sched_load_balance(cs))
1940		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1941
1942	number_of_cpusets--;
1943	clear_bit(CS_ONLINE, &cs->flags);
1944
1945	mutex_unlock(&cpuset_mutex);
1946}
1947
1948static void cpuset_css_free(struct cgroup_subsys_state *css)
1949{
1950	struct cpuset *cs = css_cs(css);
1951
1952	free_cpumask_var(cs->cpus_allowed);
1953	kfree(cs);
1954}
1955
1956struct cgroup_subsys cpuset_cgrp_subsys = {
1957	.css_alloc = cpuset_css_alloc,
1958	.css_online = cpuset_css_online,
1959	.css_offline = cpuset_css_offline,
1960	.css_free = cpuset_css_free,
1961	.can_attach = cpuset_can_attach,
1962	.cancel_attach = cpuset_cancel_attach,
1963	.attach = cpuset_attach,
1964	.base_cftypes = files,
1965	.early_init = 1,
1966};
1967
1968/**
1969 * cpuset_init - initialize cpusets at system boot
1970 *
1971 * Description: Initialize top_cpuset and the cpuset internal file system.
1972 **/
1973
1974int __init cpuset_init(void)
1975{
1976	int err = 0;
1977
1978	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
1979		BUG();
1980
1981	cpumask_setall(top_cpuset.cpus_allowed);
1982	nodes_setall(top_cpuset.mems_allowed);
1983
1984	fmeter_init(&top_cpuset.fmeter);
1985	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
1986	top_cpuset.relax_domain_level = -1;
1987
1988	err = register_filesystem(&cpuset_fs_type);
1989	if (err < 0)
1990		return err;
1991
1992	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
1993		BUG();
1994
1995	number_of_cpusets = 1;
1996	return 0;
1997}
1998
1999/*
2000 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
2001 * or memory nodes, we need to walk over the cpuset hierarchy,
2002 * removing that CPU or node from all cpusets.  If this removes the
2003 * last CPU or node from a cpuset, then move the tasks in the empty
2004 * cpuset to its next-highest non-empty parent.
 
2005 */
2006static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 
2007{
2008	struct cpuset *parent;
2009
2010	/*
2011	 * Find its next-highest non-empty parent (the top cpuset
2012	 * has online cpus, so it can't be empty).
2013	 */
2014	parent = parent_cs(cs);
2015	while (cpumask_empty(parent->cpus_allowed) ||
2016			nodes_empty(parent->mems_allowed))
2017		parent = parent_cs(parent);
2018
2019	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2020		printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset ");
2021		pr_cont_cgroup_name(cs->css.cgroup);
2022		pr_cont("\n");
2023	}
2024}
2025
2026/**
2027 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
2028 * @cs: cpuset in interest
2029 *
2030 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
2031 * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
2032 * all its tasks are moved to the nearest ancestor with both resources.
2033 */
2034static void cpuset_hotplug_update_tasks(struct cpuset *cs)
2035{
2036	static cpumask_t off_cpus;
2037	static nodemask_t off_mems;
2038	bool is_empty;
2039	bool sane = cgroup_sane_behavior(cs->css.cgroup);
2040
2041retry:
2042	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
2043
2044	mutex_lock(&cpuset_mutex);
2045
2046	/*
2047	 * We have raced with task attaching. We wait until attaching
2048	 * is finished, so we won't attach a task to an empty cpuset.
2049	 */
2050	if (cs->attach_in_progress) {
2051		mutex_unlock(&cpuset_mutex);
2052		goto retry;
2053	}
2054
2055	cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
2056	nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
2057
2058	mutex_lock(&callback_mutex);
2059	cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
2060	mutex_unlock(&callback_mutex);
2061
2062	/*
2063	 * If the sane_behavior flag is set, we need to update tasks' cpumask
2064	 * so that an empty cpuset takes on an ancestor's cpumask. Otherwise, don't
2065	 * call update_tasks_cpumask() if the cpuset becomes empty, as
2066	 * the tasks in it will be migrated to an ancestor.
2067	 */
2068	if ((sane && cpumask_empty(cs->cpus_allowed)) ||
2069	    (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
2070		update_tasks_cpumask(cs);
2071
2072	mutex_lock(&callback_mutex);
2073	nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
2074	mutex_unlock(&callback_mutex);
2075
2076	/*
2077	 * If the sane_behavior flag is set, we need to update tasks' nodemask
2078	 * so that an empty cpuset takes on an ancestor's nodemask. Otherwise, don't
2079	 * call update_tasks_nodemask() if the cpuset becomes empty, as
2080	 * the tasks in it will be migrated to an ancestor.
2081	 */
2082	if ((sane && nodes_empty(cs->mems_allowed)) ||
2083	    (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
2084		update_tasks_nodemask(cs);
2085
2086	is_empty = cpumask_empty(cs->cpus_allowed) ||
2087		nodes_empty(cs->mems_allowed);
2088
2089	mutex_unlock(&cpuset_mutex);
2090
2091	/*
2092	 * If sane_behavior flag is set, we'll keep tasks in empty cpusets.
2093	 *
2094	 * Otherwise move tasks to the nearest ancestor with execution
2095	 * resources.  This is full cgroup operation which will
2096	 * also call back into cpuset.  Should be done outside any lock.
2097	 */
2098	if (!sane && is_empty)
2099		remove_tasks_in_empty_cpuset(cs);
2100}
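/*
 * Worked example (values illustrative): a cpuset with cpus_allowed = 2-3
 * when CPU 3 is hot-removed ends up with off_cpus = {3}, cpus_allowed
 * shrinks to {2}, and update_tasks_cpumask() repins its tasks.  Had CPU 2
 * gone offline as well, the cpuset would be empty: with sane_behavior the
 * tasks stay put and pick up an ancestor's mask, otherwise
 * remove_tasks_in_empty_cpuset() migrates them to the nearest non-empty
 * ancestor.
 */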
2101
2102/**
2103 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
 
2104 *
2105 * This function is called after either CPU or memory configuration has
2106 * changed and updates cpuset accordingly.  The top_cpuset is always
2107 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
2108 * order to make cpusets transparent (of no effect) on systems that are
2109 * actively using CPU hotplug but making no active use of cpusets.
2110 *
2111 * Non-root cpusets are only affected by offlining.  If any CPUs or memory
2112 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
2113 * all descendants.
2114 *
2115 * Note that CPU offlining during suspend is ignored.  We don't modify
2116 * cpusets across suspend/resume cycles at all.
 
2117 */
2118static void cpuset_hotplug_workfn(struct work_struct *work)
2119{
2120	static cpumask_t new_cpus;
2121	static nodemask_t new_mems;
2122	bool cpus_updated, mems_updated;
2123
2124	mutex_lock(&cpuset_mutex);
2125
2126	/* fetch the available cpus/mems and find out which changed how */
2127	cpumask_copy(&new_cpus, cpu_active_mask);
2128	new_mems = node_states[N_MEMORY];
2129
2130	cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
2131	mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
 
 
2132
2133	/* synchronize cpus_allowed to cpu_active_mask */
2134	if (cpus_updated) {
2135		mutex_lock(&callback_mutex);
2136		cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2137		mutex_unlock(&callback_mutex);
2138		/* we don't mess with cpumasks of tasks in top_cpuset */
2139	}
2140
2141	/* synchronize mems_allowed to N_MEMORY */
2142	if (mems_updated) {
2143		mutex_lock(&callback_mutex);
2144		top_cpuset.mems_allowed = new_mems;
2145		mutex_unlock(&callback_mutex);
2146		update_tasks_nodemask(&top_cpuset);
2147	}
2148
2149	mutex_unlock(&cpuset_mutex);
2150
2151	/* if cpus or mems changed, we need to propagate to descendants */
2152	if (cpus_updated || mems_updated) {
2153		struct cpuset *cs;
2154		struct cgroup_subsys_state *pos_css;
2155
2156		rcu_read_lock();
2157		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
2158			if (cs == &top_cpuset || !css_tryget(&cs->css))
2159				continue;
2160			rcu_read_unlock();
2161
2162			cpuset_hotplug_update_tasks(cs);
2163
2164			rcu_read_lock();
2165			css_put(&cs->css);
2166		}
2167		rcu_read_unlock();
2168	}
2169
2170	/* rebuild sched domains if cpus_allowed has changed */
2171	if (cpus_updated)
2172		rebuild_sched_domains();
2173}
2174
2175void cpuset_update_active_cpus(bool cpu_online)
2176{
2177	/*
2178	 * We're inside the cpu hotplug critical region, which usually nests
2179	 * inside cgroup synchronization.  Bounce actual hotplug processing
2180	 * to a work item to avoid reverse locking order.
2181	 *
2182	 * We still need to do partition_sched_domains() synchronously;
2183	 * otherwise, the scheduler will get confused and put tasks to the
2184	 * dead CPU.  Fall back to the default single domain.
2185	 * cpuset_hotplug_workfn() will rebuild it as necessary.
2186	 */
2187	partition_sched_domains(1, NULL, NULL);
2188	schedule_work(&cpuset_hotplug_work);
2189}
2190
 
2191/*
2192 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
2193 * Call this routine anytime after node_states[N_MEMORY] changes.
2194 * See cpuset_update_active_cpus() for CPU hotplug handling.
2195 */
2196static int cpuset_track_online_nodes(struct notifier_block *self,
2197				unsigned long action, void *arg)
2198{
2199	schedule_work(&cpuset_hotplug_work);
2200	return NOTIFY_OK;
2201}
2202
2203static struct notifier_block cpuset_track_online_nodes_nb = {
2204	.notifier_call = cpuset_track_online_nodes,
2205	.priority = 10,		/* ??! */
2206};
2207
2208/**
2209 * cpuset_init_smp - initialize cpus_allowed
2210 *
2211 * Description: Finish top cpuset after the cpu and node maps are initialized
2212 */
 
2213void __init cpuset_init_smp(void)
2214{
2215	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2216	top_cpuset.mems_allowed = node_states[N_MEMORY];
2217	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
 
2218
2219	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
 
2220}
2221
2222/**
2223 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
2224 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2225 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2226 *
2227 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2228 * attached to the specified @tsk.  Guaranteed to return some non-empty
2229 * subset of cpu_online_mask, even if this means going outside the
2230 * task's cpuset.
2231 **/
2232
2233void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2234{
2235	struct cpuset *cpus_cs;
2236
2237	mutex_lock(&callback_mutex);
2238	rcu_read_lock();
2239	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
2240	guarantee_online_cpus(cpus_cs, pmask);
2241	rcu_read_unlock();
2242	mutex_unlock(&callback_mutex);
2243}
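/*
 * Usage sketch (illustrative, not a call site in this file): a caller that
 * must pin a task to something sane can rely on the non-empty, online
 * guarantee documented above:
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpuset_cpus_allowed(tsk, mask);	/* never empty, always online */
 *		set_cpus_allowed_ptr(tsk, mask);
 *		free_cpumask_var(mask);
 *	}
 */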
2244
2245void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2246{
2247	struct cpuset *cpus_cs;
 
2248
2249	rcu_read_lock();
2250	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
2251	do_set_cpus_allowed(tsk, cpus_cs->cpus_allowed);
 
2252	rcu_read_unlock();
2253
2254	/*
2255	 * We own tsk->cpus_allowed, nobody can change it under us.
2256	 *
2257	 * But we used cs && cs->cpus_allowed lockless and thus can
2258	 * race with cgroup_attach_task() or update_cpumask() and get
2259	 * the wrong tsk->cpus_allowed. However, both cases imply the
2260	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2261	 * which takes task_rq_lock().
2262	 *
2263	 * If we are called after it dropped the lock we must see all
2264	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
2265	 * set any mask even if it is not right from task_cs() pov,
2266	 * the pending set_cpus_allowed_ptr() will fix things.
2267	 *
2268	 * select_fallback_rq() will fix things up and set cpu_possible_mask
2269	 * if required.
2270	 */
2271}
2272
2273void cpuset_init_current_mems_allowed(void)
2274{
2275	nodes_setall(current->mems_allowed);
2276}
2277
2278/**
2279 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
2280 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2281 *
2282 * Description: Returns the nodemask_t mems_allowed of the cpuset
2283 * attached to the specified @tsk.  Guaranteed to return some non-empty
2284 * subset of node_states[N_MEMORY], even if this means going outside the
2285 * task's cpuset.
2286 **/
2287
2288nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2289{
2290	struct cpuset *mems_cs;
2291	nodemask_t mask;
2292
2293	mutex_lock(&callback_mutex);
2294	rcu_read_lock();
2295	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
2296	guarantee_online_mems(mems_cs, &mask);
2297	rcu_read_unlock();
2298	mutex_unlock(&callback_mutex);
2299
2300	return mask;
2301}
2302
2303/**
2304 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2305 * @nodemask: the nodemask to be checked
2306 *
2307 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2308 */
2309int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2310{
2311	return nodes_intersects(*nodemask, current->mems_allowed);
2312}
2313
2314/*
2315 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2316 * mem_hardwall ancestor to the specified cpuset.  Call holding
2317 * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
2318 * (an unusual configuration), then returns the root cpuset.
2319 */
2320static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
2321{
2322	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
2323		cs = parent_cs(cs);
2324	return cs;
2325}
2326
2327/**
2328 * cpuset_node_allowed_softwall - Can we allocate on a memory node?
2329 * @node: is this an allowed node?
2330 * @gfp_mask: memory allocation flags
2331 *
2332 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2333 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2334 * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
2335 * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
2336 * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
2337 * flag, yes.
2338 * Otherwise, no.
2339 *
2340 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2341 * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
2342 * might sleep, and might allow a node from an enclosing cpuset.
2343 *
2344 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2345 * cpusets, and never sleeps.
2346 *
2347 * The __GFP_THISNODE placement logic is really handled elsewhere,
2348 * by forcibly using a zonelist starting at a specified node, and by
2349 * (in get_page_from_freelist()) refusing to consider the zones for
2350 * any node on the zonelist except the first.  By the time any such
2351 * calls get to this routine, we should just shut up and say 'yes'.
2352 *
2353 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2354 * and do not allow allocations outside the current task's cpuset
2355 * unless the task has been OOM killed and is marked TIF_MEMDIE.
2356 * GFP_KERNEL allocations are not so marked, so can escape to the
2357 * nearest enclosing hardwalled ancestor cpuset.
2358 *
2359 * Scanning up parent cpusets requires callback_mutex.  The
2360 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2361 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2362 * current task's mems_allowed came up empty on the first pass over
2363 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
2364 * cpuset are short of memory, might require taking the callback_mutex
2365 * mutex.
2366 *
2367 * The first call here from mm/page_alloc:get_page_from_freelist()
2368 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2369 * so no allocation on a node outside the cpuset is allowed (unless
2370 * in interrupt, of course).
2371 *
2372 * The second pass through get_page_from_freelist() doesn't even call
2373 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2374 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2375 * in alloc_flags.  That logic and the checks below have the combined
2376 * effect that:
2377 *	in_interrupt - any node ok (current task context irrelevant)
2378 *	GFP_ATOMIC   - any node ok
2379 *	TIF_MEMDIE   - any node ok
2380 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
2381 *	GFP_USER     - only nodes in current task's mems_allowed ok.
2382 *
2383 * Rule:
2384 *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
2385 *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2386 *    the code that might scan up ancestor cpusets and sleep.
2387 */
2388int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2389{
2390	struct cpuset *cs;		/* current cpuset ancestors */
2391	int allowed;			/* is allocation in zone z allowed? */
2392
2393	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2394		return 1;
2395	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2396	if (node_isset(node, current->mems_allowed))
2397		return 1;
2398	/*
2399	 * Allow tasks that have access to memory reserves because they have
2400	 * been OOM killed to get memory anywhere.
2401	 */
2402	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2403		return 1;
2404	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
2405		return 0;
2406
2407	if (current->flags & PF_EXITING) /* Let dying task have memory */
2408		return 1;
2409
2410	/* Not hardwall and node outside mems_allowed: scan up cpusets */
2411	mutex_lock(&callback_mutex);
2412
2413	rcu_read_lock();
2414	cs = nearest_hardwall_ancestor(task_cs(current));
2415	allowed = node_isset(node, cs->mems_allowed);
2416	rcu_read_unlock();
2417
 
2418	mutex_unlock(&callback_mutex);
2419	return allowed;
2420}
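/*
 * Call sketch (illustrative): the gfp flags decide which rule set above
 * applies.  cpuset_node_allowed_softwall()/_hardwall() are the inline
 * wrappers in include/linux/cpuset.h around the __ variants here.
 *
 *	if (cpuset_node_allowed_softwall(nid, GFP_KERNEL))
 *		;	/* GFP_KERNEL may escape to the nearest hardwalled ancestor */
 *	if (cpuset_node_allowed_hardwall(nid, GFP_USER))
 *		;	/* hardwall check: current->mems_allowed only, never sleeps */
 */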
2421
2422/*
2423 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2424 * @node: is this an allowed node?
2425 * @gfp_mask: memory allocation flags
2426 *
2427 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2428 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2429 * yes.  If the task has been OOM killed and has access to memory reserves as
2430 * specified by the TIF_MEMDIE flag, yes.
2431 * Otherwise, no.
2432 *
2433 * The __GFP_THISNODE placement logic is really handled elsewhere,
2434 * by forcibly using a zonelist starting at a specified node, and by
2435 * (in get_page_from_freelist()) refusing to consider the zones for
2436 * any node on the zonelist except the first.  By the time any such
2437 * calls get to this routine, we should just shut up and say 'yes'.
2438 *
2439 * Unlike the cpuset_node_allowed_softwall() variant, above,
2440 * this variant requires that the node be in the current task's
2441 * mems_allowed or that we're in interrupt.  It does not scan up the
2442 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2443 * It never sleeps.
2444 */
2445int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2446{
2447	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2448		return 1;
2449	if (node_isset(node, current->mems_allowed))
2450		return 1;
2451	/*
2452	 * Allow tasks that have access to memory reserves because they have
2453	 * been OOM killed to get memory anywhere.
2454	 */
2455	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2456		return 1;
2457	return 0;
2458}
2459
2460/**
2461 * cpuset_mem_spread_node() - On which node to begin search for a file page
2462 * cpuset_slab_spread_node() - On which node to begin search for a slab page
2463 *
2464 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2465 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2466 * and if the memory allocation used cpuset_mem_spread_node()
2467 * to determine on which node to start looking, as it will for
2468 * certain page cache or slab cache pages such as used for file
2469 * system buffers and inode caches, then instead of starting on the
2470 * local node to look for a free page, rather spread the starting
2471 * node around the task's mems_allowed nodes.
2472 *
2473 * We don't have to worry about the returned node being offline
2474 * because "it can't happen", and even if it did, it would be ok.
2475 *
2476 * The routines calling guarantee_online_mems() are careful to
2477 * only set nodes in task->mems_allowed that are online.  So it
2478 * should not be possible for the following code to return an
2479 * offline node.  But if it did, that would be ok, as this routine
2480 * is not returning the node where the allocation must be, only
2481 * the node where the search should start.  The zonelist passed to
2482 * __alloc_pages() will include all nodes.  If the slab allocator
2483 * is passed an offline node, it will fall back to the local node.
2484 * See kmem_cache_alloc_node().
2485 */
2486
2487static int cpuset_spread_node(int *rotor)
2488{
2489	int node;
2490
2491	node = next_node(*rotor, current->mems_allowed);
2492	if (node == MAX_NUMNODES)
2493		node = first_node(current->mems_allowed);
2494	*rotor = node;
2495	return node;
2496}
2497
2498int cpuset_mem_spread_node(void)
2499{
2500	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2501		current->cpuset_mem_spread_rotor =
2502			node_random(&current->mems_allowed);
2503
2504	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2505}
2506
2507int cpuset_slab_spread_node(void)
2508{
2509	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2510		current->cpuset_slab_spread_rotor =
2511			node_random(&current->mems_allowed);
2512
2513	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2514}
2515
2516EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
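/*
 * Worked example (illustrative): with current->mems_allowed = { 0, 2, 5 }
 * and a rotor currently at node 0, successive calls return 2, 5, 0, 2, ...
 * so spread-enabled page cache and slab allocations start their node
 * search round robin over the allowed nodes instead of always locally.
 */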
2517
2518/**
2519 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2520 * @tsk1: pointer to task_struct of some task.
2521 * @tsk2: pointer to task_struct of some other task.
2522 *
2523 * Description: Return true if @tsk1's mems_allowed intersects the
2524 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
2525 * one task's memory usage might impact the memory available
2526 * to the other.
2527 **/
2528
2529int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2530				   const struct task_struct *tsk2)
2531{
2532	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2533}
2534
2535#define CPUSET_NODELIST_LEN	(256)
2536
2537/**
2538 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2539 * @task: pointer to task_struct of some task.
2540 *
2541 * Description: Prints @task's name, cpuset name, and cached copy of its
2542 * mems_allowed to the kernel log.
 
2543 */
2544void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2545{
2546	 /* Statically allocated to prevent using excess stack. */
2547	static char cpuset_nodelist[CPUSET_NODELIST_LEN];
2548	static DEFINE_SPINLOCK(cpuset_buffer_lock);
2549	struct cgroup *cgrp;
2550
 
2551	spin_lock(&cpuset_buffer_lock);
2552	rcu_read_lock();
2553
2554	cgrp = task_cs(tsk)->css.cgroup;
2555	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2556			   tsk->mems_allowed);
2557	printk(KERN_INFO "%s cpuset=", tsk->comm);
2558	pr_cont_cgroup_name(cgrp);
2559	pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
2560
2561	rcu_read_unlock();
2562	spin_unlock(&cpuset_buffer_lock);
2563}
2564
2565/*
2566 * Collection of memory_pressure is suppressed unless
2567 * this flag is enabled by writing "1" to the special
2568 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2569 */
2570
2571int cpuset_memory_pressure_enabled __read_mostly;
2572
2573/**
2574 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2575 *
2576 * Keep a running average of the rate of synchronous (direct)
2577 * page reclaim efforts initiated by tasks in each cpuset.
2578 *
2579 * This represents the rate at which some task in the cpuset
2580 * ran low on memory on all nodes it was allowed to use, and
2581 * had to enter the kernels page reclaim code in an effort to
2582 * create more free memory by tossing clean pages or swapping
2583 * or writing dirty pages.
2584 *
2585 * Display to user space in the per-cpuset read-only file
2586 * "memory_pressure".  Value displayed is an integer
2587 * representing the recent rate of entry into the synchronous
2588 * (direct) page reclaim by any task attached to the cpuset.
2589 **/
2590
2591void __cpuset_memory_pressure_bump(void)
2592{
2593	rcu_read_lock();
2594	fmeter_markevent(&task_cs(current)->fmeter);
2595	rcu_read_unlock();
2596}
2597
2598#ifdef CONFIG_PROC_PID_CPUSET
2599/*
2600 * proc_cpuset_show()
2601 *  - Print task's cpuset path into seq_file.
2602 *  - Used for /proc/<pid>/cpuset.
2603 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2604 *    doesn't really matter if tsk->cpuset changes after we read it,
2605 *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
2606 *    anyway.
2607 */
2608int proc_cpuset_show(struct seq_file *m, void *unused_v)
2609{
2610	struct pid *pid;
2611	struct task_struct *tsk;
2612	char *buf, *p;
2613	struct cgroup_subsys_state *css;
2614	int retval;
2615
2616	retval = -ENOMEM;
2617	buf = kmalloc(PATH_MAX, GFP_KERNEL);
2618	if (!buf)
2619		goto out;
2620
2621	retval = -ESRCH;
2622	pid = m->private;
2623	tsk = get_pid_task(pid, PIDTYPE_PID);
2624	if (!tsk)
2625		goto out_free;
2626
2627	retval = -ENAMETOOLONG;
2628	rcu_read_lock();
2629	css = task_css(tsk, cpuset_cgrp_id);
2630	p = cgroup_path(css->cgroup, buf, PATH_MAX);
2631	rcu_read_unlock();
2632	if (!p)
2633		goto out_put_task;
2634	seq_puts(m, p);
2635	seq_putc(m, '\n');
2636	retval = 0;
2637out_put_task:
2638	put_task_struct(tsk);
2639out_free:
2640	kfree(buf);
2641out:
2642	return retval;
2643}
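/*
 * Example output (path illustrative): for a task attached to the "mygrp"
 * cpuset, /proc/<pid>/cpuset reads back simply as:
 *
 *	/mygrp
 */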
2644#endif /* CONFIG_PROC_PID_CPUSET */
2645
2646/* Display task mems_allowed in /proc/<pid>/status file. */
2647void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2648{
2649	seq_printf(m, "Mems_allowed:\t");
2650	seq_nodemask(m, &task->mems_allowed);
2651	seq_printf(m, "\n");
2652	seq_printf(m, "Mems_allowed_list:\t");
2653	seq_nodemask_list(m, &task->mems_allowed);
2654	seq_printf(m, "\n");
2655}
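/*
 * Example /proc/<pid>/status output produced by the above (the mask value
 * is illustrative, for a small two-node system):
 *
 *	Mems_allowed:	00000000,00000003
 *	Mems_allowed_list:	0-1
 */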
v3.1
   1/*
   2 *  kernel/cpuset.c
   3 *
   4 *  Processor and Memory placement constraints for sets of tasks.
   5 *
   6 *  Copyright (C) 2003 BULL SA.
   7 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
   8 *  Copyright (C) 2006 Google, Inc
   9 *
  10 *  Portions derived from Patrick Mochel's sysfs code.
  11 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
  12 *
  13 *  2003-10-10 Written by Simon Derr.
  14 *  2003-10-22 Updates by Stephen Hemminger.
  15 *  2004 May-July Rework by Paul Jackson.
  16 *  2006 Rework by Paul Menage to use generic cgroups
  17 *  2008 Rework of the scheduler domains and CPU hotplug handling
  18 *       by Max Krasnyansky
  19 *
  20 *  This file is subject to the terms and conditions of the GNU General Public
  21 *  License.  See the file COPYING in the main directory of the Linux
  22 *  distribution for more details.
  23 */
  24
  25#include <linux/cpu.h>
  26#include <linux/cpumask.h>
  27#include <linux/cpuset.h>
  28#include <linux/err.h>
  29#include <linux/errno.h>
  30#include <linux/file.h>
  31#include <linux/fs.h>
  32#include <linux/init.h>
  33#include <linux/interrupt.h>
  34#include <linux/kernel.h>
  35#include <linux/kmod.h>
  36#include <linux/list.h>
  37#include <linux/mempolicy.h>
  38#include <linux/mm.h>
  39#include <linux/memory.h>
  40#include <linux/module.h>
  41#include <linux/mount.h>
  42#include <linux/namei.h>
  43#include <linux/pagemap.h>
  44#include <linux/proc_fs.h>
  45#include <linux/rcupdate.h>
  46#include <linux/sched.h>
  47#include <linux/seq_file.h>
  48#include <linux/security.h>
  49#include <linux/slab.h>
  50#include <linux/spinlock.h>
  51#include <linux/stat.h>
  52#include <linux/string.h>
  53#include <linux/time.h>
  54#include <linux/backing-dev.h>
  55#include <linux/sort.h>
  56
  57#include <asm/uaccess.h>
  58#include <linux/atomic.h>
  59#include <linux/mutex.h>
  60#include <linux/workqueue.h>
  61#include <linux/cgroup.h>
  62
  63/*
  64 * Workqueue for cpuset related tasks.
  65 *
  66 * Using kevent workqueue may cause deadlock when memory_migrate
  67 * is set. So we create a separate workqueue thread for cpuset.
  68 */
  69static struct workqueue_struct *cpuset_wq;
  70
  71/*
  72 * Tracks how many cpusets are currently defined in system.
  73 * When there is only one cpuset (the root cpuset) we can
  74 * short circuit some hooks.
  75 */
  76int number_of_cpusets __read_mostly;
  77
  78/* Forward declare cgroup structures */
  79struct cgroup_subsys cpuset_subsys;
  80struct cpuset;
  81
  82/* See "Frequency meter" comments, below. */
  83
  84struct fmeter {
  85	int cnt;		/* unprocessed events count */
  86	int val;		/* most recent output value */
  87	time_t time;		/* clock (secs) when val computed */
  88	spinlock_t lock;	/* guards read or write of above */
  89};
  90
  91struct cpuset {
  92	struct cgroup_subsys_state css;
  93
  94	unsigned long flags;		/* "unsigned long" so bitops work */
  95	cpumask_var_t cpus_allowed;	/* CPUs allowed to tasks in cpuset */
  96	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
  97
  98	struct cpuset *parent;		/* my parent */
  99
 100	struct fmeter fmeter;		/* memory_pressure filter */
 101
 102	/* partition number for rebuild_sched_domains() */
 103	int pn;
 104
 105	/* for custom sched domain */
 106	int relax_domain_level;
 107
 108	/* used for walking a cpuset hierarchy */
 109	struct list_head stack_list;
 110};
 111
 112/* Retrieve the cpuset for a cgroup */
 113static inline struct cpuset *cgroup_cs(struct cgroup *cont)
 114{
 115	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
 116			    struct cpuset, css);
 117}
 118
 119/* Retrieve the cpuset for a task */
 120static inline struct cpuset *task_cs(struct task_struct *task)
 121{
 122	return container_of(task_subsys_state(task, cpuset_subsys_id),
 123			    struct cpuset, css);
 124}
 
 
 125
 126/* bits in struct cpuset flags field */
 127typedef enum {
 
 128	CS_CPU_EXCLUSIVE,
 129	CS_MEM_EXCLUSIVE,
 130	CS_MEM_HARDWALL,
 131	CS_MEMORY_MIGRATE,
 132	CS_SCHED_LOAD_BALANCE,
 133	CS_SPREAD_PAGE,
 134	CS_SPREAD_SLAB,
 135} cpuset_flagbits_t;
 136
 137/* convenient tests for these bits */
 138static inline int is_cpu_exclusive(const struct cpuset *cs)
 139{
 140	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
 141}
 142
 143static inline int is_mem_exclusive(const struct cpuset *cs)
 144{
 145	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 146}
 147
 148static inline int is_mem_hardwall(const struct cpuset *cs)
 149{
 150	return test_bit(CS_MEM_HARDWALL, &cs->flags);
 151}
 152
 153static inline int is_sched_load_balance(const struct cpuset *cs)
 154{
 155	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 156}
 157
 158static inline int is_memory_migrate(const struct cpuset *cs)
 159{
 160	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
 161}
 162
 163static inline int is_spread_page(const struct cpuset *cs)
 164{
 165	return test_bit(CS_SPREAD_PAGE, &cs->flags);
 166}
 167
 168static inline int is_spread_slab(const struct cpuset *cs)
 169{
 170	return test_bit(CS_SPREAD_SLAB, &cs->flags);
 171}
 172
 173static struct cpuset top_cpuset = {
 174	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
 
 175};
 176
 177/*
 178 * There are two global mutexes guarding cpuset structures.  The first
 179 * is the main control groups cgroup_mutex, accessed via
 180 * cgroup_lock()/cgroup_unlock().  The second is the cpuset-specific
 181 * callback_mutex, below. They can nest.  It is ok to first take
 182 * cgroup_mutex, then nest callback_mutex.  We also require taking
 183 * task_lock() when dereferencing a task's cpuset pointer.  See "The
 184 * task_lock() exception", at the end of this comment.
 185 *
 186 * A task must hold both mutexes to modify cpusets.  If a task
 187 * holds cgroup_mutex, then it blocks others wanting that mutex,
 188 * ensuring that it is the only task able to also acquire callback_mutex
 189 * and be able to modify cpusets.  It can perform various checks on
 190 * the cpuset structure first, knowing nothing will change.  It can
 191 * also allocate memory while just holding cgroup_mutex.  While it is
 192 * performing these checks, various callback routines can briefly
 193 * acquire callback_mutex to query cpusets.  Once it is ready to make
 194 * the changes, it takes callback_mutex, blocking everyone else.
 195 *
 196 * Calls to the kernel memory allocator can not be made while holding
 197 * callback_mutex, as that would risk double tripping on callback_mutex
 198 * from one of the callbacks into the cpuset code from within
 199 * __alloc_pages().
 200 *
 201 * If a task is only holding callback_mutex, then it has read-only
 202 * access to cpusets.
 203 *
 204 * Now, the task_struct fields mems_allowed and mempolicy may be changed
205 * by other tasks; we use alloc_lock in the task_struct fields to protect
 206 * them.
 207 *
 208 * The cpuset_common_file_read() handlers only hold callback_mutex across
 209 * small pieces of code, such as when reading out possibly multi-word
 210 * cpumasks and nodemasks.
 211 *
 212 * Accessing a task's cpuset should be done in accordance with the
 213 * guidelines for accessing subsystem state in kernel/cgroup.c
 214 */
 215
 
 216static DEFINE_MUTEX(callback_mutex);
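/*
 * Sketch of the update pattern described above (illustrative only, not a
 * function in this file): writers take cgroup_mutex for the slow work and
 * callback_mutex only around the brief publication of new values.
 *
 *	cgroup_lock();				/* exclude other cpuset writers */
 *	trialcs = alloc_trial_cpuset(cs);	/* allocation: only cgroup_mutex held */
 *	... validate and fill in trialcs ...
 *	mutex_lock(&callback_mutex);		/* briefly block the readers */
 *	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 *	mutex_unlock(&callback_mutex);
 *	cgroup_unlock();
 */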
 217
 218/*
 219 * cpuset_buffer_lock protects both the cpuset_name and cpuset_nodelist
 220 * buffers.  They are statically allocated to prevent using excess stack
 221 * when calling cpuset_print_task_mems_allowed().
 222 */
 223#define CPUSET_NAME_LEN		(128)
 224#define	CPUSET_NODELIST_LEN	(256)
 225static char cpuset_name[CPUSET_NAME_LEN];
 226static char cpuset_nodelist[CPUSET_NODELIST_LEN];
 227static DEFINE_SPINLOCK(cpuset_buffer_lock);
 228
 229/*
 230 * This is ugly, but preserves the userspace API for existing cpuset
 231 * users. If someone tries to mount the "cpuset" filesystem, we
 232 * silently switch it to mount "cgroup" instead
 233 */
 234static struct dentry *cpuset_mount(struct file_system_type *fs_type,
 235			 int flags, const char *unused_dev_name, void *data)
 236{
 237	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
 238	struct dentry *ret = ERR_PTR(-ENODEV);
 239	if (cgroup_fs) {
 240		char mountopts[] =
 241			"cpuset,noprefix,"
 242			"release_agent=/sbin/cpuset_release_agent";
 243		ret = cgroup_fs->mount(cgroup_fs, flags,
 244					   unused_dev_name, mountopts);
 245		put_filesystem(cgroup_fs);
 246	}
 247	return ret;
 248}
 249
 250static struct file_system_type cpuset_fs_type = {
 251	.name = "cpuset",
 252	.mount = cpuset_mount,
 253};
 254
 255/*
 256 * Return in pmask the portion of a cpusets's cpus_allowed that
 257 * are online.  If none are online, walk up the cpuset hierarchy
 258 * until we find one that does have some online cpus.  If we get
 259 * all the way to the top and still haven't found any online cpus,
 260 * return cpu_online_map.  Or if passed a NULL cs from an exit'ing
 261 * task, return cpu_online_map.
 262 *
 263 * One way or another, we guarantee to return some non-empty subset
 264 * of cpu_online_map.
 265 *
 266 * Call with callback_mutex held.
 267 */
 268
 269static void guarantee_online_cpus(const struct cpuset *cs,
 270				  struct cpumask *pmask)
 271{
 272	while (cs && !cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
 273		cs = cs->parent;
 274	if (cs)
 275		cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
 276	else
 277		cpumask_copy(pmask, cpu_online_mask);
 278	BUG_ON(!cpumask_intersects(pmask, cpu_online_mask));
 279}
 280
 281/*
 282 * Return in *pmask the portion of a cpusets's mems_allowed that
 283 * are online, with memory.  If none are online with memory, walk
 284 * up the cpuset hierarchy until we find one that does have some
 285 * online mems.  If we get all the way to the top and still haven't
 286 * found any online mems, return node_states[N_HIGH_MEMORY].
 287 *
 288 * One way or another, we guarantee to return some non-empty subset
 289 * of node_states[N_HIGH_MEMORY].
 290 *
 291 * Call with callback_mutex held.
 292 */
 293
 294static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 295{
 296	while (cs && !nodes_intersects(cs->mems_allowed,
 297					node_states[N_HIGH_MEMORY]))
 298		cs = cs->parent;
 299	if (cs)
 300		nodes_and(*pmask, cs->mems_allowed,
 301					node_states[N_HIGH_MEMORY]);
 302	else
 303		*pmask = node_states[N_HIGH_MEMORY];
 304	BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
 305}
 306
 307/*
 308 * update task's spread flag if cpuset's page/slab spread flag is set
 309 *
 310 * Called with callback_mutex/cgroup_mutex held
 311 */
 312static void cpuset_update_task_spread_flag(struct cpuset *cs,
 313					struct task_struct *tsk)
 314{
 315	if (is_spread_page(cs))
 316		tsk->flags |= PF_SPREAD_PAGE;
 317	else
 318		tsk->flags &= ~PF_SPREAD_PAGE;
 319	if (is_spread_slab(cs))
 320		tsk->flags |= PF_SPREAD_SLAB;
 321	else
 322		tsk->flags &= ~PF_SPREAD_SLAB;
 323}
 324
 325/*
 326 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 327 *
 328 * One cpuset is a subset of another if all its allowed CPUs and
 329 * Memory Nodes are a subset of the other, and its exclusive flags
 330 * are only set if the other's are set.  Call holding cgroup_mutex.
 331 */
 332
 333static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 334{
 335	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
 336		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 337		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 338		is_mem_exclusive(p) <= is_mem_exclusive(q);
 339}
 340
 341/**
 342 * alloc_trial_cpuset - allocate a trial cpuset
 343 * @cs: the cpuset that the trial cpuset duplicates
 344 */
 345static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
 346{
 347	struct cpuset *trial;
 348
 349	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
 350	if (!trial)
 351		return NULL;
 352
 353	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
 354		kfree(trial);
 355		return NULL;
 356	}
 357	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
 358
 359	return trial;
 360}
 361
 362/**
 363 * free_trial_cpuset - free the trial cpuset
 364 * @trial: the trial cpuset to be freed
 365 */
 366static void free_trial_cpuset(struct cpuset *trial)
 367{
 368	free_cpumask_var(trial->cpus_allowed);
 369	kfree(trial);
 370}
 371
 372/*
 373 * validate_change() - Used to validate that any proposed cpuset change
 374 *		       follows the structural rules for cpusets.
 375 *
 376 * If we replaced the flag and mask values of the current cpuset
 377 * (cur) with those values in the trial cpuset (trial), would
 378 * our various subset and exclusive rules still be valid?  Presumes
 379 * cgroup_mutex held.
 380 *
 381 * 'cur' is the address of an actual, in-use cpuset.  Operations
 382 * such as list traversal that depend on the actual address of the
 383 * cpuset in the list must use cur below, not trial.
 384 *
 385 * 'trial' is the address of bulk structure copy of cur, with
 386 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 387 * or flags changed to new, trial values.
 388 *
 389 * Return 0 if valid, -errno if not.
 390 */
 391
 392static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 393{
 394	struct cgroup *cont;
 395	struct cpuset *c, *par;
 396
 397	/* Each of our child cpusets must be a subset of us */
 398	list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
 399		if (!is_cpuset_subset(cgroup_cs(cont), trial))
 400			return -EBUSY;
 401	}
 402
 403	/* Remaining checks don't apply to root cpuset */
 
 404	if (cur == &top_cpuset)
 405		return 0;
 406
 407	par = cur->parent;
 408
 409	/* We must be a subset of our parent cpuset */
 
 410	if (!is_cpuset_subset(trial, par))
 411		return -EACCES;
 412
 413	/*
 414	 * If either I or some sibling (!= me) is exclusive, we can't
 415	 * overlap
 416	 */
 417	list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
 418		c = cgroup_cs(cont);
 419		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 420		    c != cur &&
 421		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
 422			return -EINVAL;
 423		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 424		    c != cur &&
 425		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
 426			return -EINVAL;
 427	}
 428
 429	/* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
 430	if (cgroup_task_count(cur->css.cgroup)) {
 431		if (cpumask_empty(trial->cpus_allowed) ||
 432		    nodes_empty(trial->mems_allowed)) {
 433			return -ENOSPC;
 434		}
 435	}
 436
 437	return 0;
 438}
 439
 440#ifdef CONFIG_SMP
 441/*
 442 * Helper routine for generate_sched_domains().
 443 * Do cpusets a, b have overlapping cpus_allowed masks?
 444 */
 445static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 446{
 447	return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
 448}
 449
 450static void
 451update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 452{
 453	if (dattr->relax_domain_level < c->relax_domain_level)
 454		dattr->relax_domain_level = c->relax_domain_level;
 455	return;
 456}
 457
 458static void
 459update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
 460{
 461	LIST_HEAD(q);
 
 462
 463	list_add(&c->stack_list, &q);
 464	while (!list_empty(&q)) {
 465		struct cpuset *cp;
 466		struct cgroup *cont;
 467		struct cpuset *child;
 468
 469		cp = list_first_entry(&q, struct cpuset, stack_list);
 470		list_del(q.next);
 471
 472		if (cpumask_empty(cp->cpus_allowed))
 
 
 473			continue;
 
 474
 475		if (is_sched_load_balance(cp))
 476			update_domain_attr(dattr, cp);
 477
 478		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 479			child = cgroup_cs(cont);
 480			list_add_tail(&child->stack_list, &q);
 481		}
 482	}
 
 483}
 484
 485/*
 486 * generate_sched_domains()
 487 *
488 * This function builds a partial partition of the system's CPUs.
 489 * A 'partial partition' is a set of non-overlapping subsets whose
 490 * union is a subset of that set.
 491 * The output of this function needs to be passed to kernel/sched.c
 492 * partition_sched_domains() routine, which will rebuild the scheduler's
 493 * load balancing domains (sched domains) as specified by that partial
 494 * partition.
 495 *
 496 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
 497 * for a background explanation of this.
 498 *
 499 * Does not return errors, on the theory that the callers of this
 500 * routine would rather not worry about failures to rebuild sched
 501 * domains when operating in the severe memory shortage situations
 502 * that could cause allocation failures below.
 503 *
 504 * Must be called with cgroup_lock held.
 505 *
 506 * The three key local variables below are:
 507 *    q  - a linked-list queue of cpuset pointers, used to implement a
 508 *	   top-down scan of all cpusets.  This scan loads a pointer
 509 *	   to each cpuset marked is_sched_load_balance into the
510 *	   array 'csa'.  For our purposes, rebuilding the scheduler's
 511 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 512 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 513 *	   that need to be load balanced, for convenient iterative
 514 *	   access by the subsequent code that finds the best partition,
 515 *	   i.e the set of domains (subsets) of CPUs such that the
 516 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 517 *	   is a subset of one of these domains, while there are as
 518 *	   many such domains as possible, each as small as possible.
 519 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 520 *	   the kernel/sched.c routine partition_sched_domains() in a
 521 *	   convenient format, that can be easily compared to the prior
 522 *	   value to determine what partition elements (sched domains)
 523 *	   were changed (added or removed.)
 524 *
 525 * Finding the best partition (set of domains):
 526 *	The triple nested loops below over i, j, k scan over the
 527 *	load balanced cpusets (using the array of cpuset pointers in
 528 *	csa[]) looking for pairs of cpusets that have overlapping
 529 *	cpus_allowed, but which don't have the same 'pn' partition
530 *	number, and puts them in the same partition number.  It keeps
 531 *	looping on the 'restart' label until it can no longer find
 532 *	any such pairs.
 533 *
 534 *	The union of the cpus_allowed masks from the set of
 535 *	all cpusets having the same 'pn' value then form the one
 536 *	element of the partition (one sched domain) to be passed to
 537 *	partition_sched_domains().
 538 */
 539static int generate_sched_domains(cpumask_var_t **domains,
 540			struct sched_domain_attr **attributes)
 541{
 542	LIST_HEAD(q);		/* queue of cpusets to be scanned */
 543	struct cpuset *cp;	/* scans q */
 544	struct cpuset **csa;	/* array of all cpuset ptrs */
 545	int csn;		/* how many cpuset ptrs in csa so far */
 546	int i, j, k;		/* indices for partition finding loops */
 547	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
 548	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 549	int ndoms = 0;		/* number of sched domains in result */
 550	int nslot;		/* next empty doms[] struct cpumask slot */
 
 551
 552	doms = NULL;
 553	dattr = NULL;
 554	csa = NULL;
 555
 556	/* Special case for the 99% of systems with one, full, sched domain */
 557	if (is_sched_load_balance(&top_cpuset)) {
 558		ndoms = 1;
 559		doms = alloc_sched_domains(ndoms);
 560		if (!doms)
 561			goto done;
 562
 563		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 564		if (dattr) {
 565			*dattr = SD_ATTR_INIT;
 566			update_domain_attr_tree(dattr, &top_cpuset);
 567		}
 568		cpumask_copy(doms[0], top_cpuset.cpus_allowed);
 569
 570		goto done;
 571	}
 572
 573	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
 574	if (!csa)
 575		goto done;
 576	csn = 0;
 577
 578	list_add(&top_cpuset.stack_list, &q);
 579	while (!list_empty(&q)) {
 580		struct cgroup *cont;
 581		struct cpuset *child;   /* scans child cpusets of cp */
 582
 583		cp = list_first_entry(&q, struct cpuset, stack_list);
 584		list_del(q.next);
 585
 586		if (cpumask_empty(cp->cpus_allowed))
 587			continue;
 588
 589		/*
 590		 * All child cpusets contain a subset of the parent's cpus, so
 591		 * just skip them, and then we call update_domain_attr_tree()
 592		 * to calc relax_domain_level of the corresponding sched
 593		 * domain.
 
 
 594		 */
 595		if (is_sched_load_balance(cp)) {
 596			csa[csn++] = cp;
 597			continue;
 598		}
 599
 600		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
 601			child = cgroup_cs(cont);
 602			list_add_tail(&child->stack_list, &q);
 603		}
 604  	}
 605
 606	for (i = 0; i < csn; i++)
 607		csa[i]->pn = i;
 608	ndoms = csn;
 609
 610restart:
 611	/* Find the best partition (set of sched domains) */
 612	for (i = 0; i < csn; i++) {
 613		struct cpuset *a = csa[i];
 614		int apn = a->pn;
 615
 616		for (j = 0; j < csn; j++) {
 617			struct cpuset *b = csa[j];
 618			int bpn = b->pn;
 619
 620			if (apn != bpn && cpusets_overlap(a, b)) {
 621				for (k = 0; k < csn; k++) {
 622					struct cpuset *c = csa[k];
 623
 624					if (c->pn == bpn)
 625						c->pn = apn;
 626				}
 627				ndoms--;	/* one less element */
 628				goto restart;
 629			}
 630		}
 631	}
 632
 633	/*
 634	 * Now we know how many domains to create.
 635	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 636	 */
 637	doms = alloc_sched_domains(ndoms);
 638	if (!doms)
 639		goto done;
 640
 641	/*
 642	 * The rest of the code, including the scheduler, can deal with
 643	 * dattr==NULL case. No need to abort if alloc fails.
 644	 */
 645	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
 646
 647	for (nslot = 0, i = 0; i < csn; i++) {
 648		struct cpuset *a = csa[i];
 649		struct cpumask *dp;
 650		int apn = a->pn;
 651
 652		if (apn < 0) {
 653			/* Skip completed partitions */
 654			continue;
 655		}
 656
 657		dp = doms[nslot];
 658
 659		if (nslot == ndoms) {
 660			static int warnings = 10;
 661			if (warnings) {
 662				printk(KERN_WARNING
 663				 "rebuild_sched_domains confused:"
 664				  " nslot %d, ndoms %d, csn %d, i %d,"
 665				  " apn %d\n",
 666				  nslot, ndoms, csn, i, apn);
 667				warnings--;
 668			}
 669			continue;
 670		}
 671
 672		cpumask_clear(dp);
 673		if (dattr)
 674			*(dattr + nslot) = SD_ATTR_INIT;
 675		for (j = i; j < csn; j++) {
 676			struct cpuset *b = csa[j];
 677
 678			if (apn == b->pn) {
 679				cpumask_or(dp, dp, b->cpus_allowed);
 680				if (dattr)
 681					update_domain_attr_tree(dattr + nslot, b);
 682
 683				/* Done with this partition */
 684				b->pn = -1;
 685			}
 686		}
 687		nslot++;
 688	}
 689	BUG_ON(nslot != ndoms);
 690
 691done:
 692	kfree(csa);
 693
 694	/*
 695	 * Fallback to the default domain if kmalloc() failed.
 696	 * See comments in partition_sched_domains().
 697	 */
 698	if (doms == NULL)
 699		ndoms = 1;
 700
 701	*domains    = doms;
 702	*attributes = dattr;
 703	return ndoms;
 704}
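/*
 * Worked example (hypothetical cpusets): three load-balanced cpusets with
 * cpus_allowed A = {0,1}, B = {1,2} and C = {4,5}.  A and B overlap, so
 * the restart loop above merges their 'pn' values and ndoms drops from 3
 * to 2; the partition handed to partition_sched_domains() is then
 * doms[0] = {0,1,2} (A union B) and doms[1] = {4,5} (C).
 */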
 705
 706/*
 707 * Rebuild scheduler domains.
 708 *
 709 * Call with neither cgroup_mutex held nor within get_online_cpus().
 710 * Takes both cgroup_mutex and get_online_cpus().
 711 *
 712 * Cannot be directly called from cpuset code handling changes
 713 * to the cpuset pseudo-filesystem, because it cannot be called
 714 * from code that already holds cgroup_mutex.
 715 */
 716static void do_rebuild_sched_domains(struct work_struct *unused)
 717{
 718	struct sched_domain_attr *attr;
 719	cpumask_var_t *doms;
 720	int ndoms;
 721
 
 722	get_online_cpus();
 723
 724	/* Generate domain masks and attrs */
 725	cgroup_lock();
 726	ndoms = generate_sched_domains(&doms, &attr);
 727	cgroup_unlock();
 728
 729	/* Have scheduler rebuild the domains */
 730	partition_sched_domains(ndoms, doms, attr);
 731
 732	put_online_cpus();
 733}
 734#else /* !CONFIG_SMP */
 735static void do_rebuild_sched_domains(struct work_struct *unused)
 736{
 737}
 
 738
 739static int generate_sched_domains(cpumask_var_t **domains,
 740			struct sched_domain_attr **attributes)
 741{
 742	*domains = NULL;
 743	return 1;
 
 744}
 745#endif /* CONFIG_SMP */
 746
 747static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
 748
 749/*
 750 * Rebuild scheduler domains, asynchronously via workqueue.
 751 *
 752 * If the flag 'sched_load_balance' of any cpuset with non-empty
 753 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 754 * which has that flag enabled, or if any cpuset with a non-empty
 755 * 'cpus' is removed, then call this routine to rebuild the
 756 * scheduler's dynamic sched domains.
 757 *
 758 * The rebuild_sched_domains() and partition_sched_domains()
 759 * routines must nest cgroup_lock() inside get_online_cpus(),
 760 * but such cpuset changes as these must nest that locking the
 761 * other way, holding cgroup_lock() for much of the code.
 
 762 *
 763 * So in order to avoid an ABBA deadlock, the cpuset code handling
 764 * these user changes delegates the actual sched domain rebuilding
 765 * to a separate workqueue thread, which ends up processing the
 766 * above do_rebuild_sched_domains() function.
 767 */
 768static void async_rebuild_sched_domains(void)
 769{
 770	queue_work(cpuset_wq, &rebuild_sched_domains_work);
 771}
 772
 773/*
 774 * Accomplishes the same scheduler domain rebuild as the above
 775 * async_rebuild_sched_domains(), however it directly calls the
 776 * rebuild routine synchronously rather than calling it via an
 777 * asynchronous work thread.
 778 *
 779 * This can only be called from code that is not holding
 780 * cgroup_mutex (not nested in a cgroup_lock() call.)
 781 */
 782void rebuild_sched_domains(void)
 783{
 784	do_rebuild_sched_domains(NULL);
 785}
 786
 787/**
 788 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
 789 * @tsk: task to test
 790 * @scan: struct cgroup_scanner describing the cgroup whose tasks are scanned
 791 *
 792 * Call with cgroup_mutex held.  May take callback_mutex during call.
 793 * Called for each task in a cgroup by cgroup_scan_tasks().
 794 * Return nonzero if this task's cpus_allowed mask should be changed (in other
 795 * words, if its mask is not equal to its cpuset's mask).
 796 */
 797static int cpuset_test_cpumask(struct task_struct *tsk,
 798			       struct cgroup_scanner *scan)
 799{
 800	return !cpumask_equal(&tsk->cpus_allowed,
 801			(cgroup_cs(scan->cg))->cpus_allowed);
 802}
 803
 804/**
 805 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
 806 * @tsk: task to test
 807 * @scan: struct cgroup_scanner containing the cgroup of the task
 808 *
 809 * Called by cgroup_scan_tasks() for each task in a cgroup whose
 810 * cpus_allowed mask needs to be changed.
 811 *
 812 * We don't need to re-check for the cgroup/cpuset membership, since we're
 813 * holding cgroup_lock() at this point.
 814 */
 815static void cpuset_change_cpumask(struct task_struct *tsk,
 816				  struct cgroup_scanner *scan)
 817{
 818	set_cpus_allowed_ptr(tsk, ((cgroup_cs(scan->cg))->cpus_allowed));
 819}
 820
 821/**
 822 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 823 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 824 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
 825 *
 826 * Called with cgroup_mutex held
 827 *
 828 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 829 * calling callback functions for each.
 830 *
 831 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
 832 * if @heap != NULL.
 833 */
 834static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
 835{
 836	struct cgroup_scanner scan;
 837
 838	scan.cg = cs->css.cgroup;
 839	scan.test_task = cpuset_test_cpumask;
 840	scan.process_task = cpuset_change_cpumask;
 841	scan.heap = heap;
 842	cgroup_scan_tasks(&scan);
 843}
 844
 845/**
 846 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 847 * @cs: the cpuset to consider
 848 * @buf: buffer of cpu numbers written to this cpuset
 849 */
 850static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 851			  const char *buf)
 852{
 853	struct ptr_heap heap;
 854	int retval;
 855	int is_load_balanced;
 856
 857	/* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
 858	if (cs == &top_cpuset)
 859		return -EACCES;
 860
 861	/*
 862	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
 863	 * Since cpulist_parse() fails on an empty mask, we special case
 864	 * that parsing.  The validate_change() call ensures that cpusets
 865	 * with tasks have cpus.
 866	 */
 867	if (!*buf) {
 868		cpumask_clear(trialcs->cpus_allowed);
 869	} else {
 870		retval = cpulist_parse(buf, trialcs->cpus_allowed);
 871		if (retval < 0)
 872			return retval;
 873
 874		if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
 875			return -EINVAL;
 876	}
 877	retval = validate_change(cs, trialcs);
 878	if (retval < 0)
 879		return retval;
 880
 881	/* Nothing to do if the cpus didn't change */
 882	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
 883		return 0;
 884
 885	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
 886	if (retval)
 887		return retval;
 888
 889	is_load_balanced = is_sched_load_balance(trialcs);
 890
 891	mutex_lock(&callback_mutex);
 892	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 893	mutex_unlock(&callback_mutex);
 894
 895	/*
 896	 * Scan tasks in the cpuset, and update the cpumasks of any
 897	 * that need an update.
 898	 */
 899	update_tasks_cpumask(cs, &heap);
 900
 901	heap_free(&heap);
 902
 903	if (is_load_balanced)
 904		async_rebuild_sched_domains();
 905	return 0;
 906}
 907
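/*
 * Illustrative sketch, not part of this file: how user space typically
 * exercises update_cpumask() above, by writing a cpulist to a cpuset's
 * "cpus" file.  The mount point and cpuset name are assumptions; the
 * error cases mirror the checks above (-EACCES on the root cpuset,
 * -EINVAL if the list is not a subset of the online CPUs).
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/cpuset/mygroup/cpus", O_WRONLY);

	if (fd < 0)
		return 1;
	/* Restrict tasks in "mygroup" to CPUs 0-3; parsed by cpulist_parse() */
	if (write(fd, "0-3", strlen("0-3")) < 0)
		perror("write cpus");
	close(fd);
	return 0;
}
#endif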
 908/*
 909 * cpuset_migrate_mm
 910 *
 911 *    Migrate memory region from one set of nodes to another.
 912 *
 913 *    Temporarily set the task's mems_allowed to the target nodes of migration,
 914 *    so that the migration code can allocate pages on these nodes.
 915 *
 916 *    Call holding cgroup_mutex, so current's cpuset won't change
 917 *    during this call, as cgroup_mutex holds off any cpuset_attach()
 918 *    calls.  Therefore we don't need to take task_lock around the
 919 *    call to guarantee_online_mems(), as we know no one is changing
 920 *    our task's cpuset.
 921 *
 922 *    While the mm_struct we are migrating is typically from some
 923 *    other task, the task_struct mems_allowed that we are hacking
 924 *    is for our current task, which must allocate new pages for that
 925 *    migrating memory region.
 926 */
 927
 928static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 929							const nodemask_t *to)
 930{
 931	struct task_struct *tsk = current;
 932
 933	tsk->mems_allowed = *to;
 934
 935	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 936
 937	guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
 938}
 939
 940/*
 941 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 942 * @tsk: the task to change
 943 * @newmems: the new nodes to which the task's mems_allowed will be set
 944 *
 945 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 946 * we structure updates as setting all new allowed nodes, then clearing newly
 947 * disallowed ones.
 948 */
 949static void cpuset_change_task_nodemask(struct task_struct *tsk,
 950					nodemask_t *newmems)
 951{
 952repeat:
 953	/*
 954	 * Allow tasks that have access to memory reserves because they have
 955	 * been OOM killed to get memory anywhere.
 956	 */
 957	if (unlikely(test_thread_flag(TIF_MEMDIE)))
 958		return;
 959	if (current->flags & PF_EXITING) /* Let dying task have memory */
 960		return;
 961
 962	task_lock(tsk);
 963	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
 964	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 965
 966
 967	/*
 968	 * Ensure that ->mems_allowed_change_disable is checked only after
 969	 * all of the new allowed nodes have been set.
 970	 *
 971	 * The read-side task may then see a nodemask containing both new and
 972	 * old allowed nodes, so even if it allocates a page while the newly
 973	 * disallowed nodes are being cleared, it still sees the new bits.
 974	 *
 975	 * If the check came before setting the new nodes, setting them and
 976	 * clearing the newly disallowed ones could appear back to back, and
 977	 * the read-side task might find no node to allocate a page from.
 978	 */
 979	smp_mb();
 980
 981	/*
 982	 * Memory allocation is fast, so there is no need to sleep while
 983	 * waiting for the read side to finish.
 984	 */
 985	while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
 986		task_unlock(tsk);
 987		if (!task_curr(tsk))
 988			yield();
 989		goto repeat;
 990	}
 991
 992	/*
 993	 * Ensure that ->mems_allowed_change_disable is checked before the
 994	 * newly disallowed nodes are cleared.
 995	 *
 996	 * If the newly disallowed bits were cleared before the check, the
 997	 * read-side task might find no node to allocate a page from.
 998	 */
 999	smp_mb();
1000
1001	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
1002	tsk->mems_allowed = *newmems;
1003	task_unlock(tsk);
1004}
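/*
 * Illustrative sketch, not part of this file: the two-step rebind above
 * modelled with a plain bitmask.  Assuming the old mems_allowed is {0}
 * and the new one is {1}, the task's mask is never empty at any point
 * in between, which is the whole purpose of the ordering.
 */
#if 0
	unsigned long mems = 1UL << 0;		/* old mems_allowed = {0} */
	unsigned long newmems = 1UL << 1;	/* newmems          = {1} */

	mems |= newmems;	/* step 1 (nodes_or): {0,1}, readers still find a node */
	mems = newmems;		/* step 2 (assignment): {1}, old node finally dropped  */
#endif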
1005
1006/*
1007 * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
1008 * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
1009 * memory_migrate flag is set. Called with cgroup_mutex held.
1010 */
1011static void cpuset_change_nodemask(struct task_struct *p,
1012				   struct cgroup_scanner *scan)
1013{
1014	struct mm_struct *mm;
1015	struct cpuset *cs;
1016	int migrate;
1017	const nodemask_t *oldmem = scan->data;
1018	static nodemask_t newmems;	/* protected by cgroup_mutex */
1019
1020	cs = cgroup_cs(scan->cg);
1021	guarantee_online_mems(cs, &newmems);
1022
1023	cpuset_change_task_nodemask(p, &newmems);
1024
1025	mm = get_task_mm(p);
1026	if (!mm)
1027		return;
1028
1029	migrate = is_memory_migrate(cs);
1030
1031	mpol_rebind_mm(mm, &cs->mems_allowed);
1032	if (migrate)
1033		cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1034	mmput(mm);
1035}
1036
1037static void *cpuset_being_rebound;
1038
1039/**
1040 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1041 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1042 * @oldmem: old mems_allowed of cpuset cs
1043 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1044 *
1045 * Called with cgroup_mutex held
1046 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1047 * if @heap != NULL.
1048 */
1049static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1050				 struct ptr_heap *heap)
1051{
1052	struct cgroup_scanner scan;
1053
1054	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
1055
1056	scan.cg = cs->css.cgroup;
1057	scan.test_task = NULL;
1058	scan.process_task = cpuset_change_nodemask;
1059	scan.heap = heap;
1060	scan.data = (nodemask_t *)oldmem;
1061
1062	/*
1063	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1064	 * take while holding tasklist_lock.  Forks can happen - the
1065	 * mpol_dup() cpuset_being_rebound check will catch such forks,
1066	 * and rebind their vma mempolicies too.  Because we still hold
1067	 * the global cgroup_mutex, we know that no other rebind effort
1068	 * will be contending for the global variable cpuset_being_rebound.
1069	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1070	 * is idempotent.  Also migrate pages in each mm to new nodes.
1071	 */
1072	cgroup_scan_tasks(&scan);
1073
1074	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
1075	cpuset_being_rebound = NULL;
1076}
1077
1078/*
1079 * Handle a user request to change the 'mems' memory placement
1080 * of a cpuset.  Needs to validate the request, update the
1081 * cpuset's mems_allowed, and for each task in the cpuset,
1082 * update mems_allowed, rebind the task's mempolicy and any vma
1083 * mempolicies, and, if the cpuset is marked 'memory_migrate',
1084 * migrate the task's pages to the new memory.
1085 *
1086 * Call with cgroup_mutex held.  May take callback_mutex during call.
1087 * Will take tasklist_lock, scan the tasklist for tasks in cpuset cs,
1088 * lock each such task's mm->mmap_sem, scan its vma's and rebind
1089 * their mempolicies to the cpuset's new mems_allowed.
1090 */
1091static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1092			   const char *buf)
1093{
1094	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
1095	int retval;
1096	struct ptr_heap heap;
1097
1098	if (!oldmem)
1099		return -ENOMEM;
1100
1101	/*
1102	 * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
1103	 * it's read-only
1104	 */
1105	if (cs == &top_cpuset) {
1106		retval = -EACCES;
1107		goto done;
1108	}
1109
1110	/*
1111	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1112	 * Since nodelist_parse() fails on an empty mask, we special case
1113	 * that parsing.  The validate_change() call ensures that cpusets
1114	 * with tasks have memory.
1115	 */
1116	if (!*buf) {
1117		nodes_clear(trialcs->mems_allowed);
1118	} else {
1119		retval = nodelist_parse(buf, trialcs->mems_allowed);
1120		if (retval < 0)
1121			goto done;
1122
1123		if (!nodes_subset(trialcs->mems_allowed,
1124				node_states[N_HIGH_MEMORY])) {
1125			retval = -EINVAL;
1126			goto done;
1127		}
1128	}
1129	*oldmem = cs->mems_allowed;
1130	if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
1131		retval = 0;		/* Too easy - nothing to do */
1132		goto done;
1133	}
1134	retval = validate_change(cs, trialcs);
1135	if (retval < 0)
1136		goto done;
1137
1138	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1139	if (retval < 0)
1140		goto done;
1141
1142	mutex_lock(&callback_mutex);
1143	cs->mems_allowed = trialcs->mems_allowed;
1144	mutex_unlock(&callback_mutex);
1145
1146	update_tasks_nodemask(cs, oldmem, &heap);
1147
1148	heap_free(&heap);
1149done:
1150	NODEMASK_FREE(oldmem);
1151	return retval;
1152}
1153
1154int current_cpuset_is_being_rebound(void)
1155{
1156	return task_cs(current) == cpuset_being_rebound;
1157}
1158
1159static int update_relax_domain_level(struct cpuset *cs, s64 val)
1160{
1161#ifdef CONFIG_SMP
1162	if (val < -1 || val >= sched_domain_level_max)
1163		return -EINVAL;
1164#endif
1165
1166	if (val != cs->relax_domain_level) {
1167		cs->relax_domain_level = val;
1168		if (!cpumask_empty(cs->cpus_allowed) &&
1169		    is_sched_load_balance(cs))
1170			async_rebuild_sched_domains();
1171	}
1172
1173	return 0;
1174}
1175
1176/*
1177 * cpuset_change_flag - make a task's spread flags the same as its cpuset's
1178 * @tsk: task to be updated
1179 * @scan: struct cgroup_scanner containing the cgroup of the task
1180 *
1181 * Called by cgroup_scan_tasks() for each task in a cgroup.
1182 *
1183 * We don't need to re-check for the cgroup/cpuset membership, since we're
1184 * holding cgroup_lock() at this point.
1185 */
1186static void cpuset_change_flag(struct task_struct *tsk,
1187				struct cgroup_scanner *scan)
1188{
1189	cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
1190}
1191
1192/*
1193 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1194 * @cs: the cpuset in which each task's spread flags needs to be changed
1195 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1196 *
1197 * Called with cgroup_mutex held
1198 *
1199 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1200 * calling callback functions for each.
1201 *
1202 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1203 * if @heap != NULL.
1204 */
1205static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
1206{
1207	struct cgroup_scanner scan;
1208
1209	scan.cg = cs->css.cgroup;
1210	scan.test_task = NULL;
1211	scan.process_task = cpuset_change_flag;
1212	scan.heap = heap;
1213	cgroup_scan_tasks(&scan);
1214}
1215
1216/*
1217 * update_flag - read a 0 or a 1 in a file and update associated flag
1218 * bit:		the bit to update (see cpuset_flagbits_t)
1219 * cs:		the cpuset to update
1220 * turning_on: 	whether the flag is being set or cleared
1221 *
1222 * Call with cgroup_mutex held.
1223 */
1224
1225static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1226		       int turning_on)
1227{
1228	struct cpuset *trialcs;
1229	int balance_flag_changed;
1230	int spread_flag_changed;
1231	struct ptr_heap heap;
1232	int err;
1233
1234	trialcs = alloc_trial_cpuset(cs);
1235	if (!trialcs)
1236		return -ENOMEM;
1237
1238	if (turning_on)
1239		set_bit(bit, &trialcs->flags);
1240	else
1241		clear_bit(bit, &trialcs->flags);
1242
1243	err = validate_change(cs, trialcs);
1244	if (err < 0)
1245		goto out;
1246
1247	err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
1248	if (err < 0)
1249		goto out;
1250
1251	balance_flag_changed = (is_sched_load_balance(cs) !=
1252				is_sched_load_balance(trialcs));
1253
1254	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1255			|| (is_spread_page(cs) != is_spread_page(trialcs)));
1256
1257	mutex_lock(&callback_mutex);
1258	cs->flags = trialcs->flags;
1259	mutex_unlock(&callback_mutex);
1260
1261	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1262		async_rebuild_sched_domains();
1263
1264	if (spread_flag_changed)
1265		update_tasks_flags(cs, &heap);
1266	heap_free(&heap);
1267out:
1268	free_trial_cpuset(trialcs);
1269	return err;
1270}
1271
1272/*
1273 * Frequency meter - How fast is some event occurring?
1274 *
1275 * These routines manage a digitally filtered, constant time based,
1276 * event frequency meter.  There are four routines:
1277 *   fmeter_init() - initialize a frequency meter.
1278 *   fmeter_markevent() - called each time the event happens.
1279 *   fmeter_getrate() - returns the recent rate of such events.
1280 *   fmeter_update() - internal routine used to update fmeter.
1281 *
1282 * A common data structure is passed to each of these routines,
1283 * which is used to keep track of the state required to manage the
1284 * frequency meter and its digital filter.
1285 *
1286 * The filter works on the number of events marked per unit time.
1287 * The filter is single-pole low-pass recursive (IIR).  The time unit
1288 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
1289 * simulate 3 decimal digits of precision (multiplied by 1000).
1290 *
1291 * With an FM_COEF of 933, and a time base of 1 second, the filter
1292 * has a half-life of 10 seconds, meaning that if the events quit
1293 * happening, then the rate returned from the fmeter_getrate()
1294 * will be cut in half each 10 seconds, until it converges to zero.
1295 *
1296 * It is not worth doing a real infinitely recursive filter.  If more
1297 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1298 * just compute FM_MAXTICKS ticks worth, by which point the level
1299 * will be stable.
1300 *
1301 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1302 * arithmetic overflow in the fmeter_update() routine.
1303 *
1304 * Given the simple 32 bit integer arithmetic used, this meter works
1305 * best for reporting rates between one per millisecond (msec) and
1306 * one per 32 (approx) seconds.  At constant rates faster than one
1307 * per msec it maxes out at values just under 1,000,000.  At constant
1308 * rates between one per msec, and one per second it will stabilize
1309 * to a value N*1000, where N is the rate of events per second.
1310 * At constant rates between one per second and one per 32 seconds,
1311 * it will be choppy, moving up on the seconds that have an event,
1312 * and then decaying until the next event.  At rates slower than
1313 * about one in 32 seconds, it decays all the way back to zero between
1314 * each event.
1315 */
1316
1317#define FM_COEF 933		/* coefficient for half-life of 10 secs */
1318#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1319#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
1320#define FM_SCALE 1000		/* faux fixed point scale */
1321
1322/* Initialize a frequency meter */
1323static void fmeter_init(struct fmeter *fmp)
1324{
1325	fmp->cnt = 0;
1326	fmp->val = 0;
1327	fmp->time = 0;
1328	spin_lock_init(&fmp->lock);
1329}
1330
1331/* Internal meter update - process cnt events and update value */
1332static void fmeter_update(struct fmeter *fmp)
1333{
1334	time_t now = get_seconds();
1335	time_t ticks = now - fmp->time;
1336
1337	if (ticks == 0)
1338		return;
1339
1340	ticks = min(FM_MAXTICKS, ticks);
1341	while (ticks-- > 0)
1342		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1343	fmp->time = now;
1344
1345	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1346	fmp->cnt = 0;
1347}
1348
1349/* Process any previous ticks, then bump cnt by one (times scale). */
1350static void fmeter_markevent(struct fmeter *fmp)
1351{
1352	spin_lock(&fmp->lock);
1353	fmeter_update(fmp);
1354	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1355	spin_unlock(&fmp->lock);
1356}
1357
1358/* Process any previous ticks, then return current value. */
1359static int fmeter_getrate(struct fmeter *fmp)
1360{
1361	int val;
1362
1363	spin_lock(&fmp->lock);
1364	fmeter_update(fmp);
1365	val = fmp->val;
1366	spin_unlock(&fmp->lock);
1367	return val;
1368}
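/*
 * Illustrative sketch, not part of this file: what the filter above
 * converges to.  One fmeter_markevent() per second adds FM_SCALE to cnt,
 * and each tick of fmeter_update() computes
 * val = (933 * val + 67 * cnt) / 1000, so a steady one-event-per-second
 * load settles near val == 1000.  Once the events stop, val is scaled by
 * 0.933 per tick, and 0.933^10 is roughly 0.5, hence the ten second
 * half-life quoted above.
 */
#if 0
	int val = 0, tick;

	for (tick = 0; tick < 100; tick++) {
		val = (FM_COEF * val) / FM_SCALE;	/* decay one tick */
		val += ((FM_SCALE - FM_COEF) * FM_SCALE) / FM_SCALE; /* one event */
	}
	/* val is now close to 1000; ten idle ticks later it would be ~500 */
#endif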
1369
1370/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
1371static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1372			     struct task_struct *tsk)
1373{
1374	struct cpuset *cs = cgroup_cs(cont);
1375
1376	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1377		return -ENOSPC;
1378
1379	/*
1380	 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
1381	 * cannot change their cpu affinity and isolating such threads by their
1382	 * set of allowed nodes is unnecessary.  Thus, cpusets are not
1383	 * applicable for such threads.  This prevents checking for success of
1384	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
1385	 * be changed.
1386	 */
1387	if (tsk->flags & PF_THREAD_BOUND)
1388		return -EINVAL;
1389
1390	return 0;
1391}
1392
1393static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
1394{
1395	return security_task_setscheduler(task);
1396}
1397
1398/*
1399 * Protected by cgroup_lock. The nodemasks must be stored globally because
1400 * dynamically allocating them is not allowed in pre_attach, and they must
1401 * persist among pre_attach, attach_task, and attach.
1402 */
1403static cpumask_var_t cpus_attach;
1404static nodemask_t cpuset_attach_nodemask_from;
1405static nodemask_t cpuset_attach_nodemask_to;
1406
1407/* Set-up work for before attaching each task. */
1408static void cpuset_pre_attach(struct cgroup *cont)
1409{
1410	struct cpuset *cs = cgroup_cs(cont);
1411
1412	if (cs == &top_cpuset)
1413		cpumask_copy(cpus_attach, cpu_possible_mask);
1414	else
1415		guarantee_online_cpus(cs, cpus_attach);
1416
1417	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1418}
1419
1420/* Per-thread attachment work. */
1421static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
1422{
1423	int err;
1424	struct cpuset *cs = cgroup_cs(cont);
1425
1426	/*
1427	 * can_attach beforehand should guarantee that this doesn't fail.
1428	 * TODO: have a better way to handle failure here
1429	 */
1430	err = set_cpus_allowed_ptr(tsk, cpus_attach);
1431	WARN_ON_ONCE(err);
1432
1433	cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
1434	cpuset_update_task_spread_flag(cs, tsk);
1435}
1436
1437static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
1438			  struct cgroup *oldcont, struct task_struct *tsk)
1439{
1440	struct mm_struct *mm;
1441	struct cpuset *cs = cgroup_cs(cont);
1442	struct cpuset *oldcs = cgroup_cs(oldcont);
1443
1444	/*
1445	 * Change mm, possibly for multiple threads in a threadgroup. This is
1446	 * expensive and may sleep.
1447	 */
1448	cpuset_attach_nodemask_from = oldcs->mems_allowed;
1449	cpuset_attach_nodemask_to = cs->mems_allowed;
1450	mm = get_task_mm(tsk);
1451	if (mm) {
1452		mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1453		if (is_memory_migrate(cs))
1454			cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
1455					  &cpuset_attach_nodemask_to);
1456		mmput(mm);
1457	}
1458}
1459
1460/* The various types of files and directories in a cpuset file system */
1461
1462typedef enum {
1463	FILE_MEMORY_MIGRATE,
1464	FILE_CPULIST,
1465	FILE_MEMLIST,
1466	FILE_CPU_EXCLUSIVE,
1467	FILE_MEM_EXCLUSIVE,
1468	FILE_MEM_HARDWALL,
1469	FILE_SCHED_LOAD_BALANCE,
1470	FILE_SCHED_RELAX_DOMAIN_LEVEL,
1471	FILE_MEMORY_PRESSURE_ENABLED,
1472	FILE_MEMORY_PRESSURE,
1473	FILE_SPREAD_PAGE,
1474	FILE_SPREAD_SLAB,
1475} cpuset_filetype_t;
1476
1477static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1478{
1479	int retval = 0;
1480	struct cpuset *cs = cgroup_cs(cgrp);
1481	cpuset_filetype_t type = cft->private;
1482
1483	if (!cgroup_lock_live_group(cgrp))
1484		return -ENODEV;
1485
1486	switch (type) {
1487	case FILE_CPU_EXCLUSIVE:
1488		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1489		break;
1490	case FILE_MEM_EXCLUSIVE:
1491		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1492		break;
1493	case FILE_MEM_HARDWALL:
1494		retval = update_flag(CS_MEM_HARDWALL, cs, val);
1495		break;
1496	case FILE_SCHED_LOAD_BALANCE:
1497		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1498		break;
1499	case FILE_MEMORY_MIGRATE:
1500		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1501		break;
1502	case FILE_MEMORY_PRESSURE_ENABLED:
1503		cpuset_memory_pressure_enabled = !!val;
1504		break;
1505	case FILE_MEMORY_PRESSURE:
1506		retval = -EACCES;
1507		break;
1508	case FILE_SPREAD_PAGE:
1509		retval = update_flag(CS_SPREAD_PAGE, cs, val);
1510		break;
1511	case FILE_SPREAD_SLAB:
1512		retval = update_flag(CS_SPREAD_SLAB, cs, val);
1513		break;
1514	default:
1515		retval = -EINVAL;
1516		break;
1517	}
1518	cgroup_unlock();
1519	return retval;
1520}
1521
1522static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1523{
1524	int retval = 0;
1525	struct cpuset *cs = cgroup_cs(cgrp);
1526	cpuset_filetype_t type = cft->private;
1527
1528	if (!cgroup_lock_live_group(cgrp))
1529		return -ENODEV;
1530
1531	switch (type) {
1532	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1533		retval = update_relax_domain_level(cs, val);
1534		break;
1535	default:
1536		retval = -EINVAL;
1537		break;
1538	}
1539	cgroup_unlock();
1540	return retval;
1541}
1542
1543/*
1544 * Common handling for a write to a "cpus" or "mems" file.
1545 */
1546static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1547				const char *buf)
1548{
1549	int retval = 0;
1550	struct cpuset *cs = cgroup_cs(cgrp);
1551	struct cpuset *trialcs;
1552
1553	if (!cgroup_lock_live_group(cgrp))
1554		return -ENODEV;
1555
1556	trialcs = alloc_trial_cpuset(cs);
1557	if (!trialcs) {
1558		retval = -ENOMEM;
1559		goto out;
1560	}
1561
1562	switch (cft->private) {
1563	case FILE_CPULIST:
1564		retval = update_cpumask(cs, trialcs, buf);
1565		break;
1566	case FILE_MEMLIST:
1567		retval = update_nodemask(cs, trialcs, buf);
1568		break;
1569	default:
1570		retval = -EINVAL;
1571		break;
1572	}
1573
1574	free_trial_cpuset(trialcs);
1575out:
1576	cgroup_unlock();
1577	return retval;
1578}
1579
1580/*
1581 * These ascii lists should be read in a single call, by using a user
1582 * buffer large enough to hold the entire map.  If read in smaller
1583 * chunks, there is no guarantee of atomicity.  Since the display format
1584 * used, list of ranges of sequential numbers, is variable length,
1585 * and since these maps can change value dynamically, one could read
1586 * gibberish by doing partial reads while a list was changing.
1587 * A single large read to a buffer that crosses a page boundary is
1588 * ok, because the result being copied to user land is not recomputed
1589 * across a page fault.
1590 */
1591
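/*
 * Illustrative sketch, not part of this file: reading one of these lists
 * in a single call, as recommended above.  The path is an assumption
 * (legacy cpuset filesystem mounted on /dev/cpuset).
 */
#if 0
	char buf[4096];		/* large enough for the whole list */
	int fd;
	ssize_t n;

	fd = open("/dev/cpuset/mygroup/cpus", O_RDONLY);
	n = read(fd, buf, sizeof(buf) - 1);	/* one read(), one atomic snapshot */
	if (n >= 0)
		buf[n] = '\0';
	close(fd);
#endif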
1592static size_t cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1593{
1594	size_t count;
1595
1596	mutex_lock(&callback_mutex);
1597	count = cpulist_scnprintf(page, PAGE_SIZE, cs->cpus_allowed);
1598	mutex_unlock(&callback_mutex);
1599
1600	return count;
1601}
1602
1603static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1604{
1605	size_t count;
1606
1607	mutex_lock(&callback_mutex);
1608	count = nodelist_scnprintf(page, PAGE_SIZE, cs->mems_allowed);
1609	mutex_unlock(&callback_mutex);
1610
1611	return count;
1612}
1613
1614static ssize_t cpuset_common_file_read(struct cgroup *cont,
1615				       struct cftype *cft,
1616				       struct file *file,
1617				       char __user *buf,
1618				       size_t nbytes, loff_t *ppos)
1619{
1620	struct cpuset *cs = cgroup_cs(cont);
1621	cpuset_filetype_t type = cft->private;
1622	char *page;
1623	ssize_t retval = 0;
1624	char *s;
1625
1626	if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
1627		return -ENOMEM;
1628
1629	s = page;
1630
1631	switch (type) {
1632	case FILE_CPULIST:
1633		s += cpuset_sprintf_cpulist(s, cs);
1634		break;
1635	case FILE_MEMLIST:
1636		s += cpuset_sprintf_memlist(s, cs);
1637		break;
1638	default:
1639		retval = -EINVAL;
1640		goto out;
1641	}
1642	*s++ = '\n';
1643
1644	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1645out:
1646	free_page((unsigned long)page);
1647	return retval;
1648}
1649
1650static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1651{
1652	struct cpuset *cs = cgroup_cs(cont);
1653	cpuset_filetype_t type = cft->private;
1654	switch (type) {
1655	case FILE_CPU_EXCLUSIVE:
1656		return is_cpu_exclusive(cs);
1657	case FILE_MEM_EXCLUSIVE:
1658		return is_mem_exclusive(cs);
1659	case FILE_MEM_HARDWALL:
1660		return is_mem_hardwall(cs);
1661	case FILE_SCHED_LOAD_BALANCE:
1662		return is_sched_load_balance(cs);
1663	case FILE_MEMORY_MIGRATE:
1664		return is_memory_migrate(cs);
1665	case FILE_MEMORY_PRESSURE_ENABLED:
1666		return cpuset_memory_pressure_enabled;
1667	case FILE_MEMORY_PRESSURE:
1668		return fmeter_getrate(&cs->fmeter);
1669	case FILE_SPREAD_PAGE:
1670		return is_spread_page(cs);
1671	case FILE_SPREAD_SLAB:
1672		return is_spread_slab(cs);
1673	default:
1674		BUG();
1675	}
1676
1677	/* Unreachable but makes gcc happy */
1678	return 0;
1679}
1680
1681static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
1682{
1683	struct cpuset *cs = cgroup_cs(cont);
1684	cpuset_filetype_t type = cft->private;
1685	switch (type) {
1686	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1687		return cs->relax_domain_level;
1688	default:
1689		BUG();
1690	}
1691
1692	/* Unreachable but makes gcc happy */
1693	return 0;
1694}
1695
1696
1697/*
1698 * for the common functions, 'private' gives the type of file
1699 */
1700
1701static struct cftype files[] = {
1702	{
1703		.name = "cpus",
1704		.read = cpuset_common_file_read,
1705		.write_string = cpuset_write_resmask,
1706		.max_write_len = (100U + 6 * NR_CPUS),
1707		.private = FILE_CPULIST,
1708	},
1709
1710	{
1711		.name = "mems",
1712		.read = cpuset_common_file_read,
1713		.write_string = cpuset_write_resmask,
1714		.max_write_len = (100U + 6 * MAX_NUMNODES),
1715		.private = FILE_MEMLIST,
1716	},
1717
1718	{
1719		.name = "cpu_exclusive",
1720		.read_u64 = cpuset_read_u64,
1721		.write_u64 = cpuset_write_u64,
1722		.private = FILE_CPU_EXCLUSIVE,
1723	},
1724
1725	{
1726		.name = "mem_exclusive",
1727		.read_u64 = cpuset_read_u64,
1728		.write_u64 = cpuset_write_u64,
1729		.private = FILE_MEM_EXCLUSIVE,
1730	},
1731
1732	{
1733		.name = "mem_hardwall",
1734		.read_u64 = cpuset_read_u64,
1735		.write_u64 = cpuset_write_u64,
1736		.private = FILE_MEM_HARDWALL,
1737	},
1738
1739	{
1740		.name = "sched_load_balance",
1741		.read_u64 = cpuset_read_u64,
1742		.write_u64 = cpuset_write_u64,
1743		.private = FILE_SCHED_LOAD_BALANCE,
1744	},
1745
1746	{
1747		.name = "sched_relax_domain_level",
1748		.read_s64 = cpuset_read_s64,
1749		.write_s64 = cpuset_write_s64,
1750		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1751	},
1752
1753	{
1754		.name = "memory_migrate",
1755		.read_u64 = cpuset_read_u64,
1756		.write_u64 = cpuset_write_u64,
1757		.private = FILE_MEMORY_MIGRATE,
1758	},
1759
1760	{
1761		.name = "memory_pressure",
1762		.read_u64 = cpuset_read_u64,
1763		.write_u64 = cpuset_write_u64,
1764		.private = FILE_MEMORY_PRESSURE,
1765		.mode = S_IRUGO,
1766	},
1767
1768	{
1769		.name = "memory_spread_page",
1770		.read_u64 = cpuset_read_u64,
1771		.write_u64 = cpuset_write_u64,
1772		.private = FILE_SPREAD_PAGE,
1773	},
1774
1775	{
1776		.name = "memory_spread_slab",
1777		.read_u64 = cpuset_read_u64,
1778		.write_u64 = cpuset_write_u64,
1779		.private = FILE_SPREAD_SLAB,
1780	},
1781};
1782
1783static struct cftype cft_memory_pressure_enabled = {
1784	.name = "memory_pressure_enabled",
1785	.read_u64 = cpuset_read_u64,
1786	.write_u64 = cpuset_write_u64,
1787	.private = FILE_MEMORY_PRESSURE_ENABLED,
1788};
1789
1790static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
1791{
1792	int err;
1793
1794	err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
1795	if (err)
1796		return err;
1797	/* memory_pressure_enabled is in root cpuset only */
1798	if (!cont->parent)
1799		err = cgroup_add_file(cont, ss,
1800				      &cft_memory_pressure_enabled);
1801	return err;
1802}
1803
1804/*
1805 * post_clone() is called during cgroup_create() when the
1806 * clone_children mount argument was specified.  The cgroup
1807 * can not yet have any tasks.
1808 *
1809 * Currently we refuse to set up the cgroup - thereby
1810 * refusing to let the task enter it, and as a result refusing
1811 * the sys_unshare() or clone() which initiated it - if any
1812 * sibling cpuset has exclusive cpus or mem.
1813 *
1814 * If this becomes a problem for some users who wish to
1815 * allow that scenario, then cpuset_post_clone() could be
1816 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
1817 * (and likewise for mems) to the new cgroup. Called with cgroup_mutex
1818 * held.
1819 */
1820static void cpuset_post_clone(struct cgroup_subsys *ss,
1821			      struct cgroup *cgroup)
1822{
1823	struct cgroup *parent, *child;
1824	struct cpuset *cs, *parent_cs;
1825
1826	parent = cgroup->parent;
1827	list_for_each_entry(child, &parent->children, sibling) {
1828		cs = cgroup_cs(child);
1829		if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
1830			return;
1831	}
1832	cs = cgroup_cs(cgroup);
1833	parent_cs = cgroup_cs(parent);
1834
1835	mutex_lock(&callback_mutex);
1836	cs->mems_allowed = parent_cs->mems_allowed;
1837	cpumask_copy(cs->cpus_allowed, parent_cs->cpus_allowed);
1838	mutex_unlock(&callback_mutex);
1839	return;
1840}
1841
1842/*
1843 *	cpuset_create - create a cpuset
1844 *	ss:	cpuset cgroup subsystem
1845 *	cont:	control group that the new cpuset will be part of
1846 */
1847
1848static struct cgroup_subsys_state *cpuset_create(
1849	struct cgroup_subsys *ss,
1850	struct cgroup *cont)
1851{
1852	struct cpuset *cs;
1853	struct cpuset *parent;
1854
1855	if (!cont->parent) {
1856		return &top_cpuset.css;
1857	}
1858	parent = cgroup_cs(cont->parent);
1859	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
1860	if (!cs)
1861		return ERR_PTR(-ENOMEM);
1862	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
1863		kfree(cs);
1864		return ERR_PTR(-ENOMEM);
1865	}
1866
1867	cs->flags = 0;
1868	if (is_spread_page(parent))
1869		set_bit(CS_SPREAD_PAGE, &cs->flags);
1870	if (is_spread_slab(parent))
1871		set_bit(CS_SPREAD_SLAB, &cs->flags);
1872	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1873	cpumask_clear(cs->cpus_allowed);
1874	nodes_clear(cs->mems_allowed);
1875	fmeter_init(&cs->fmeter);
1876	cs->relax_domain_level = -1;
1877
1878	cs->parent = parent;
1879	number_of_cpusets++;
1880	return &cs->css;
1881}
1882
1883/*
1884 * If the cpuset being removed has its flag 'sched_load_balance'
1885 * enabled, then simulate turning sched_load_balance off, which
1886 * will call async_rebuild_sched_domains().
1887 */
1888
1889static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
1890{
1891	struct cpuset *cs = cgroup_cs(cont);
1892
1893	if (is_sched_load_balance(cs))
1894		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1895
1896	number_of_cpusets--;
1897	free_cpumask_var(cs->cpus_allowed);
1898	kfree(cs);
1899}
1900
1901struct cgroup_subsys cpuset_subsys = {
1902	.name = "cpuset",
1903	.create = cpuset_create,
1904	.destroy = cpuset_destroy,
1905	.can_attach = cpuset_can_attach,
1906	.can_attach_task = cpuset_can_attach_task,
1907	.pre_attach = cpuset_pre_attach,
1908	.attach_task = cpuset_attach_task,
1909	.attach = cpuset_attach,
1910	.populate = cpuset_populate,
1911	.post_clone = cpuset_post_clone,
1912	.subsys_id = cpuset_subsys_id,
1913	.early_init = 1,
1914};
1915
1916/**
1917 * cpuset_init - initialize cpusets at system boot
1918 *
1919 * Description: Initialize top_cpuset and the cpuset internal file system.
1920 **/
1921
1922int __init cpuset_init(void)
1923{
1924	int err = 0;
1925
1926	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
1927		BUG();
1928
1929	cpumask_setall(top_cpuset.cpus_allowed);
1930	nodes_setall(top_cpuset.mems_allowed);
1931
1932	fmeter_init(&top_cpuset.fmeter);
1933	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
1934	top_cpuset.relax_domain_level = -1;
1935
1936	err = register_filesystem(&cpuset_fs_type);
1937	if (err < 0)
1938		return err;
1939
1940	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
1941		BUG();
1942
1943	number_of_cpusets = 1;
1944	return 0;
1945}
1946
1947/**
1948 * cpuset_do_move_task - move a given task to another cpuset
1949 * @tsk: pointer to the task_struct of the task to move
1950 * @scan: struct cgroup_scanner for the cgroup whose tasks are being moved
1951 *
1952 * Called by cgroup_scan_tasks() for each task in a cgroup.
1953 * The cgroup to attach each task to is passed in scan->data.
1954 */
1955static void cpuset_do_move_task(struct task_struct *tsk,
1956				struct cgroup_scanner *scan)
1957{
1958	struct cgroup *new_cgroup = scan->data;
1959
1960	cgroup_attach_task(new_cgroup, tsk);
1961}
1962
1963/**
1964 * move_member_tasks_to_cpuset - move tasks from one cpuset to another
1965 * @from: cpuset in which the tasks currently reside
1966 * @to: cpuset to which the tasks will be moved
1967 *
1968 * Called with cgroup_mutex held
1969 * callback_mutex must not be held, as cpuset_attach() will take it.
1970 *
1971 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
1972 * calling callback functions for each.
1973 */
1974static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1975{
1976	struct cgroup_scanner scan;
1977
1978	scan.cg = from->css.cgroup;
1979	scan.test_task = NULL; /* select all tasks in cgroup */
1980	scan.process_task = cpuset_do_move_task;
1981	scan.heap = NULL;
1982	scan.data = to->css.cgroup;
1983
1984	if (cgroup_scan_tasks(&scan))
1985		printk(KERN_ERR "move_member_tasks_to_cpuset: "
1986				"cgroup_scan_tasks failed\n");
1987}
1988
1989/*
1990 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
1991 * or memory nodes, we need to walk over the cpuset hierarchy,
1992 * removing that CPU or node from all cpusets.  If this removes the
1993 * last CPU or node from a cpuset, then move the tasks in the empty
1994 * cpuset to its next-highest non-empty parent.
1995 *
1996 * Called with cgroup_mutex held
1997 * callback_mutex must not be held, as cpuset_attach() will take it.
1998 */
1999static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2000{
2001	struct cpuset *parent;
2002
2003	/*
2004	 * The cgroup's css_sets list is in use if there are tasks
2005	 * in the cpuset; the list is empty if there are none;
2006	 * the cs->css.refcnt seems to always be 0 here.
2007	 */
2008	if (list_empty(&cs->css.cgroup->css_sets))
2009		return;
2010
2011	/*
2012	 * Find its next-highest non-empty parent (the top cpuset
2013	 * always has online cpus, so it can't be empty).
2014	 */
2015	parent = cs->parent;
2016	while (cpumask_empty(parent->cpus_allowed) ||
2017			nodes_empty(parent->mems_allowed))
2018		parent = parent->parent;
2019
2020	move_member_tasks_to_cpuset(cs, parent);
2021}
2022
2023/*
2024 * Walk the specified cpuset subtree and look for empty cpusets.
2025 * The tasks of such cpuset must be moved to a parent cpuset.
2026 *
2027 * Called with cgroup_mutex held.  We take callback_mutex to modify
2028 * cpus_allowed and mems_allowed.
2029 *
2030 * This walk processes the tree from top to bottom, completing one layer
2031 * before dropping down to the next.  It always processes a node before
2032 * any of its children.
2033 *
2034 * For now, since we lack memory hot unplug, we'll never see a cpuset
2035 * that has tasks along with an empty 'mems'.  But if we did see such
2036 * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
2037 */
2038static void scan_for_empty_cpusets(struct cpuset *root)
2039{
2040	LIST_HEAD(queue);
2041	struct cpuset *cp;	/* scans cpusets being updated */
2042	struct cpuset *child;	/* scans child cpusets of cp */
2043	struct cgroup *cont;
2044	static nodemask_t oldmems;	/* protected by cgroup_mutex */
2045
2046	list_add_tail((struct list_head *)&root->stack_list, &queue);
2047
2048	while (!list_empty(&queue)) {
2049		cp = list_first_entry(&queue, struct cpuset, stack_list);
2050		list_del(queue.next);
2051		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
2052			child = cgroup_cs(cont);
2053			list_add_tail(&child->stack_list, &queue);
2054		}
2055
2056		/* Continue past cpusets with all cpus, mems online */
2057		if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
2058		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
2059			continue;
2060
2061		oldmems = cp->mems_allowed;
2062
2063		/* Remove offline cpus and mems from this cpuset. */
2064		mutex_lock(&callback_mutex);
2065		cpumask_and(cp->cpus_allowed, cp->cpus_allowed,
2066			    cpu_active_mask);
2067		nodes_and(cp->mems_allowed, cp->mems_allowed,
2068						node_states[N_HIGH_MEMORY]);
2069		mutex_unlock(&callback_mutex);
2070
2071		/* Move tasks from the empty cpuset to a parent */
2072		if (cpumask_empty(cp->cpus_allowed) ||
2073		     nodes_empty(cp->mems_allowed))
2074			remove_tasks_in_empty_cpuset(cp);
2075		else {
2076			update_tasks_cpumask(cp, NULL);
2077			update_tasks_nodemask(cp, &oldmems, NULL);
2078		}
2079	}
2080}
2081
2082/*
2083 * The top_cpuset tracks what CPUs and Memory Nodes are online,
2084 * period.  This is necessary in order to make cpusets transparent
2085 * (of no effect) on systems that are actively using CPU hotplug
2086 * but making no active use of cpusets.
2087 *
2088 * This routine ensures that top_cpuset.cpus_allowed tracks
2089 * cpu_active_mask on each CPU hotplug (cpuhp) event.
2090 *
2091 * Called within get_online_cpus().  Needs to call cgroup_lock()
2092 * before calling generate_sched_domains().
2093 */
2094void cpuset_update_active_cpus(void)
2095{
2096	struct sched_domain_attr *attr;
2097	cpumask_var_t *doms;
2098	int ndoms;
2099
2100	cgroup_lock();
2101	mutex_lock(&callback_mutex);
2102	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2103	mutex_unlock(&callback_mutex);
2104	scan_for_empty_cpusets(&top_cpuset);
2105	ndoms = generate_sched_domains(&doms, &attr);
2106	cgroup_unlock();
2107
2108	/* Have scheduler rebuild the domains */
2109	partition_sched_domains(ndoms, doms, attr);
2110}
2111
2112#ifdef CONFIG_MEMORY_HOTPLUG
2113/*
2114 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
2115 * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
2116 * See also the previous routine cpuset_update_active_cpus().
2117 */
2118static int cpuset_track_online_nodes(struct notifier_block *self,
2119				unsigned long action, void *arg)
2120{
2121	static nodemask_t oldmems;	/* protected by cgroup_mutex */
2122
2123	cgroup_lock();
2124	switch (action) {
2125	case MEM_ONLINE:
2126		oldmems = top_cpuset.mems_allowed;
2127		mutex_lock(&callback_mutex);
2128		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2129		mutex_unlock(&callback_mutex);
2130		update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
2131		break;
2132	case MEM_OFFLINE:
2133		/*
2134		 * needn't update top_cpuset.mems_allowed explicitly because
2135		 * scan_for_empty_cpusets() will update it.
2136		 */
2137		scan_for_empty_cpusets(&top_cpuset);
2138		break;
2139	default:
2140		break;
2141	}
2142	cgroup_unlock();
2143
2144	return NOTIFY_OK;
2145}
2146#endif
2147
2148/**
2149 * cpuset_init_smp - initialize cpus_allowed
2150 *
2151 * Description: Finish top cpuset after cpu, node maps are initialized
2152 **/
2153
2154void __init cpuset_init_smp(void)
2155{
2156	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
2157	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
2158
2159	hotplug_memory_notifier(cpuset_track_online_nodes, 10);
2160
2161	cpuset_wq = create_singlethread_workqueue("cpuset");
2162	BUG_ON(!cpuset_wq);
2163}
2164
2165/**
2166 * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset.
2167 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
2168 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
2169 *
2170 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
2171 * attached to the specified @tsk.  Guaranteed to return some non-empty
2172 * subset of cpu_online_map, even if this means going outside the
2173 * tasks cpuset.
2174 **/
2175
2176void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2177{
2178	mutex_lock(&callback_mutex);
2179	task_lock(tsk);
2180	guarantee_online_cpus(task_cs(tsk), pmask);
2181	task_unlock(tsk);
2182	mutex_unlock(&callback_mutex);
2183}
2184
2185int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2186{
2187	const struct cpuset *cs;
2188	int cpu;
2189
2190	rcu_read_lock();
2191	cs = task_cs(tsk);
2192	if (cs)
2193		do_set_cpus_allowed(tsk, cs->cpus_allowed);
2194	rcu_read_unlock();
2195
2196	/*
2197	 * We own tsk->cpus_allowed, nobody can change it under us.
2198	 *
2199	 * But we used cs && cs->cpus_allowed lockless and thus can
2200	 * race with cgroup_attach_task() or update_cpumask() and get
2201	 * the wrong tsk->cpus_allowed. However, both cases imply the
2202	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2203	 * which takes task_rq_lock().
2204	 *
2205	 * If we are called after it dropped the lock we must see all
2206	 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
2207	 * set any mask even if it is not right from task_cs() pov,
2208	 * the pending set_cpus_allowed_ptr() will fix things.
2209	 */
2210
2211	cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
2212	if (cpu >= nr_cpu_ids) {
2213		/*
2214		 * Either tsk->cpus_allowed is wrong (see above) or it
2215		 * is actually empty. The latter case is only possible
2216		 * if we are racing with remove_tasks_in_empty_cpuset().
2217		 * Like above we can temporarily set any mask and rely on
2218		 * set_cpus_allowed_ptr() as synchronization point.
2219		 */
2220		do_set_cpus_allowed(tsk, cpu_possible_mask);
2221		cpu = cpumask_any(cpu_active_mask);
2222	}
2223
2224	return cpu;
2225}
2226
2227void cpuset_init_current_mems_allowed(void)
2228{
2229	nodes_setall(current->mems_allowed);
2230}
2231
2232/**
2233 * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
2234 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
2235 *
2236 * Description: Returns the nodemask_t mems_allowed of the cpuset
2237 * attached to the specified @tsk.  Guaranteed to return some non-empty
2238 * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
2239 * tasks cpuset.
2240 **/
2241
2242nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2243{
2244	nodemask_t mask;
2245
2246	mutex_lock(&callback_mutex);
2247	task_lock(tsk);
2248	guarantee_online_mems(task_cs(tsk), &mask);
2249	task_unlock(tsk);
2250	mutex_unlock(&callback_mutex);
2251
2252	return mask;
2253}
2254
2255/**
2256 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
2257 * @nodemask: the nodemask to be checked
2258 *
2259 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
2260 */
2261int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2262{
2263	return nodes_intersects(*nodemask, current->mems_allowed);
2264}
2265
2266/*
2267 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2268 * mem_hardwall ancestor to the specified cpuset.  Call holding
2269 * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
2270 * (an unusual configuration), then returns the root cpuset.
2271 */
2272static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2273{
2274	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
2275		cs = cs->parent;
2276	return cs;
2277}
2278
2279/**
2280 * cpuset_node_allowed_softwall - Can we allocate on a memory node?
2281 * @node: is this an allowed node?
2282 * @gfp_mask: memory allocation flags
2283 *
2284 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2285 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2286 * yes.  If it's not a __GFP_HARDWALL request and this node is in the nearest
2287 * hardwalled cpuset ancestor to this task's cpuset, yes.  If the task has been
2288 * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
2289 * flag, yes.
2290 * Otherwise, no.
2291 *
2292 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2293 * cpuset_node_allowed_hardwall().  Otherwise, cpuset_node_allowed_softwall()
2294 * might sleep, and might allow a node from an enclosing cpuset.
2295 *
2296 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2297 * cpusets, and never sleeps.
2298 *
2299 * The __GFP_THISNODE placement logic is really handled elsewhere,
2300 * by forcibly using a zonelist starting at a specified node, and by
2301 * (in get_page_from_freelist()) refusing to consider the zones for
2302 * any node on the zonelist except the first.  By the time any such
2303 * calls get to this routine, we should just shut up and say 'yes'.
2304 *
2305 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
2306 * and do not allow allocations outside the current tasks cpuset
2307 * unless the task has been OOM killed and is marked TIF_MEMDIE.
2308 * GFP_KERNEL allocations are not so marked, so can escape to the
2309 * nearest enclosing hardwalled ancestor cpuset.
2310 *
2311 * Scanning up parent cpusets requires callback_mutex.  The
2312 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2313 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2314 * current tasks mems_allowed came up empty on the first pass over
2315 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
2316 * cpuset are short of memory, might require taking the callback_mutex
2317 * mutex.
2318 *
2319 * The first call here from mm/page_alloc:get_page_from_freelist()
2320 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
2321 * so no allocation on a node outside the cpuset is allowed (unless
2322 * in interrupt, of course).
2323 *
2324 * The second pass through get_page_from_freelist() doesn't even call
2325 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
2326 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
2327 * in alloc_flags.  That logic and the checks below have the combined
2328 * effect that:
2329 *	in_interrupt - any node ok (current task context irrelevant)
2330 *	GFP_ATOMIC   - any node ok
2331 *	TIF_MEMDIE   - any node ok
2332 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
2333 *	GFP_USER     - only nodes in current tasks mems allowed ok.
2334 *
2335 * Rule:
2336 *    Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
2337 *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2338 *    the code that might scan up ancestor cpusets and sleep.
2339 */
2340int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2341{
2342	const struct cpuset *cs;	/* current cpuset ancestors */
2343	int allowed;			/* is allocation in zone z allowed? */
2344
2345	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2346		return 1;
2347	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2348	if (node_isset(node, current->mems_allowed))
2349		return 1;
2350	/*
2351	 * Allow tasks that have access to memory reserves because they have
2352	 * been OOM killed to get memory anywhere.
2353	 */
2354	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2355		return 1;
2356	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
2357		return 0;
2358
2359	if (current->flags & PF_EXITING) /* Let dying task have memory */
2360		return 1;
2361
2362	/* Not hardwall and node outside mems_allowed: scan up cpusets */
2363	mutex_lock(&callback_mutex);
2364
2365	task_lock(current);
2366	cs = nearest_hardwall_ancestor(task_cs(current));
2367	task_unlock(current);
2368
2369	allowed = node_isset(node, cs->mems_allowed);
2370	mutex_unlock(&callback_mutex);
2371	return allowed;
2372}
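/*
 * Illustrative sketch, not part of this file: the practical difference
 * described in the comment above.  GFP_USER carries __GFP_HARDWALL and is
 * confined to current->mems_allowed, while GFP_KERNEL may fall back to
 * the nearest hardwalled ancestor cpuset.
 */
#if 0
	struct page *p;

	p = alloc_page(GFP_USER);	/* only nodes in current->mems_allowed  */
	p = alloc_page(GFP_KERNEL);	/* may escape to a hardwalled ancestor  */
#endif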
2373
2374/*
2375 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2376 * @node: is this an allowed node?
2377 * @gfp_mask: memory allocation flags
2378 *
2379 * If we're in interrupt, yes, we can always allocate.  If __GFP_THISNODE is
2380 * set, yes, we can always allocate.  If node is in our task's mems_allowed,
2381 * yes.  If the task has been OOM killed and has access to memory reserves as
2382 * specified by the TIF_MEMDIE flag, yes.
2383 * Otherwise, no.
2384 *
2385 * The __GFP_THISNODE placement logic is really handled elsewhere,
2386 * by forcibly using a zonelist starting at a specified node, and by
2387 * (in get_page_from_freelist()) refusing to consider the zones for
2388 * any node on the zonelist except the first.  By the time any such
2389 * calls get to this routine, we should just shut up and say 'yes'.
2390 *
2391 * Unlike the cpuset_node_allowed_softwall() variant, above,
2392 * this variant requires that the node be in the current task's
2393 * mems_allowed or that we're in interrupt.  It does not scan up the
2394 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2395 * It never sleeps.
2396 */
2397int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2398{
2399	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2400		return 1;
2401	if (node_isset(node, current->mems_allowed))
2402		return 1;
2403	/*
2404	 * Allow tasks that have access to memory reserves because they have
2405	 * been OOM killed to get memory anywhere.
2406	 */
2407	if (unlikely(test_thread_flag(TIF_MEMDIE)))
2408		return 1;
2409	return 0;
2410}
2411
2412/**
2413 * cpuset_unlock - release lock on cpuset changes
2414 *
2415 * Undo the lock taken in a previous cpuset_lock() call.
2416 */
2417
2418void cpuset_unlock(void)
2419{
2420	mutex_unlock(&callback_mutex);
2421}
2422
2423/**
2424 * cpuset_mem_spread_node() - On which node to begin search for a file page
2425 * cpuset_slab_spread_node() - On which node to begin search for a slab page
2426 *
2427 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
2428 * tasks in a cpuset with is_spread_page or is_spread_slab set),
2429 * and if the memory allocation used cpuset_mem_spread_node()
2430 * to determine on which node to start looking, as it will for
2431 * certain page cache or slab cache pages such as used for file
2432 * system buffers and inode caches, then instead of starting on the
2433 * local node to look for a free page, spread the starting
2434 * node around the task's mems_allowed nodes.
2435 *
2436 * We don't have to worry about the returned node being offline
2437 * because "it can't happen", and even if it did, it would be ok.
2438 *
2439 * The routines calling guarantee_online_mems() are careful to
2440 * only set nodes in task->mems_allowed that are online.  So it
2441 * should not be possible for the following code to return an
2442 * offline node.  But if it did, that would be ok, as this routine
2443 * is not returning the node where the allocation must be, only
2444 * the node where the search should start.  The zonelist passed to
2445 * __alloc_pages() will include all nodes.  If the slab allocator
2446 * is passed an offline node, it will fall back to the local node.
2447 * See kmem_cache_alloc_node().
2448 */
2449
2450static int cpuset_spread_node(int *rotor)
2451{
2452	int node;
2453
2454	node = next_node(*rotor, current->mems_allowed);
2455	if (node == MAX_NUMNODES)
2456		node = first_node(current->mems_allowed);
2457	*rotor = node;
2458	return node;
2459}
2460
2461int cpuset_mem_spread_node(void)
2462{
2463	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
2464		current->cpuset_mem_spread_rotor =
2465			node_random(&current->mems_allowed);
2466
2467	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
2468}
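/*
 * Illustrative sketch, not part of this file: how a spread-aware
 * allocation site might use the rotor above, in the spirit of the page
 * cache and slab callers mentioned in the comment.  alloc_pages_node()
 * is the generic node-targeted allocator; real call sites differ.
 */
#if 0
	int nid = cpuset_mem_spread_node();
	struct page *page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
#endif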
2469
2470int cpuset_slab_spread_node(void)
2471{
2472	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
2473		current->cpuset_slab_spread_rotor =
2474			node_random(&current->mems_allowed);
2475
2476	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
2477}
2478
2479EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
2480
2481/**
2482 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
2483 * @tsk1: pointer to task_struct of some task.
2484 * @tsk2: pointer to task_struct of some other task.
2485 *
2486 * Description: Return true if @tsk1's mems_allowed intersects the
2487 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
2488 * one task's memory usage might impact the memory available
2489 * to the other.
2490 **/
2491
2492int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
2493				   const struct task_struct *tsk2)
2494{
2495	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
2496}
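/*
 * Illustrative sketch (not part of this file): an OOM-killer style
 * scan that skips tasks whose allowed memory nodes do not overlap the
 * allocating task's.  The helper name mems_overlap_current() is
 * hypothetical; the real OOM killer applies the same intersection test
 * when deciding whether killing a task could help the allocator.
 */
#if 0
static bool mems_overlap_current(struct task_struct *p)
{
	/* True if p uses (or could free) memory on nodes we may allocate from. */
	return cpuset_mems_allowed_intersects(current, p);
}
#endif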
2497
2498/**
2499 * cpuset_print_task_mems_allowed - prints task's cpuset and mems_allowed
2500 * @tsk: pointer to task_struct of some task.
2501 *
2502 * Description: Prints @tsk's name, cpuset name, and cached copy of its
2503 * mems_allowed to the kernel log.  Must hold task_lock(tsk) to allow
2504 * dereferencing task_cs(tsk).
2505 */
2506void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2507{
2508	struct dentry *dentry;
2509
2510	dentry = task_cs(tsk)->css.cgroup->dentry;
2511	spin_lock(&cpuset_buffer_lock);
2512	snprintf(cpuset_name, CPUSET_NAME_LEN, "%s",
2513		 dentry ? (const char *)dentry->d_name.name : "/");
2514	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2515			   tsk->mems_allowed);
2516	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
2517	       tsk->comm, cpuset_name, cpuset_nodelist);
2518	spin_unlock(&cpuset_buffer_lock);
2519}
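/*
 * Example of the resulting log line (task name, cpuset directory name
 * and node list are illustrative only; a task in the root cpuset
 * prints "/" as its cpuset name):
 *
 *   myapp cpuset=batch mems_allowed=0-1
 */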
2520
2521/*
2522 * Collection of memory_pressure is suppressed unless
2523 * this flag is enabled by writing "1" to the special
2524 * cpuset file 'memory_pressure_enabled' in the root cpuset.
2525 */
2526
2527int cpuset_memory_pressure_enabled __read_mostly;
2528
2529/**
2530 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
2531 *
2532 * Keep a running average of the rate of synchronous (direct)
2533 * page reclaim efforts initiated by tasks in each cpuset.
2534 *
2535 * This represents the rate at which some task in the cpuset
2536 * ran low on memory on all nodes it was allowed to use, and
2537 * had to enter the kernel's page reclaim code in an effort to
2538 * create more free memory by tossing clean pages or swapping
2539 * or writing dirty pages.
2540 *
2541 * Displayed to user space in the per-cpuset read-only file
2542 * "memory_pressure".  Value displayed is an integer
2543 * representing the recent rate of entry into the synchronous
2544 * (direct) page reclaim by any task attached to the cpuset.
2545 **/
2546
2547void __cpuset_memory_pressure_bump(void)
2548{
2549	task_lock(current);
2550	fmeter_markevent(&task_cs(current)->fmeter);
2551	task_unlock(current);
2552}
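/*
 * Illustrative sketch (not part of this file): how a direct reclaim
 * entry point can record a memory_pressure event.  The surrounding
 * function name is hypothetical; real callers go through the
 * cpuset_memory_pressure_bump() wrapper in <linux/cpuset.h>, which
 * performs the same enabled-flag check before calling into this file.
 */
#if 0
static void note_direct_reclaim(void)
{
	/* Record one synchronous reclaim event against current's cpuset. */
	if (cpuset_memory_pressure_enabled)
		__cpuset_memory_pressure_bump();

	/* ... the actual page reclaim work would follow here ... */
}
#endif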
2553
2554#ifdef CONFIG_PROC_PID_CPUSET
2555/*
2556 * proc_cpuset_show()
2557 *  - Print task's cpuset path into seq_file.
2558 *  - Used for /proc/<pid>/cpuset.
2559 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
2560 *    doesn't really matter if tsk->cpuset changes after we read it,
2561 *    and we take cgroup_mutex, keeping cpuset_attach() from changing it
2562 *    anyway.
2563 */
2564static int proc_cpuset_show(struct seq_file *m, void *unused_v)
2565{
2566	struct pid *pid;
2567	struct task_struct *tsk;
2568	char *buf;
2569	struct cgroup_subsys_state *css;
2570	int retval;
2571
2572	retval = -ENOMEM;
2573	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2574	if (!buf)
2575		goto out;
2576
2577	retval = -ESRCH;
2578	pid = m->private;
2579	tsk = get_pid_task(pid, PIDTYPE_PID);
2580	if (!tsk)
2581		goto out_free;
2582
2583	retval = -EINVAL;
2584	cgroup_lock();
2585	css = task_subsys_state(tsk, cpuset_subsys_id);
2586	retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
2587	if (retval < 0)
2588		goto out_unlock;
2589	seq_puts(m, buf);
2590	seq_putc(m, '\n');
2591out_unlock:
2592	cgroup_unlock();
2593	put_task_struct(tsk);
2594out_free:
2595	kfree(buf);
2596out:
2597	return retval;
2598}
2599
2600static int cpuset_open(struct inode *inode, struct file *file)
2601{
2602	struct pid *pid = PROC_I(inode)->pid;
2603	return single_open(file, proc_cpuset_show, pid);
2604}
2605
2606const struct file_operations proc_cpuset_operations = {
2607	.open		= cpuset_open,
2608	.read		= seq_read,
2609	.llseek		= seq_lseek,
2610	.release	= single_release,
2611};
2612#endif /* CONFIG_PROC_PID_CPUSET */
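/*
 * Illustrative sketch (not part of this file): a small user space
 * program that reads its own cpuset path through the interface
 * implemented above.  For a task in the root cpuset the file contains
 * "/"; for a child cpuset it contains the cgroup path, e.g. "/batch".
 */
#if 0
#include <stdio.h>

int main(void)
{
	char path[256];
	FILE *f = fopen("/proc/self/cpuset", "r");

	if (f && fgets(path, sizeof(path), f))
		printf("cpuset: %s", path);	/* path already ends in '\n' */
	if (f)
		fclose(f);
	return 0;
}
#endif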
2613
2614/* Display task mems_allowed in /proc/<pid>/status file. */
2615void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2616{
2617	seq_printf(m, "Mems_allowed:\t");
2618	seq_nodemask(m, &task->mems_allowed);
2619	seq_printf(m, "\n");
2620	seq_printf(m, "Mems_allowed_list:\t");
2621	seq_nodemask_list(m, &task->mems_allowed);
2622	seq_printf(m, "\n");
2623}
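/*
 * Example of the two lines this contributes to /proc/<pid>/status for
 * a task allowed to use memory nodes 0-3 (mask width depends on the
 * kernel's node mask size; values illustrative only):
 *
 *   Mems_allowed:	0000000f
 *   Mems_allowed_list:	0-3
 */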