v6.2
   1/*
   2 *  kernel/cpuset.c
   3 *
   4 *  Processor and Memory placement constraints for sets of tasks.
   5 *
   6 *  Copyright (C) 2003 BULL SA.
   7 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
   8 *  Copyright (C) 2006 Google, Inc
   9 *
  10 *  Portions derived from Patrick Mochel's sysfs code.
  11 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
  12 *
  13 *  2003-10-10 Written by Simon Derr.
  14 *  2003-10-22 Updates by Stephen Hemminger.
  15 *  2004 May-July Rework by Paul Jackson.
  16 *  2006 Rework by Paul Menage to use generic cgroups
  17 *  2008 Rework of the scheduler domains and CPU hotplug handling
  18 *       by Max Krasnyansky
  19 *
  20 *  This file is subject to the terms and conditions of the GNU General Public
  21 *  License.  See the file COPYING in the main directory of the Linux
  22 *  distribution for more details.
  23 */
  24
  25#include <linux/cpu.h>
  26#include <linux/cpumask.h>
  27#include <linux/cpuset.h>
  28#include <linux/err.h>
  29#include <linux/errno.h>
  30#include <linux/file.h>
  31#include <linux/fs.h>
  32#include <linux/init.h>
  33#include <linux/interrupt.h>
  34#include <linux/kernel.h>
  35#include <linux/kmod.h>
  36#include <linux/kthread.h>
  37#include <linux/list.h>
  38#include <linux/mempolicy.h>
  39#include <linux/mm.h>
  40#include <linux/memory.h>
  41#include <linux/export.h>
  42#include <linux/mount.h>
  43#include <linux/fs_context.h>
  44#include <linux/namei.h>
  45#include <linux/pagemap.h>
  46#include <linux/proc_fs.h>
  47#include <linux/rcupdate.h>
  48#include <linux/sched.h>
  49#include <linux/sched/deadline.h>
  50#include <linux/sched/mm.h>
  51#include <linux/sched/task.h>
  52#include <linux/seq_file.h>
  53#include <linux/security.h>
  54#include <linux/slab.h>
  55#include <linux/spinlock.h>
  56#include <linux/stat.h>
  57#include <linux/string.h>
  58#include <linux/time.h>
  59#include <linux/time64.h>
  60#include <linux/backing-dev.h>
  61#include <linux/sort.h>
  62#include <linux/oom.h>
  63#include <linux/sched/isolation.h>
  64#include <linux/uaccess.h>
  65#include <linux/atomic.h>
  66#include <linux/mutex.h>
  67#include <linux/cgroup.h>
  68#include <linux/wait.h>
  69
  70DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
  71DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
  72
  73/*
  74 * There could be abnormal cpuset configurations for cpu or memory
   75 * node binding; add this key to provide a quick, low-cost judgment
  76 * of the situation.
  77 */
  78DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
  79
  80/* See "Frequency meter" comments, below. */
  81
  82struct fmeter {
  83	int cnt;		/* unprocessed events count */
  84	int val;		/* most recent output value */
  85	time64_t time;		/* clock (secs) when val computed */
  86	spinlock_t lock;	/* guards read or write of above */
  87};
  88
  89/*
  90 * Invalid partition error code
  91 */
  92enum prs_errcode {
  93	PERR_NONE = 0,
  94	PERR_INVCPUS,
  95	PERR_INVPARENT,
  96	PERR_NOTPART,
  97	PERR_NOTEXCL,
  98	PERR_NOCPUS,
  99	PERR_HOTPLUG,
 100	PERR_CPUSEMPTY,
 101};
 102
 103static const char * const perr_strings[] = {
 104	[PERR_INVCPUS]   = "Invalid cpu list in cpuset.cpus",
 105	[PERR_INVPARENT] = "Parent is an invalid partition root",
 106	[PERR_NOTPART]   = "Parent is not a partition root",
 107	[PERR_NOTEXCL]   = "Cpu list in cpuset.cpus not exclusive",
 108	[PERR_NOCPUS]    = "Parent unable to distribute cpu downstream",
 109	[PERR_HOTPLUG]   = "No cpu available due to hotplug",
 110	[PERR_CPUSEMPTY] = "cpuset.cpus is empty",
 111};
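
/*
 * Illustrative example (an assumption based on the cgroup v2 interface,
 * not an exact quote of this file's output): when a partition becomes
 * invalid, the matching string above is appended to the value read back
 * from cpuset.cpus.partition, roughly as in
 *
 *	# cat cpuset.cpus.partition
 *	root invalid (Cpu list in cpuset.cpus not exclusive)
 */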
 112
 113struct cpuset {
 114	struct cgroup_subsys_state css;
 115
 116	unsigned long flags;		/* "unsigned long" so bitops work */
 117
 118	/*
 119	 * On default hierarchy:
 120	 *
 121	 * The user-configured masks can only be changed by writing to
 122	 * cpuset.cpus and cpuset.mems, and won't be limited by the
 123	 * parent masks.
 124	 *
  125	 * The effective masks are the real masks that apply to the tasks
 126	 * in the cpuset. They may be changed if the configured masks are
 127	 * changed or hotplug happens.
 128	 *
 129	 * effective_mask == configured_mask & parent's effective_mask,
 130	 * and if it ends up empty, it will inherit the parent's mask.
 131	 *
 132	 *
 133	 * On legacy hierarchy:
 134	 *
  135	 * The user-configured masks are always the same as the effective masks.
 136	 */
 137
  138	/* user-configured CPUs and Memory Nodes allowed to tasks */
 139	cpumask_var_t cpus_allowed;
 140	nodemask_t mems_allowed;
 141
  142	/* effective CPUs and Memory Nodes allowed to tasks */
 143	cpumask_var_t effective_cpus;
 144	nodemask_t effective_mems;
 145
 146	/*
 147	 * CPUs allocated to child sub-partitions (default hierarchy only)
 148	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
 149	 * - effective_cpus and subparts_cpus are mutually exclusive.
 150	 *
 151	 * effective_cpus contains only onlined CPUs, but subparts_cpus
 152	 * may have offlined ones.
 153	 */
 154	cpumask_var_t subparts_cpus;
 155
 156	/*
  157	 * This is the old set of Memory Nodes that tasks took on.
 158	 *
 159	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
 160	 * - A new cpuset's old_mems_allowed is initialized when some
 161	 *   task is moved into it.
 162	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
 163	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
 164	 *   then old_mems_allowed is updated to mems_allowed.
 165	 */
 166	nodemask_t old_mems_allowed;
 167
 168	struct fmeter fmeter;		/* memory_pressure filter */
 169
 170	/*
 171	 * Tasks are being attached to this cpuset.  Used to prevent
 172	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
 173	 */
 174	int attach_in_progress;
 175
 176	/* partition number for rebuild_sched_domains() */
 177	int pn;
 178
 179	/* for custom sched domain */
 180	int relax_domain_level;
 181
 182	/* number of CPUs in subparts_cpus */
 183	int nr_subparts_cpus;
 184
 185	/* partition root state */
 186	int partition_root_state;
 187
 188	/*
 189	 * Default hierarchy only:
 190	 * use_parent_ecpus - set if using parent's effective_cpus
 191	 * child_ecpus_count - # of children with use_parent_ecpus set
 192	 */
 193	int use_parent_ecpus;
 194	int child_ecpus_count;
 195
 196	/* Invalid partition error code, not lock protected */
 197	enum prs_errcode prs_err;
 198
 199	/* Handle for cpuset.cpus.partition */
 200	struct cgroup_file partition_file;
 201};
 202
 203/*
 204 * Partition root states:
 205 *
 206 *   0 - member (not a partition root)
 207 *   1 - partition root
 208 *   2 - partition root without load balancing (isolated)
 209 *  -1 - invalid partition root
 210 *  -2 - invalid isolated partition root
 211 */
 212#define PRS_MEMBER		0
 213#define PRS_ROOT		1
 214#define PRS_ISOLATED		2
 215#define PRS_INVALID_ROOT	-1
 216#define PRS_INVALID_ISOLATED	-2
 217
 218static inline bool is_prs_invalid(int prs_state)
 219{
 220	return prs_state < 0;
 221}
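
/*
 * Orientation sketch (assumed mapping, for illustration only): the values
 * exchanged through cgroup v2's cpuset.cpus.partition correspond roughly
 * to the states above:
 *
 *	"member"                  PRS_MEMBER
 *	"root"                    PRS_ROOT
 *	"isolated"                PRS_ISOLATED
 *	"root invalid (...)"      PRS_INVALID_ROOT     (read-back form)
 *	"isolated invalid (...)"  PRS_INVALID_ISOLATED (read-back form)
 */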
 222
 223/*
 224 * Temporary cpumasks for working with partitions that are passed among
 225 * functions to avoid memory allocation in inner functions.
 226 */
 227struct tmpmasks {
 228	cpumask_var_t addmask, delmask;	/* For partition root */
 229	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
 230};
 231
 232static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
 233{
 234	return css ? container_of(css, struct cpuset, css) : NULL;
 235}
 236
 237/* Retrieve the cpuset for a task */
 238static inline struct cpuset *task_cs(struct task_struct *task)
 239{
 240	return css_cs(task_css(task, cpuset_cgrp_id));
 241}
 242
 243static inline struct cpuset *parent_cs(struct cpuset *cs)
 244{
 245	return css_cs(cs->css.parent);
 246}
 247
 248/* bits in struct cpuset flags field */
 249typedef enum {
 250	CS_ONLINE,
 251	CS_CPU_EXCLUSIVE,
 252	CS_MEM_EXCLUSIVE,
 253	CS_MEM_HARDWALL,
 254	CS_MEMORY_MIGRATE,
 255	CS_SCHED_LOAD_BALANCE,
 256	CS_SPREAD_PAGE,
 257	CS_SPREAD_SLAB,
 258} cpuset_flagbits_t;
 259
 260/* convenient tests for these bits */
 261static inline bool is_cpuset_online(struct cpuset *cs)
 262{
 263	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
 264}
 265
 266static inline int is_cpu_exclusive(const struct cpuset *cs)
 267{
 268	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
 269}
 270
 271static inline int is_mem_exclusive(const struct cpuset *cs)
 272{
 273	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 274}
 275
 276static inline int is_mem_hardwall(const struct cpuset *cs)
 277{
 278	return test_bit(CS_MEM_HARDWALL, &cs->flags);
 279}
 280
 281static inline int is_sched_load_balance(const struct cpuset *cs)
 282{
 283	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 284}
 285
 286static inline int is_memory_migrate(const struct cpuset *cs)
 287{
 288	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
 289}
 290
 291static inline int is_spread_page(const struct cpuset *cs)
 292{
 293	return test_bit(CS_SPREAD_PAGE, &cs->flags);
 294}
 295
 296static inline int is_spread_slab(const struct cpuset *cs)
 297{
 298	return test_bit(CS_SPREAD_SLAB, &cs->flags);
 299}
 300
 301static inline int is_partition_valid(const struct cpuset *cs)
 302{
 303	return cs->partition_root_state > 0;
 304}
 305
 306static inline int is_partition_invalid(const struct cpuset *cs)
 307{
 308	return cs->partition_root_state < 0;
 309}
 310
 311/*
 312 * Callers should hold callback_lock to modify partition_root_state.
 313 */
 314static inline void make_partition_invalid(struct cpuset *cs)
 315{
 316	if (is_partition_valid(cs))
 317		cs->partition_root_state = -cs->partition_root_state;
 318}
 319
 320/*
  321 * Send a notification event whenever partition_root_state changes.
 322 */
 323static inline void notify_partition_change(struct cpuset *cs, int old_prs)
 324{
 325	if (old_prs == cs->partition_root_state)
 326		return;
 327	cgroup_file_notify(&cs->partition_file);
 328
 329	/* Reset prs_err if not invalid */
 330	if (is_partition_valid(cs))
 331		WRITE_ONCE(cs->prs_err, PERR_NONE);
 332}
 333
 334static struct cpuset top_cpuset = {
 335	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
 336		  (1 << CS_MEM_EXCLUSIVE)),
 337	.partition_root_state = PRS_ROOT,
 338};
 339
 340/**
 341 * cpuset_for_each_child - traverse online children of a cpuset
 342 * @child_cs: loop cursor pointing to the current child
 343 * @pos_css: used for iteration
 344 * @parent_cs: target cpuset to walk children of
 345 *
 346 * Walk @child_cs through the online children of @parent_cs.  Must be used
 347 * with RCU read locked.
 348 */
 349#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
 350	css_for_each_child((pos_css), &(parent_cs)->css)		\
 351		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
 352
 353/**
 354 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 355 * @des_cs: loop cursor pointing to the current descendant
 356 * @pos_css: used for iteration
  357 * @root_cs: target cpuset to walk descendants of
 358 *
 359 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 360 * with RCU read locked.  The caller may modify @pos_css by calling
 361 * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
 362 * iteration and the first node to be visited.
 363 */
 364#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
 365	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
 366		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
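
/*
 * Usage sketch (illustrative only, mirroring how these macros are used
 * later in this file): both walks must run under rcu_read_lock().
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
 *		...	(cp visits top_cpuset first, then online descendants)
 *	}
 *	rcu_read_unlock();
 */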
 367
 368/*
 369 * There are two global locks guarding cpuset structures - cpuset_rwsem and
 370 * callback_lock. We also require taking task_lock() when dereferencing a
 371 * task's cpuset pointer. See "The task_lock() exception", at the end of this
 372 * comment.  The cpuset code uses only cpuset_rwsem write lock.  Other
 373 * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to
 374 * prevent change to cpuset structures.
 375 *
 376 * A task must hold both locks to modify cpusets.  If a task holds
 377 * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it
 378 * is the only task able to also acquire callback_lock and be able to
 379 * modify cpusets.  It can perform various checks on the cpuset structure
 380 * first, knowing nothing will change.  It can also allocate memory while
 381 * just holding cpuset_rwsem.  While it is performing these checks, various
 382 * callback routines can briefly acquire callback_lock to query cpusets.
 383 * Once it is ready to make the changes, it takes callback_lock, blocking
 384 * everyone else.
 385 *
 386 * Calls to the kernel memory allocator can not be made while holding
 387 * callback_lock, as that would risk double tripping on callback_lock
 388 * from one of the callbacks into the cpuset code from within
 389 * __alloc_pages().
 390 *
 391 * If a task is only holding callback_lock, then it has read-only
 392 * access to cpusets.
 393 *
 394 * Now, the task_struct fields mems_allowed and mempolicy may be changed
  395 * by another task, so we use alloc_lock in the task_struct to protect
 396 * them.
 397 *
 398 * The cpuset_common_file_read() handlers only hold callback_lock across
 399 * small pieces of code, such as when reading out possibly multi-word
 400 * cpumasks and nodemasks.
 401 *
 402 * Accessing a task's cpuset should be done in accordance with the
 403 * guidelines for accessing subsystem state in kernel/cgroup.c
 404 */
 405
 406DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
 407
 408void cpuset_read_lock(void)
 409{
 410	percpu_down_read(&cpuset_rwsem);
 411}
 412
 413void cpuset_read_unlock(void)
 414{
 415	percpu_up_read(&cpuset_rwsem);
 416}
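
/*
 * Read-side usage sketch for other subsystems (an illustration, not a
 * quote of any real caller): hold the read lock across a section that
 * must observe a stable cpuset configuration.
 *
 *	cpuset_read_lock();
 *	...	(e.g. validate a task's affinity request against its cpuset)
 *	cpuset_read_unlock();
 */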
 417
 418static DEFINE_SPINLOCK(callback_lock);
 419
 420static struct workqueue_struct *cpuset_migrate_mm_wq;
 421
 422/*
 423 * CPU / memory hotplug is handled asynchronously.
 424 */
 425static void cpuset_hotplug_workfn(struct work_struct *work);
 426static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
 427
 428static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
 429
 430static inline void check_insane_mems_config(nodemask_t *nodes)
 431{
 432	if (!cpusets_insane_config() &&
 433		movable_only_nodes(nodes)) {
 434		static_branch_enable(&cpusets_insane_config_key);
 435		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
 436			"Cpuset allocations might fail even with a lot of memory available.\n",
 437			nodemask_pr_args(nodes));
 438	}
 439}
 440
 441/*
 442 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
 443 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
 444 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
 445 * With v2 behavior, "cpus" and "mems" are always what the users have
 446 * requested and won't be changed by hotplug events. Only the effective
 447 * cpus or mems will be affected.
 448 */
 449static inline bool is_in_v2_mode(void)
 450{
 451	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
 452	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
 453}
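
/*
 * Illustrative example (assumed invocation; see the cgroup-v1 cpuset
 * documentation for the authoritative form): v2 behavior can be requested
 * on a v1 hierarchy with the cpuset_v2_mode mount option, e.g.
 *
 *	mount -t cgroup -o cpuset,cpuset_v2_mode none /sys/fs/cgroup/cpuset
 */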
 454
 455/**
 456 * partition_is_populated - check if partition has tasks
 457 * @cs: partition root to be checked
 458 * @excluded_child: a child cpuset to be excluded in task checking
 459 * Return: true if there are tasks, false otherwise
 460 *
 461 * It is assumed that @cs is a valid partition root. @excluded_child should
 462 * be non-NULL when this cpuset is going to become a partition itself.
 463 */
 464static inline bool partition_is_populated(struct cpuset *cs,
 465					  struct cpuset *excluded_child)
 466{
 467	struct cgroup_subsys_state *css;
 468	struct cpuset *child;
 469
 470	if (cs->css.cgroup->nr_populated_csets)
 471		return true;
 472	if (!excluded_child && !cs->nr_subparts_cpus)
 473		return cgroup_is_populated(cs->css.cgroup);
 474
 475	rcu_read_lock();
 476	cpuset_for_each_child(child, css, cs) {
 477		if (child == excluded_child)
 478			continue;
 479		if (is_partition_valid(child))
 480			continue;
 481		if (cgroup_is_populated(child->css.cgroup)) {
 482			rcu_read_unlock();
 483			return true;
 484		}
 485	}
 486	rcu_read_unlock();
 487	return false;
 488}
 489
 490/*
  491 * Return in pmask the portion of a task's cpuset's cpus_allowed that
 492 * are online and are capable of running the task.  If none are found,
 493 * walk up the cpuset hierarchy until we find one that does have some
 494 * appropriate cpus.
 495 *
 496 * One way or another, we guarantee to return some non-empty subset
 497 * of cpu_online_mask.
 498 *
 499 * Call with callback_lock or cpuset_rwsem held.
 500 */
 501static void guarantee_online_cpus(struct task_struct *tsk,
 502				  struct cpumask *pmask)
 503{
 504	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
 505	struct cpuset *cs;
 506
 507	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
 508		cpumask_copy(pmask, cpu_online_mask);
 509
 510	rcu_read_lock();
 511	cs = task_cs(tsk);
 512
 513	while (!cpumask_intersects(cs->effective_cpus, pmask)) {
 514		cs = parent_cs(cs);
 515		if (unlikely(!cs)) {
 516			/*
 517			 * The top cpuset doesn't have any online cpu as a
 518			 * consequence of a race between cpuset_hotplug_work
 519			 * and cpu hotplug notifier.  But we know the top
 520			 * cpuset's effective_cpus is on its way to be
 521			 * identical to cpu_online_mask.
 522			 */
 523			goto out_unlock;
 524		}
 525	}
 526	cpumask_and(pmask, pmask, cs->effective_cpus);
 527
 528out_unlock:
 529	rcu_read_unlock();
 530}
 531
 532/*
 533 * Return in *pmask the portion of a cpusets's mems_allowed that
 534 * are online, with memory.  If none are online with memory, walk
 535 * up the cpuset hierarchy until we find one that does have some
 536 * online mems.  The top cpuset always has some mems online.
 537 *
 538 * One way or another, we guarantee to return some non-empty subset
 539 * of node_states[N_MEMORY].
 540 *
 541 * Call with callback_lock or cpuset_rwsem held.
 542 */
 543static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 544{
 545	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
 546		cs = parent_cs(cs);
 547	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
 548}
 549
 550/*
 551 * update task's spread flag if cpuset's page/slab spread flag is set
 552 *
 553 * Call with callback_lock or cpuset_rwsem held. The check can be skipped
 554 * if on default hierarchy.
 555 */
 556static void cpuset_update_task_spread_flags(struct cpuset *cs,
 557					struct task_struct *tsk)
 558{
 559	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
 560		return;
 561
 562	if (is_spread_page(cs))
 563		task_set_spread_page(tsk);
 564	else
 565		task_clear_spread_page(tsk);
 566
 567	if (is_spread_slab(cs))
 568		task_set_spread_slab(tsk);
 569	else
 570		task_clear_spread_slab(tsk);
 571}
 572
 573/*
 574 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 575 *
 576 * One cpuset is a subset of another if all its allowed CPUs and
 577 * Memory Nodes are a subset of the other, and its exclusive flags
 578 * are only set if the other's are set.  Call holding cpuset_rwsem.
 579 */
 580
 581static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 582{
 583	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
 584		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 585		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 586		is_mem_exclusive(p) <= is_mem_exclusive(q);
 587}
 588
 589/**
 590 * alloc_cpumasks - allocate three cpumasks for cpuset
  591 * @cs:  the cpuset that has cpumasks to be allocated.
 592 * @tmp: the tmpmasks structure pointer
 593 * Return: 0 if successful, -ENOMEM otherwise.
 594 *
 595 * Only one of the two input arguments should be non-NULL.
 596 */
 597static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
 598{
 599	cpumask_var_t *pmask1, *pmask2, *pmask3;
 600
 601	if (cs) {
 602		pmask1 = &cs->cpus_allowed;
 603		pmask2 = &cs->effective_cpus;
 604		pmask3 = &cs->subparts_cpus;
 605	} else {
 606		pmask1 = &tmp->new_cpus;
 607		pmask2 = &tmp->addmask;
 608		pmask3 = &tmp->delmask;
 609	}
 610
 611	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
 612		return -ENOMEM;
 613
 614	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
 615		goto free_one;
 616
 617	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
 618		goto free_two;
 619
 620	return 0;
 621
 622free_two:
 623	free_cpumask_var(*pmask2);
 624free_one:
 625	free_cpumask_var(*pmask1);
 626	return -ENOMEM;
 627}
 628
 629/**
 630 * free_cpumasks - free cpumasks in a tmpmasks structure
  631 * @cs:  the cpuset that has cpumasks to be freed.
 632 * @tmp: the tmpmasks structure pointer
 633 */
 634static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
 635{
 636	if (cs) {
 637		free_cpumask_var(cs->cpus_allowed);
 638		free_cpumask_var(cs->effective_cpus);
 639		free_cpumask_var(cs->subparts_cpus);
 640	}
 641	if (tmp) {
 642		free_cpumask_var(tmp->new_cpus);
 643		free_cpumask_var(tmp->addmask);
 644		free_cpumask_var(tmp->delmask);
 645	}
 646}
 647
 648/**
 649 * alloc_trial_cpuset - allocate a trial cpuset
 650 * @cs: the cpuset that the trial cpuset duplicates
 651 */
 652static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 653{
 654	struct cpuset *trial;
 655
 656	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
 657	if (!trial)
 658		return NULL;
 659
 660	if (alloc_cpumasks(trial, NULL)) {
 661		kfree(trial);
 662		return NULL;
 663	}
 664
 665	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
 666	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
 667	return trial;
 668}
 669
 670/**
 671 * free_cpuset - free the cpuset
 672 * @cs: the cpuset to be freed
 673 */
 674static inline void free_cpuset(struct cpuset *cs)
 675{
 676	free_cpumasks(cs, NULL);
 677	kfree(cs);
 678}
 679
 680/*
 681 * validate_change_legacy() - Validate conditions specific to legacy (v1)
 682 *                            behavior.
 683 */
 684static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
 685{
 686	struct cgroup_subsys_state *css;
 687	struct cpuset *c, *par;
 688	int ret;
 689
 690	WARN_ON_ONCE(!rcu_read_lock_held());
 691
 692	/* Each of our child cpusets must be a subset of us */
 693	ret = -EBUSY;
 694	cpuset_for_each_child(c, css, cur)
 695		if (!is_cpuset_subset(c, trial))
 696			goto out;
 697
 698	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
 699	ret = -EACCES;
 700	par = parent_cs(cur);
 701	if (par && !is_cpuset_subset(trial, par))
 702		goto out;
 703
 704	ret = 0;
 705out:
 706	return ret;
 707}
 708
 709/*
 710 * validate_change() - Used to validate that any proposed cpuset change
 711 *		       follows the structural rules for cpusets.
 712 *
 713 * If we replaced the flag and mask values of the current cpuset
 714 * (cur) with those values in the trial cpuset (trial), would
 715 * our various subset and exclusive rules still be valid?  Presumes
 716 * cpuset_rwsem held.
 717 *
 718 * 'cur' is the address of an actual, in-use cpuset.  Operations
 719 * such as list traversal that depend on the actual address of the
 720 * cpuset in the list must use cur below, not trial.
 721 *
 722 * 'trial' is the address of bulk structure copy of cur, with
 723 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 724 * or flags changed to new, trial values.
 725 *
 726 * Return 0 if valid, -errno if not.
 727 */
 728
 729static int validate_change(struct cpuset *cur, struct cpuset *trial)
 730{
 731	struct cgroup_subsys_state *css;
 732	struct cpuset *c, *par;
 733	int ret = 0;
 734
 735	rcu_read_lock();
 736
 737	if (!is_in_v2_mode())
 738		ret = validate_change_legacy(cur, trial);
 739	if (ret)
 740		goto out;
 741
 742	/* Remaining checks don't apply to root cpuset */
 743	if (cur == &top_cpuset)
 744		goto out;
 745
 746	par = parent_cs(cur);
 747
 748	/*
 749	 * Cpusets with tasks - existing or newly being attached - can't
 750	 * be changed to have empty cpus_allowed or mems_allowed.
 751	 */
 752	ret = -ENOSPC;
 753	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
 754		if (!cpumask_empty(cur->cpus_allowed) &&
 755		    cpumask_empty(trial->cpus_allowed))
 756			goto out;
 757		if (!nodes_empty(cur->mems_allowed) &&
 758		    nodes_empty(trial->mems_allowed))
 759			goto out;
 760	}
 761
 762	/*
 763	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
 764	 * tasks.
 765	 */
 766	ret = -EBUSY;
 767	if (is_cpu_exclusive(cur) &&
 768	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
 769				       trial->cpus_allowed))
 770		goto out;
 771
 772	/*
 773	 * If either I or some sibling (!= me) is exclusive, we can't
 774	 * overlap
 775	 */
 776	ret = -EINVAL;
 777	cpuset_for_each_child(c, css, par) {
 778		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 779		    c != cur &&
 780		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
 781			goto out;
 782		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 783		    c != cur &&
 784		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
 785			goto out;
 786	}
 787
 788	ret = 0;
 789out:
 790	rcu_read_unlock();
 791	return ret;
 792}
 793
 794#ifdef CONFIG_SMP
 795/*
 796 * Helper routine for generate_sched_domains().
 797 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 798 */
 799static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 800{
 801	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
 802}
 803
 804static void
 805update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 806{
 807	if (dattr->relax_domain_level < c->relax_domain_level)
 808		dattr->relax_domain_level = c->relax_domain_level;
 809	return;
 810}
 811
 812static void update_domain_attr_tree(struct sched_domain_attr *dattr,
 813				    struct cpuset *root_cs)
 814{
 815	struct cpuset *cp;
 816	struct cgroup_subsys_state *pos_css;
 817
 818	rcu_read_lock();
 819	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
 820		/* skip the whole subtree if @cp doesn't have any CPU */
 821		if (cpumask_empty(cp->cpus_allowed)) {
 822			pos_css = css_rightmost_descendant(pos_css);
 823			continue;
 824		}
 825
 826		if (is_sched_load_balance(cp))
 827			update_domain_attr(dattr, cp);
 828	}
 829	rcu_read_unlock();
 830}
 831
 832/* Must be called with cpuset_rwsem held.  */
 833static inline int nr_cpusets(void)
 834{
 835	/* jump label reference count + the top-level cpuset */
 836	return static_key_count(&cpusets_enabled_key.key) + 1;
 837}
 838
 839/*
 840 * generate_sched_domains()
 841 *
  842 * This function builds a partial partition of the system's CPUs.
 843 * A 'partial partition' is a set of non-overlapping subsets whose
 844 * union is a subset of that set.
 845 * The output of this function needs to be passed to kernel/sched/core.c
 846 * partition_sched_domains() routine, which will rebuild the scheduler's
 847 * load balancing domains (sched domains) as specified by that partial
 848 * partition.
 849 *
 850 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
 851 * for a background explanation of this.
 852 *
 853 * Does not return errors, on the theory that the callers of this
 854 * routine would rather not worry about failures to rebuild sched
 855 * domains when operating in the severe memory shortage situations
 856 * that could cause allocation failures below.
 857 *
 858 * Must be called with cpuset_rwsem held.
 859 *
 860 * The three key local variables below are:
 861 *    cp - cpuset pointer, used (together with pos_css) to perform a
 862 *	   top-down scan of all cpusets. For our purposes, rebuilding
  863 *	   the scheduler's sched domains, we can ignore !is_sched_load_
 864 *	   balance cpusets.
 865 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 866 *	   that need to be load balanced, for convenient iterative
 867 *	   access by the subsequent code that finds the best partition,
 868 *	   i.e the set of domains (subsets) of CPUs such that the
 869 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 870 *	   is a subset of one of these domains, while there are as
 871 *	   many such domains as possible, each as small as possible.
 872 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 873 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 874 *	   convenient format, that can be easily compared to the prior
 875 *	   value to determine what partition elements (sched domains)
 876 *	   were changed (added or removed.)
 877 *
 878 * Finding the best partition (set of domains):
 879 *	The triple nested loops below over i, j, k scan over the
 880 *	load balanced cpusets (using the array of cpuset pointers in
 881 *	csa[]) looking for pairs of cpusets that have overlapping
 882 *	cpus_allowed, but which don't have the same 'pn' partition
  883 *	number, and puts them in the same partition number.  It keeps
 884 *	looping on the 'restart' label until it can no longer find
 885 *	any such pairs.
 886 *
 887 *	The union of the cpus_allowed masks from the set of
 888 *	all cpusets having the same 'pn' value then form the one
 889 *	element of the partition (one sched domain) to be passed to
 890 *	partition_sched_domains().
 891 */
 892static int generate_sched_domains(cpumask_var_t **domains,
 893			struct sched_domain_attr **attributes)
 894{
 895	struct cpuset *cp;	/* top-down scan of cpusets */
 896	struct cpuset **csa;	/* array of all cpuset ptrs */
 897	int csn;		/* how many cpuset ptrs in csa so far */
 898	int i, j, k;		/* indices for partition finding loops */
 899	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
 900	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 901	int ndoms = 0;		/* number of sched domains in result */
 902	int nslot;		/* next empty doms[] struct cpumask slot */
 903	struct cgroup_subsys_state *pos_css;
 904	bool root_load_balance = is_sched_load_balance(&top_cpuset);
 905
 906	doms = NULL;
 907	dattr = NULL;
 908	csa = NULL;
 909
 910	/* Special case for the 99% of systems with one, full, sched domain */
 911	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
 912		ndoms = 1;
 913		doms = alloc_sched_domains(ndoms);
 914		if (!doms)
 915			goto done;
 916
 917		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 918		if (dattr) {
 919			*dattr = SD_ATTR_INIT;
 920			update_domain_attr_tree(dattr, &top_cpuset);
 921		}
 922		cpumask_and(doms[0], top_cpuset.effective_cpus,
 923			    housekeeping_cpumask(HK_TYPE_DOMAIN));
 924
 925		goto done;
 926	}
 927
 928	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
 929	if (!csa)
 930		goto done;
 931	csn = 0;
 932
 933	rcu_read_lock();
 934	if (root_load_balance)
 935		csa[csn++] = &top_cpuset;
 936	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
 937		if (cp == &top_cpuset)
 938			continue;
 939		/*
 940		 * Continue traversing beyond @cp iff @cp has some CPUs and
 941		 * isn't load balancing.  The former is obvious.  The
 942		 * latter: All child cpusets contain a subset of the
 943		 * parent's cpus, so just skip them, and then we call
 944		 * update_domain_attr_tree() to calc relax_domain_level of
 945		 * the corresponding sched domain.
 946		 *
 947		 * If root is load-balancing, we can skip @cp if it
 948		 * is a subset of the root's effective_cpus.
 949		 */
 950		if (!cpumask_empty(cp->cpus_allowed) &&
 951		    !(is_sched_load_balance(cp) &&
 952		      cpumask_intersects(cp->cpus_allowed,
 953					 housekeeping_cpumask(HK_TYPE_DOMAIN))))
 954			continue;
 955
 956		if (root_load_balance &&
 957		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
 958			continue;
 959
 960		if (is_sched_load_balance(cp) &&
 961		    !cpumask_empty(cp->effective_cpus))
 962			csa[csn++] = cp;
 963
 964		/* skip @cp's subtree if not a partition root */
 965		if (!is_partition_valid(cp))
 966			pos_css = css_rightmost_descendant(pos_css);
 967	}
 968	rcu_read_unlock();
 969
 970	for (i = 0; i < csn; i++)
 971		csa[i]->pn = i;
 972	ndoms = csn;
 973
 974restart:
 975	/* Find the best partition (set of sched domains) */
 976	for (i = 0; i < csn; i++) {
 977		struct cpuset *a = csa[i];
 978		int apn = a->pn;
 979
 980		for (j = 0; j < csn; j++) {
 981			struct cpuset *b = csa[j];
 982			int bpn = b->pn;
 983
 984			if (apn != bpn && cpusets_overlap(a, b)) {
 985				for (k = 0; k < csn; k++) {
 986					struct cpuset *c = csa[k];
 987
 988					if (c->pn == bpn)
 989						c->pn = apn;
 990				}
 991				ndoms--;	/* one less element */
 992				goto restart;
 993			}
 994		}
 995	}
 996
 997	/*
 998	 * Now we know how many domains to create.
 999	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
1000	 */
1001	doms = alloc_sched_domains(ndoms);
1002	if (!doms)
1003		goto done;
1004
1005	/*
1006	 * The rest of the code, including the scheduler, can deal with
1007	 * dattr==NULL case. No need to abort if alloc fails.
1008	 */
1009	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
1010			      GFP_KERNEL);
1011
1012	for (nslot = 0, i = 0; i < csn; i++) {
1013		struct cpuset *a = csa[i];
1014		struct cpumask *dp;
1015		int apn = a->pn;
1016
1017		if (apn < 0) {
1018			/* Skip completed partitions */
1019			continue;
1020		}
1021
1022		dp = doms[nslot];
1023
1024		if (nslot == ndoms) {
1025			static int warnings = 10;
1026			if (warnings) {
1027				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
1028					nslot, ndoms, csn, i, apn);
1029				warnings--;
1030			}
1031			continue;
1032		}
1033
1034		cpumask_clear(dp);
1035		if (dattr)
1036			*(dattr + nslot) = SD_ATTR_INIT;
1037		for (j = i; j < csn; j++) {
1038			struct cpuset *b = csa[j];
1039
1040			if (apn == b->pn) {
1041				cpumask_or(dp, dp, b->effective_cpus);
1042				cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
1043				if (dattr)
1044					update_domain_attr_tree(dattr + nslot, b);
1045
1046				/* Done with this partition */
1047				b->pn = -1;
1048			}
1049		}
1050		nslot++;
1051	}
1052	BUG_ON(nslot != ndoms);
1053
1054done:
1055	kfree(csa);
1056
1057	/*
1058	 * Fallback to the default domain if kmalloc() failed.
1059	 * See comments in partition_sched_domains().
1060	 */
1061	if (doms == NULL)
1062		ndoms = 1;
1063
1064	*domains    = doms;
1065	*attributes = dattr;
1066	return ndoms;
1067}
1068
1069static void update_tasks_root_domain(struct cpuset *cs)
1070{
1071	struct css_task_iter it;
1072	struct task_struct *task;
1073
1074	css_task_iter_start(&cs->css, 0, &it);
1075
1076	while ((task = css_task_iter_next(&it)))
1077		dl_add_task_root_domain(task);
1078
1079	css_task_iter_end(&it);
1080}
1081
1082static void rebuild_root_domains(void)
1083{
1084	struct cpuset *cs = NULL;
1085	struct cgroup_subsys_state *pos_css;
1086
1087	percpu_rwsem_assert_held(&cpuset_rwsem);
1088	lockdep_assert_cpus_held();
1089	lockdep_assert_held(&sched_domains_mutex);
1090
1091	rcu_read_lock();
1092
1093	/*
1094	 * Clear default root domain DL accounting, it will be computed again
1095	 * if a task belongs to it.
1096	 */
1097	dl_clear_root_domain(&def_root_domain);
1098
1099	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1100
1101		if (cpumask_empty(cs->effective_cpus)) {
1102			pos_css = css_rightmost_descendant(pos_css);
1103			continue;
1104		}
1105
1106		css_get(&cs->css);
1107
1108		rcu_read_unlock();
1109
1110		update_tasks_root_domain(cs);
1111
1112		rcu_read_lock();
1113		css_put(&cs->css);
1114	}
1115	rcu_read_unlock();
1116}
1117
1118static void
1119partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1120				    struct sched_domain_attr *dattr_new)
1121{
1122	mutex_lock(&sched_domains_mutex);
1123	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
1124	rebuild_root_domains();
1125	mutex_unlock(&sched_domains_mutex);
1126}
1127
1128/*
1129 * Rebuild scheduler domains.
1130 *
1131 * If the flag 'sched_load_balance' of any cpuset with non-empty
1132 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1133 * which has that flag enabled, or if any cpuset with a non-empty
1134 * 'cpus' is removed, then call this routine to rebuild the
1135 * scheduler's dynamic sched domains.
1136 *
1137 * Call with cpuset_rwsem held.  Takes cpus_read_lock().
1138 */
1139static void rebuild_sched_domains_locked(void)
1140{
1141	struct cgroup_subsys_state *pos_css;
1142	struct sched_domain_attr *attr;
1143	cpumask_var_t *doms;
1144	struct cpuset *cs;
1145	int ndoms;
1146
1147	lockdep_assert_cpus_held();
1148	percpu_rwsem_assert_held(&cpuset_rwsem);
1149
1150	/*
1151	 * If we have raced with CPU hotplug, return early to avoid
1152	 * passing doms with offlined cpu to partition_sched_domains().
 1153	 * Anyway, cpuset_hotplug_workfn() will rebuild sched domains.
1154	 *
1155	 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1156	 * should be the same as the active CPUs, so checking only top_cpuset
1157	 * is enough to detect racing CPU offlines.
1158	 */
1159	if (!top_cpuset.nr_subparts_cpus &&
1160	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1161		return;
1162
1163	/*
1164	 * With subpartition CPUs, however, the effective CPUs of a partition
1165	 * root should be only a subset of the active CPUs.  Since a CPU in any
1166	 * partition root could be offlined, all must be checked.
1167	 */
1168	if (top_cpuset.nr_subparts_cpus) {
1169		rcu_read_lock();
1170		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1171			if (!is_partition_valid(cs)) {
1172				pos_css = css_rightmost_descendant(pos_css);
1173				continue;
1174			}
1175			if (!cpumask_subset(cs->effective_cpus,
1176					    cpu_active_mask)) {
1177				rcu_read_unlock();
1178				return;
1179			}
1180		}
1181		rcu_read_unlock();
1182	}
1183
1184	/* Generate domain masks and attrs */
1185	ndoms = generate_sched_domains(&doms, &attr);
1186
1187	/* Have scheduler rebuild the domains */
1188	partition_and_rebuild_sched_domains(ndoms, doms, attr);
1189}
1190#else /* !CONFIG_SMP */
1191static void rebuild_sched_domains_locked(void)
1192{
1193}
1194#endif /* CONFIG_SMP */
1195
1196void rebuild_sched_domains(void)
1197{
1198	cpus_read_lock();
1199	percpu_down_write(&cpuset_rwsem);
1200	rebuild_sched_domains_locked();
1201	percpu_up_write(&cpuset_rwsem);
1202	cpus_read_unlock();
1203}
1204
1205/**
1206 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1207 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1208 * @new_cpus: the temp variable for the new effective_cpus mask
1209 *
1210 * Iterate through each task of @cs updating its cpus_allowed to the
1211 * effective cpuset's.  As this function is called with cpuset_rwsem held,
1212 * cpuset membership stays stable.
1213 */
1214static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1215{
1216	struct css_task_iter it;
1217	struct task_struct *task;
1218	bool top_cs = cs == &top_cpuset;
1219
1220	css_task_iter_start(&cs->css, 0, &it);
1221	while ((task = css_task_iter_next(&it))) {
1222		/*
1223		 * Percpu kthreads in top_cpuset are ignored
1224		 */
1225		if (top_cs && (task->flags & PF_KTHREAD) &&
1226		    kthread_is_per_cpu(task))
1227			continue;
1228
1229		cpumask_and(new_cpus, cs->effective_cpus,
1230			    task_cpu_possible_mask(task));
1231		set_cpus_allowed_ptr(task, new_cpus);
1232	}
1233	css_task_iter_end(&it);
1234}
1235
1236/**
1237 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1238 * @new_cpus: the temp variable for the new effective_cpus mask
 1239 * @cs: the cpuset that needs to recompute the new effective_cpus mask
1240 * @parent: the parent cpuset
1241 *
1242 * If the parent has subpartition CPUs, include them in the list of
1243 * allowable CPUs in computing the new effective_cpus mask. Since offlined
1244 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
1245 * to mask those out.
1246 */
1247static void compute_effective_cpumask(struct cpumask *new_cpus,
1248				      struct cpuset *cs, struct cpuset *parent)
1249{
1250	if (parent->nr_subparts_cpus) {
1251		cpumask_or(new_cpus, parent->effective_cpus,
1252			   parent->subparts_cpus);
1253		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
1254		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
1255	} else {
1256		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1257	}
1258}
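
/*
 * Worked example (hypothetical numbers, for illustration): with
 * parent->effective_cpus = 0-3, parent->subparts_cpus = 4-5 and
 * cs->cpus_allowed = 2-4, the new mask is (0-3 | 4-5) & 2-4 &
 * cpu_active_mask, i.e. CPUs 2-4 when all of them are active.
 */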
1259
1260/*
1261 * Commands for update_parent_subparts_cpumask
1262 */
1263enum subparts_cmd {
1264	partcmd_enable,		/* Enable partition root	 */
1265	partcmd_disable,	/* Disable partition root	 */
1266	partcmd_update,		/* Update parent's subparts_cpus */
1267	partcmd_invalidate,	/* Make partition invalid	 */
1268};
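
/*
 * Rough flow (an assumption, for orientation only): writes to cgroup v2's
 * cpuset.cpus.partition reach update_prstate(), which issues partcmd_enable
 * or partcmd_disable against the parent, while cpumask updates and hotplug
 * handling use partcmd_update, and partcmd_invalidate is used when the cpu
 * exclusivity rule is violated.
 */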
1269
1270static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1271		       int turning_on);
1272/**
1273 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
 1274 * @cs:      The cpuset that requests change in partition root state
1275 * @cmd:     Partition root state change command
1276 * @newmask: Optional new cpumask for partcmd_update
1277 * @tmp:     Temporary addmask and delmask
1278 * Return:   0 or a partition root state error code
1279 *
1280 * For partcmd_enable, the cpuset is being transformed from a non-partition
1281 * root to a partition root. The cpus_allowed mask of the given cpuset will
1282 * be put into parent's subparts_cpus and taken away from parent's
1283 * effective_cpus. The function will return 0 if all the CPUs listed in
1284 * cpus_allowed can be granted or an error code will be returned.
 1285 * cpus_allowed can be granted, or an error code otherwise.
1286 * For partcmd_disable, the cpuset is being transformed from a partition
1287 * root back to a non-partition root. Any CPUs in cpus_allowed that are in
1288 * parent's subparts_cpus will be taken away from that cpumask and put back
1289 * into parent's effective_cpus. 0 will always be returned.
1290 *
1291 * For partcmd_update, if the optional newmask is specified, the cpu list is
1292 * to be changed from cpus_allowed to newmask. Otherwise, cpus_allowed is
1293 * assumed to remain the same. The cpuset should either be a valid or invalid
1294 * partition root. The partition root state may change from valid to invalid
1295 * or vice versa. An error code will only be returned if transitioning from
1296 * invalid to valid violates the exclusivity rule.
1297 *
1298 * For partcmd_invalidate, the current partition will be made invalid.
1299 *
1300 * The partcmd_enable and partcmd_disable commands are used by
1301 * update_prstate(). An error code may be returned and the caller will check
1302 * for error.
1303 *
1304 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1305 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1306 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1307 * check for error and so partition_root_state and prs_error will be updated
1308 * directly.
1309 */
1310static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
1311					  struct cpumask *newmask,
1312					  struct tmpmasks *tmp)
1313{
1314	struct cpuset *parent = parent_cs(cs);
1315	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
1316	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
1317	int old_prs, new_prs;
1318	int part_error = PERR_NONE;	/* Partition error? */
1319
1320	percpu_rwsem_assert_held(&cpuset_rwsem);
1321
1322	/*
1323	 * The parent must be a partition root.
1324	 * The new cpumask, if present, or the current cpus_allowed must
1325	 * not be empty.
1326	 */
1327	if (!is_partition_valid(parent)) {
1328		return is_partition_invalid(parent)
1329		       ? PERR_INVPARENT : PERR_NOTPART;
1330	}
1331	if ((newmask && cpumask_empty(newmask)) ||
1332	   (!newmask && cpumask_empty(cs->cpus_allowed)))
1333		return PERR_CPUSEMPTY;
1334
1335	/*
1336	 * new_prs will only be changed for the partcmd_update and
1337	 * partcmd_invalidate commands.
1338	 */
1339	adding = deleting = false;
1340	old_prs = new_prs = cs->partition_root_state;
1341	if (cmd == partcmd_enable) {
1342		/*
1343		 * Enabling partition root is not allowed if cpus_allowed
1344		 * doesn't overlap parent's cpus_allowed.
1345		 */
1346		if (!cpumask_intersects(cs->cpus_allowed, parent->cpus_allowed))
1347			return PERR_INVCPUS;
1348
1349		/*
1350		 * A parent can be left with no CPU as long as there is no
1351		 * task directly associated with the parent partition.
1352		 */
1353		if (cpumask_subset(parent->effective_cpus, cs->cpus_allowed) &&
1354		    partition_is_populated(parent, cs))
1355			return PERR_NOCPUS;
1356
1357		cpumask_copy(tmp->addmask, cs->cpus_allowed);
1358		adding = true;
1359	} else if (cmd == partcmd_disable) {
1360		/*
1361		 * Need to remove cpus from parent's subparts_cpus for valid
1362		 * partition root.
1363		 */
1364		deleting = !is_prs_invalid(old_prs) &&
1365			   cpumask_and(tmp->delmask, cs->cpus_allowed,
1366				       parent->subparts_cpus);
1367	} else if (cmd == partcmd_invalidate) {
1368		if (is_prs_invalid(old_prs))
1369			return 0;
1370
1371		/*
1372		 * Make the current partition invalid. It is assumed that
1373		 * invalidation is caused by violating cpu exclusivity rule.
1374		 */
1375		deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1376				       parent->subparts_cpus);
1377		if (old_prs > 0) {
1378			new_prs = -old_prs;
1379			part_error = PERR_NOTEXCL;
1380		}
1381	} else if (newmask) {
1382		/*
1383		 * partcmd_update with newmask:
1384		 *
1385		 * Compute add/delete mask to/from subparts_cpus
1386		 *
1387		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
1388		 * addmask = newmask & parent->cpus_allowed
1389		 *		     & ~parent->subparts_cpus
1390		 */
1391		cpumask_andnot(tmp->delmask, cs->cpus_allowed, newmask);
1392		deleting = cpumask_and(tmp->delmask, tmp->delmask,
1393				       parent->subparts_cpus);
1394
1395		cpumask_and(tmp->addmask, newmask, parent->cpus_allowed);
1396		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1397					parent->subparts_cpus);
1398		/*
1399		 * Make partition invalid if parent's effective_cpus could
1400		 * become empty and there are tasks in the parent.
1401		 */
1402		if (adding &&
1403		    cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1404		    !cpumask_intersects(tmp->delmask, cpu_active_mask) &&
1405		    partition_is_populated(parent, cs)) {
1406			part_error = PERR_NOCPUS;
1407			adding = false;
1408			deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1409					       parent->subparts_cpus);
1410		}
1411	} else {
1412		/*
1413		 * partcmd_update w/o newmask:
1414		 *
1415		 * delmask = cpus_allowed & parent->subparts_cpus
1416		 * addmask = cpus_allowed & parent->cpus_allowed
1417		 *			  & ~parent->subparts_cpus
1418		 *
1419		 * This gets invoked either due to a hotplug event or from
1420		 * update_cpumasks_hier(). This can cause the state of a
1421		 * partition root to transition from valid to invalid or vice
1422		 * versa. So we still need to compute the addmask and delmask.
 1423		 *
1424		 * A partition error happens when:
1425		 * 1) Cpuset is valid partition, but parent does not distribute
1426		 *    out any CPUs.
1427		 * 2) Parent has tasks and all its effective CPUs will have
1428		 *    to be distributed out.
1429		 */
1430		cpumask_and(tmp->addmask, cs->cpus_allowed,
1431					  parent->cpus_allowed);
1432		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
1433					parent->subparts_cpus);
1434
1435		if ((is_partition_valid(cs) && !parent->nr_subparts_cpus) ||
1436		    (adding &&
1437		     cpumask_subset(parent->effective_cpus, tmp->addmask) &&
1438		     partition_is_populated(parent, cs))) {
1439			part_error = PERR_NOCPUS;
1440			adding = false;
1441		}
1442
1443		if (part_error && is_partition_valid(cs) &&
1444		    parent->nr_subparts_cpus)
1445			deleting = cpumask_and(tmp->delmask, cs->cpus_allowed,
1446					       parent->subparts_cpus);
1447	}
1448	if (part_error)
1449		WRITE_ONCE(cs->prs_err, part_error);
1450
1451	if (cmd == partcmd_update) {
1452		/*
1453		 * Check for possible transition between valid and invalid
1454		 * partition root.
1455		 */
1456		switch (cs->partition_root_state) {
1457		case PRS_ROOT:
1458		case PRS_ISOLATED:
1459			if (part_error)
1460				new_prs = -old_prs;
1461			break;
1462		case PRS_INVALID_ROOT:
1463		case PRS_INVALID_ISOLATED:
1464			if (!part_error)
1465				new_prs = -old_prs;
1466			break;
1467		}
1468	}
1469
1470	if (!adding && !deleting && (new_prs == old_prs))
1471		return 0;
1472
1473	/*
 1474	 * Transitioning from invalid to valid or vice versa may require
1475	 * changing CS_CPU_EXCLUSIVE and CS_SCHED_LOAD_BALANCE.
1476	 */
1477	if (old_prs != new_prs) {
1478		if (is_prs_invalid(old_prs) && !is_cpu_exclusive(cs) &&
1479		    (update_flag(CS_CPU_EXCLUSIVE, cs, 1) < 0))
1480			return PERR_NOTEXCL;
1481		if (is_prs_invalid(new_prs) && is_cpu_exclusive(cs))
1482			update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1483	}
1484
1485	/*
1486	 * Change the parent's subparts_cpus.
1487	 * Newly added CPUs will be removed from effective_cpus and
1488	 * newly deleted ones will be added back to effective_cpus.
1489	 */
1490	spin_lock_irq(&callback_lock);
1491	if (adding) {
1492		cpumask_or(parent->subparts_cpus,
1493			   parent->subparts_cpus, tmp->addmask);
1494		cpumask_andnot(parent->effective_cpus,
1495			       parent->effective_cpus, tmp->addmask);
1496	}
1497	if (deleting) {
1498		cpumask_andnot(parent->subparts_cpus,
1499			       parent->subparts_cpus, tmp->delmask);
1500		/*
1501		 * Some of the CPUs in subparts_cpus might have been offlined.
1502		 */
1503		cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
1504		cpumask_or(parent->effective_cpus,
1505			   parent->effective_cpus, tmp->delmask);
1506	}
1507
1508	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
1509
1510	if (old_prs != new_prs)
1511		cs->partition_root_state = new_prs;
1512
1513	spin_unlock_irq(&callback_lock);
1514
1515	if (adding || deleting)
1516		update_tasks_cpumask(parent, tmp->new_cpus);
1517
1518	/*
1519	 * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
1520	 * rebuild_sched_domains_locked() may be called.
1521	 */
1522	if (old_prs != new_prs) {
1523		if (old_prs == PRS_ISOLATED)
1524			update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
1525		else if (new_prs == PRS_ISOLATED)
1526			update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
1527	}
1528	notify_partition_change(cs, old_prs);
1529	return 0;
1530}
1531
1532/*
1533 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
1534 * @cs:  the cpuset to consider
1535 * @tmp: temp variables for calculating effective_cpus & partition setup
1536 * @force: don't skip any descendant cpusets if set
1537 *
1538 * When configured cpumask is changed, the effective cpumasks of this cpuset
1539 * and all its descendants need to be updated.
1540 *
 1541 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
1542 *
1543 * Called with cpuset_rwsem held
1544 */
1545static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
1546				 bool force)
1547{
1548	struct cpuset *cp;
1549	struct cgroup_subsys_state *pos_css;
1550	bool need_rebuild_sched_domains = false;
1551	int old_prs, new_prs;
1552
1553	rcu_read_lock();
1554	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1555		struct cpuset *parent = parent_cs(cp);
1556		bool update_parent = false;
1557
1558		compute_effective_cpumask(tmp->new_cpus, cp, parent);
1559
1560		/*
1561		 * If it becomes empty, inherit the effective mask of the
1562		 * parent, which is guaranteed to have some CPUs unless
1563		 * it is a partition root that has explicitly distributed
1564		 * out all its CPUs.
1565		 */
1566		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
1567			if (is_partition_valid(cp) &&
1568			    cpumask_equal(cp->cpus_allowed, cp->subparts_cpus))
1569				goto update_parent_subparts;
1570
1571			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
1572			if (!cp->use_parent_ecpus) {
1573				cp->use_parent_ecpus = true;
1574				parent->child_ecpus_count++;
1575			}
1576		} else if (cp->use_parent_ecpus) {
1577			cp->use_parent_ecpus = false;
1578			WARN_ON_ONCE(!parent->child_ecpus_count);
1579			parent->child_ecpus_count--;
1580		}
1581
1582		/*
 1583		 * Skip the whole subtree if the cpumask remains the same,
 1584		 * there is no partition root state and the force flag is not set.
1585		 */
1586		if (!cp->partition_root_state && !force &&
1587		    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
1588			pos_css = css_rightmost_descendant(pos_css);
1589			continue;
1590		}
1591
1592update_parent_subparts:
1593		/*
1594		 * update_parent_subparts_cpumask() should have been called
1595		 * for cs already in update_cpumask(). We should also call
1596		 * update_tasks_cpumask() again for tasks in the parent
1597		 * cpuset if the parent's subparts_cpus changes.
1598		 */
1599		old_prs = new_prs = cp->partition_root_state;
1600		if ((cp != cs) && old_prs) {
1601			switch (parent->partition_root_state) {
1602			case PRS_ROOT:
1603			case PRS_ISOLATED:
1604				update_parent = true;
1605				break;
1606
1607			default:
1608				/*
1609				 * When parent is not a partition root or is
1610				 * invalid, child partition roots become
1611				 * invalid too.
1612				 */
1613				if (is_partition_valid(cp))
1614					new_prs = -cp->partition_root_state;
1615				WRITE_ONCE(cp->prs_err,
1616					   is_partition_invalid(parent)
1617					   ? PERR_INVPARENT : PERR_NOTPART);
1618				break;
1619			}
1620		}
1621
1622		if (!css_tryget_online(&cp->css))
1623			continue;
1624		rcu_read_unlock();
1625
1626		if (update_parent) {
1627			update_parent_subparts_cpumask(cp, partcmd_update, NULL,
1628						       tmp);
1629			/*
1630			 * The cpuset partition_root_state may become
1631			 * invalid. Capture it.
1632			 */
1633			new_prs = cp->partition_root_state;
1634		}
1635
1636		spin_lock_irq(&callback_lock);
1637
1638		if (cp->nr_subparts_cpus && !is_partition_valid(cp)) {
1639			/*
1640			 * Put all active subparts_cpus back to effective_cpus.
1641			 */
1642			cpumask_or(tmp->new_cpus, tmp->new_cpus,
1643				   cp->subparts_cpus);
1644			cpumask_and(tmp->new_cpus, tmp->new_cpus,
1645				   cpu_active_mask);
1646			cp->nr_subparts_cpus = 0;
1647			cpumask_clear(cp->subparts_cpus);
1648		}
1649
1650		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
1651		if (cp->nr_subparts_cpus) {
1652			/*
1653			 * Make sure that effective_cpus & subparts_cpus
1654			 * are mutually exclusive.
1655			 */
1656			cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
1657				       cp->subparts_cpus);
1658		}
1659
1660		cp->partition_root_state = new_prs;
1661		spin_unlock_irq(&callback_lock);
1662
1663		notify_partition_change(cp, old_prs);
1664
1665		WARN_ON(!is_in_v2_mode() &&
1666			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
1667
1668		update_tasks_cpumask(cp, tmp->new_cpus);
1669
1670		/*
1671		 * On legacy hierarchy, if the effective cpumask of any non-
1672		 * empty cpuset is changed, we need to rebuild sched domains.
1673		 * On default hierarchy, the cpuset needs to be a partition
1674		 * root as well.
1675		 */
1676		if (!cpumask_empty(cp->cpus_allowed) &&
1677		    is_sched_load_balance(cp) &&
1678		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
1679		    is_partition_valid(cp)))
1680			need_rebuild_sched_domains = true;
1681
1682		rcu_read_lock();
1683		css_put(&cp->css);
1684	}
1685	rcu_read_unlock();
1686
1687	if (need_rebuild_sched_domains)
1688		rebuild_sched_domains_locked();
1689}
1690
1691/**
1692 * update_sibling_cpumasks - Update siblings cpumasks
1693 * @parent:  Parent cpuset
1694 * @cs:      Current cpuset
1695 * @tmp:     Temp variables
1696 */
1697static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1698				    struct tmpmasks *tmp)
1699{
1700	struct cpuset *sibling;
1701	struct cgroup_subsys_state *pos_css;
1702
1703	percpu_rwsem_assert_held(&cpuset_rwsem);
1704
1705	/*
1706	 * Check all its siblings and call update_cpumasks_hier()
1707	 * if their use_parent_ecpus flag is set in order for them
1708	 * to use the right effective_cpus value.
1709	 *
1710	 * The update_cpumasks_hier() function may sleep. So we have to
1711	 * release the RCU read lock before calling it.
1712	 */
1713	rcu_read_lock();
1714	cpuset_for_each_child(sibling, pos_css, parent) {
1715		if (sibling == cs)
1716			continue;
1717		if (!sibling->use_parent_ecpus)
1718			continue;
1719		if (!css_tryget_online(&sibling->css))
1720			continue;
1721
1722		rcu_read_unlock();
1723		update_cpumasks_hier(sibling, tmp, false);
1724		rcu_read_lock();
1725		css_put(&sibling->css);
1726	}
1727	rcu_read_unlock();
1728}
1729
1730/**
1731 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
1732 * @cs: the cpuset to consider
1733 * @trialcs: trial cpuset
1734 * @buf: buffer of cpu numbers written to this cpuset
1735 */
1736static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
1737			  const char *buf)
1738{
1739	int retval;
1740	struct tmpmasks tmp;
1741	bool invalidate = false;
1742
1743	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
1744	if (cs == &top_cpuset)
1745		return -EACCES;
1746
1747	/*
1748	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
1749	 * Since cpulist_parse() fails on an empty mask, we special case
1750	 * that parsing.  The validate_change() call ensures that cpusets
1751	 * with tasks have cpus.
1752	 */
1753	if (!*buf) {
1754		cpumask_clear(trialcs->cpus_allowed);
1755	} else {
1756		retval = cpulist_parse(buf, trialcs->cpus_allowed);
1757		if (retval < 0)
1758			return retval;
1759
1760		if (!cpumask_subset(trialcs->cpus_allowed,
1761				    top_cpuset.cpus_allowed))
1762			return -EINVAL;
1763	}
1764
1765	/* Nothing to do if the cpus didn't change */
1766	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
1767		return 0;
1768
1769#ifdef CONFIG_CPUMASK_OFFSTACK
1770	/*
1771	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
1772	 * to allocated cpumasks.
1773	 */
1774	tmp.addmask  = trialcs->subparts_cpus;
1775	tmp.delmask  = trialcs->effective_cpus;
1776	tmp.new_cpus = trialcs->cpus_allowed;
1777#endif
1778
1779	retval = validate_change(cs, trialcs);
1780
1781	if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
1782		struct cpuset *cp, *parent;
1783		struct cgroup_subsys_state *css;
1784
1785		/*
1786		 * The -EINVAL error code indicates that the partition sibling
1787		 * CPU exclusivity rule has been violated. We still allow
1788		 * the cpumask change to proceed while invalidating the
1789		 * partition. However, any conflicting sibling partitions
1790		 * have to be marked as invalid too.
1791		 */
1792		invalidate = true;
1793		rcu_read_lock();
1794		parent = parent_cs(cs);
1795		cpuset_for_each_child(cp, css, parent)
1796			if (is_partition_valid(cp) &&
1797			    cpumask_intersects(trialcs->cpus_allowed, cp->cpus_allowed)) {
1798				rcu_read_unlock();
1799				update_parent_subparts_cpumask(cp, partcmd_invalidate, NULL, &tmp);
1800				rcu_read_lock();
1801			}
1802		rcu_read_unlock();
1803		retval = 0;
1804	}
1805	if (retval < 0)
1806		return retval;
1807
1808	if (cs->partition_root_state) {
1809		if (invalidate)
1810			update_parent_subparts_cpumask(cs, partcmd_invalidate,
1811						       NULL, &tmp);
1812		else
1813			update_parent_subparts_cpumask(cs, partcmd_update,
1814						trialcs->cpus_allowed, &tmp);
1815	}
1816
1817	compute_effective_cpumask(trialcs->effective_cpus, trialcs,
1818				  parent_cs(cs));
1819	spin_lock_irq(&callback_lock);
1820	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
1821
1822	/*
1823	 * Make sure that subparts_cpus, if not empty, is a subset of
1824	 * cpus_allowed. Clear subparts_cpus if the partition is not valid or
1825	 * if the effective cpus would become empty while the cpuset still has tasks.
1826	 */
1827	if (cs->nr_subparts_cpus) {
1828		if (!is_partition_valid(cs) ||
1829		   (cpumask_subset(trialcs->effective_cpus, cs->subparts_cpus) &&
1830		    partition_is_populated(cs, NULL))) {
1831			cs->nr_subparts_cpus = 0;
1832			cpumask_clear(cs->subparts_cpus);
1833		} else {
1834			cpumask_and(cs->subparts_cpus, cs->subparts_cpus,
1835				    cs->cpus_allowed);
1836			cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
1837		}
1838	}
1839	spin_unlock_irq(&callback_lock);
1840
1841	/* effective_cpus will be updated here */
1842	update_cpumasks_hier(cs, &tmp, false);
1843
1844	if (cs->partition_root_state) {
1845		struct cpuset *parent = parent_cs(cs);
1846
1847		/*
1848		 * For partition root, update the cpumasks of sibling
1849		 * cpusets if they use parent's effective_cpus.
1850		 */
1851		if (parent->child_ecpus_count)
1852			update_sibling_cpumasks(parent, cs, &tmp);
1853	}
1854	return 0;
1855}
1856
1857/*
1858 * Migrate memory region from one set of nodes to another.  This is
1859 * performed asynchronously as it can be called from process migration path
1860 * holding locks involved in process management.  All mm migrations are
1861 * performed in the queued order and can be waited for by flushing
1862 * cpuset_migrate_mm_wq.
1863 */
1864
1865struct cpuset_migrate_mm_work {
1866	struct work_struct	work;
1867	struct mm_struct	*mm;
1868	nodemask_t		from;
1869	nodemask_t		to;
1870};
1871
1872static void cpuset_migrate_mm_workfn(struct work_struct *work)
1873{
1874	struct cpuset_migrate_mm_work *mwork =
1875		container_of(work, struct cpuset_migrate_mm_work, work);
1876
1877	/* on a wq worker, no need to worry about %current's mems_allowed */
1878	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1879	mmput(mwork->mm);
1880	kfree(mwork);
1881}
1882
1883static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1884							const nodemask_t *to)
1885{
1886	struct cpuset_migrate_mm_work *mwork;
1887
1888	if (nodes_equal(*from, *to)) {
1889		mmput(mm);
1890		return;
1891	}
1892
1893	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1894	if (mwork) {
1895		mwork->mm = mm;
1896		mwork->from = *from;
1897		mwork->to = *to;
1898		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1899		queue_work(cpuset_migrate_mm_wq, &mwork->work);
1900	} else {
1901		mmput(mm);
1902	}
1903}
1904
1905static void cpuset_post_attach(void)
1906{
1907	flush_workqueue(cpuset_migrate_mm_wq);
1908}
1909
1910/*
1911 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1912 * @tsk: the task to change
1913 * @newmems: new nodes that the task will be set
1914 *
1915 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1916 * and to rebind the task's mempolicy, if any. If the task is allocating in
1917 * parallel, it might temporarily see an empty intersection, which results in
1918 * a seqlock check and retry before OOM or allocation failure.
1919 */
1920static void cpuset_change_task_nodemask(struct task_struct *tsk,
1921					nodemask_t *newmems)
1922{
1923	task_lock(tsk);
1924
1925	local_irq_disable();
1926	write_seqcount_begin(&tsk->mems_allowed_seq);
1927
1928	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1929	mpol_rebind_task(tsk, newmems);
1930	tsk->mems_allowed = *newmems;
1931
1932	write_seqcount_end(&tsk->mems_allowed_seq);
1933	local_irq_enable();
1934
1935	task_unlock(tsk);
1936}
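
/*
 * For reference, a sketch of the reader side that pairs with the
 * mems_allowed_seq update above, assuming the read_mems_allowed_begin()/
 * read_mems_allowed_retry() helpers from include/linux/cpuset.h
 * (illustrative, not copied from any particular call site):
 *
 *	unsigned int seq;
 *	do {
 *		seq = read_mems_allowed_begin();
 *		... attempt the allocation against current->mems_allowed ...
 *	} while (read_mems_allowed_retry(seq));
 */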
1937
1938static void *cpuset_being_rebound;
1939
1940/**
1941 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1942 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1943 *
1944 * Iterate through each task of @cs updating its mems_allowed to the
1945 * effective cpuset's.  As this function is called with cpuset_rwsem held,
1946 * cpuset membership stays stable.
1947 */
1948static void update_tasks_nodemask(struct cpuset *cs)
1949{
1950	static nodemask_t newmems;	/* protected by cpuset_rwsem */
1951	struct css_task_iter it;
1952	struct task_struct *task;
1953
1954	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
1955
1956	guarantee_online_mems(cs, &newmems);
1957
1958	/*
1959	 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
1960	 * take while holding tasklist_lock.  Forks can happen - the
1961	 * mpol_dup() cpuset_being_rebound check will catch such forks,
1962	 * and rebind their vma mempolicies too.  Because we still hold
1963	 * the global cpuset_rwsem, we know that no other rebind effort
1964	 * will be contending for the global variable cpuset_being_rebound.
1965	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1966	 * is idempotent.  Also migrate pages in each mm to new nodes.
1967	 */
1968	css_task_iter_start(&cs->css, 0, &it);
1969	while ((task = css_task_iter_next(&it))) {
1970		struct mm_struct *mm;
1971		bool migrate;
1972
1973		cpuset_change_task_nodemask(task, &newmems);
1974
1975		mm = get_task_mm(task);
1976		if (!mm)
1977			continue;
1978
1979		migrate = is_memory_migrate(cs);
1980
1981		mpol_rebind_mm(mm, &cs->mems_allowed);
1982		if (migrate)
1983			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1984		else
1985			mmput(mm);
1986	}
1987	css_task_iter_end(&it);
1988
1989	/*
1990	 * All the tasks' nodemasks have been updated, update
1991	 * cs->old_mems_allowed.
1992	 */
1993	cs->old_mems_allowed = newmems;
1994
1995	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
1996	cpuset_being_rebound = NULL;
1997}
1998
1999/*
2000 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2001 * @cs: the cpuset to consider
2002 * @new_mems: a temp variable for calculating new effective_mems
2003 *
2004 * When configured nodemask is changed, the effective nodemasks of this cpuset
2005 * and all its descendants need to be updated.
2006 *
2007 * On legacy hierarchy, effective_mems will be the same with mems_allowed.
2008 *
2009 * Called with cpuset_rwsem held
2010 */
2011static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2012{
2013	struct cpuset *cp;
2014	struct cgroup_subsys_state *pos_css;
2015
2016	rcu_read_lock();
2017	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2018		struct cpuset *parent = parent_cs(cp);
2019
2020		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2021
2022		/*
2023		 * If it becomes empty, inherit the effective mask of the
2024		 * parent, which is guaranteed to have some MEMs.
2025		 */
2026		if (is_in_v2_mode() && nodes_empty(*new_mems))
2027			*new_mems = parent->effective_mems;
2028
2029		/* Skip the whole subtree if the nodemask remains the same. */
2030		if (nodes_equal(*new_mems, cp->effective_mems)) {
2031			pos_css = css_rightmost_descendant(pos_css);
2032			continue;
2033		}
2034
2035		if (!css_tryget_online(&cp->css))
2036			continue;
2037		rcu_read_unlock();
2038
2039		spin_lock_irq(&callback_lock);
2040		cp->effective_mems = *new_mems;
2041		spin_unlock_irq(&callback_lock);
2042
2043		WARN_ON(!is_in_v2_mode() &&
2044			!nodes_equal(cp->mems_allowed, cp->effective_mems));
2045
2046		update_tasks_nodemask(cp);
2047
2048		rcu_read_lock();
2049		css_put(&cp->css);
2050	}
2051	rcu_read_unlock();
2052}
2053
2054/*
2055 * Handle user request to change the 'mems' memory placement
2056 * of a cpuset.  Needs to validate the request, update the
2057 * cpuset's mems_allowed, and for each task in the cpuset,
2058 * update mems_allowed, rebind the task's mempolicy and any vma
2059 * mempolicies, and, if the cpuset is marked 'memory_migrate',
2060 * migrate the task's pages to the new memory.
2061 *
2062 * Call with cpuset_rwsem held. May take callback_lock during call.
2063 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2064 * lock each such task's mm->mmap_lock, scan its vmas and rebind
2065 * their mempolicies to the cpuset's new mems_allowed.
2066 */
2067static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2068			   const char *buf)
2069{
2070	int retval;
2071
2072	/*
2073	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2074	 * it's read-only
2075	 */
2076	if (cs == &top_cpuset) {
2077		retval = -EACCES;
2078		goto done;
2079	}
2080
2081	/*
2082	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2083	 * Since nodelist_parse() fails on an empty mask, we special case
2084	 * that parsing.  The validate_change() call ensures that cpusets
2085	 * with tasks have memory.
2086	 */
2087	if (!*buf) {
2088		nodes_clear(trialcs->mems_allowed);
2089	} else {
2090		retval = nodelist_parse(buf, trialcs->mems_allowed);
2091		if (retval < 0)
2092			goto done;
2093
2094		if (!nodes_subset(trialcs->mems_allowed,
2095				  top_cpuset.mems_allowed)) {
2096			retval = -EINVAL;
2097			goto done;
2098		}
2099	}
2100
2101	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2102		retval = 0;		/* Too easy - nothing to do */
2103		goto done;
2104	}
2105	retval = validate_change(cs, trialcs);
2106	if (retval < 0)
2107		goto done;
2108
2109	check_insane_mems_config(&trialcs->mems_allowed);
2110
2111	spin_lock_irq(&callback_lock);
2112	cs->mems_allowed = trialcs->mems_allowed;
2113	spin_unlock_irq(&callback_lock);
2114
2115	/* use trialcs->mems_allowed as a temp variable */
2116	update_nodemasks_hier(cs, &trialcs->mems_allowed);
2117done:
2118	return retval;
2119}
2120
2121bool current_cpuset_is_being_rebound(void)
2122{
2123	bool ret;
2124
2125	rcu_read_lock();
2126	ret = task_cs(current) == cpuset_being_rebound;
2127	rcu_read_unlock();
2128
2129	return ret;
2130}
2131
2132static int update_relax_domain_level(struct cpuset *cs, s64 val)
2133{
2134#ifdef CONFIG_SMP
2135	if (val < -1 || val >= sched_domain_level_max)
2136		return -EINVAL;
2137#endif
2138
2139	if (val != cs->relax_domain_level) {
2140		cs->relax_domain_level = val;
2141		if (!cpumask_empty(cs->cpus_allowed) &&
2142		    is_sched_load_balance(cs))
2143			rebuild_sched_domains_locked();
2144	}
2145
2146	return 0;
2147}
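
/*
 * Rough meaning of the relax_domain_level values accepted above, per
 * Documentation/admin-guide/cgroup-v1/cpusets.rst (the exact top level
 * depends on the machine's sched domain hierarchy):
 *   -1  no request, use system default or follow the request of others
 *    0  no search
 *    1  search siblings (hyperthreads in a core)
 *    2  search cores in a package
 *   higher values search progressively larger spans, up to system wide
 */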
2148
2149/**
2150 * update_tasks_flags - update the spread flags of tasks in the cpuset.
2151 * @cs: the cpuset in which each task's spread flags needs to be changed
2152 *
2153 * Iterate through each task of @cs updating its spread flags.  As this
2154 * function is called with cpuset_rwsem held, cpuset membership stays
2155 * stable.
2156 */
2157static void update_tasks_flags(struct cpuset *cs)
2158{
2159	struct css_task_iter it;
2160	struct task_struct *task;
2161
2162	css_task_iter_start(&cs->css, 0, &it);
2163	while ((task = css_task_iter_next(&it)))
2164		cpuset_update_task_spread_flags(cs, task);
2165	css_task_iter_end(&it);
2166}
2167
2168/*
2169 * update_flag - read a 0 or a 1 in a file and update associated flag
2170 * bit:		the bit to update (see cpuset_flagbits_t)
2171 * cs:		the cpuset to update
2172 * turning_on: 	whether the flag is being set or cleared
2173 *
2174 * Call with cpuset_rwsem held.
2175 */
2176
2177static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2178		       int turning_on)
2179{
2180	struct cpuset *trialcs;
2181	int balance_flag_changed;
2182	int spread_flag_changed;
2183	int err;
2184
2185	trialcs = alloc_trial_cpuset(cs);
2186	if (!trialcs)
2187		return -ENOMEM;
2188
2189	if (turning_on)
2190		set_bit(bit, &trialcs->flags);
2191	else
2192		clear_bit(bit, &trialcs->flags);
2193
2194	err = validate_change(cs, trialcs);
2195	if (err < 0)
2196		goto out;
2197
2198	balance_flag_changed = (is_sched_load_balance(cs) !=
2199				is_sched_load_balance(trialcs));
2200
2201	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
2202			|| (is_spread_page(cs) != is_spread_page(trialcs)));
2203
2204	spin_lock_irq(&callback_lock);
2205	cs->flags = trialcs->flags;
2206	spin_unlock_irq(&callback_lock);
2207
2208	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
2209		rebuild_sched_domains_locked();
2210
2211	if (spread_flag_changed)
2212		update_tasks_flags(cs);
2213out:
2214	free_cpuset(trialcs);
2215	return err;
2216}
2217
2218/**
2219 * update_prstate - update partition_root_state
2220 * @cs: the cpuset to update
2221 * @new_prs: new partition root state
2222 * Return: 0 if successful, != 0 if error
2223 *
2224 * Call with cpuset_rwsem held.
2225 */
2226static int update_prstate(struct cpuset *cs, int new_prs)
2227{
2228	int err = PERR_NONE, old_prs = cs->partition_root_state;
2229	bool sched_domain_rebuilt = false;
2230	struct cpuset *parent = parent_cs(cs);
2231	struct tmpmasks tmpmask;
2232
2233	if (old_prs == new_prs)
2234		return 0;
2235
2236	/*
2237	 * For a previously invalid partition root, leave it as invalid
2238	 * if new_prs is not "member".
2239	 */
2240	if (new_prs && is_prs_invalid(old_prs)) {
2241		cs->partition_root_state = -new_prs;
2242		return 0;
2243	}
2244
2245	if (alloc_cpumasks(NULL, &tmpmask))
2246		return -ENOMEM;
2247
2248	if (!old_prs) {
2249		/*
2250		 * Turning on partition root requires setting the
2251		 * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
2252		 * cannot be empty.
2253		 */
2254		if (cpumask_empty(cs->cpus_allowed)) {
2255			err = PERR_CPUSEMPTY;
2256			goto out;
2257		}
2258
2259		err = update_flag(CS_CPU_EXCLUSIVE, cs, 1);
2260		if (err) {
2261			err = PERR_NOTEXCL;
2262			goto out;
2263		}
2264
2265		err = update_parent_subparts_cpumask(cs, partcmd_enable,
2266						     NULL, &tmpmask);
2267		if (err) {
2268			update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2269			goto out;
2270		}
2271
2272		if (new_prs == PRS_ISOLATED) {
2273			/*
2274			 * Disabling the load balance flag should not return an
2275			 * error unless the system is running out of memory.
2276			 */
2277			update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2278			sched_domain_rebuilt = true;
2279		}
2280	} else if (old_prs && new_prs) {
2281		/*
2282		 * A change in load balance state only, no change in cpumasks.
2283		 */
2284		update_flag(CS_SCHED_LOAD_BALANCE, cs, (new_prs != PRS_ISOLATED));
2285		sched_domain_rebuilt = true;
2286		goto out;	/* Sched domain is rebuilt in update_flag() */
2287	} else {
2288		/*
2289		 * Switching back to member is always allowed even if it
2290		 * disables child partitions.
2291		 */
2292		update_parent_subparts_cpumask(cs, partcmd_disable, NULL,
2293					       &tmpmask);
2294
2295		/*
2296		 * If there are child partitions, they will all become invalid.
2297		 */
2298		if (unlikely(cs->nr_subparts_cpus)) {
2299			spin_lock_irq(&callback_lock);
2300			cs->nr_subparts_cpus = 0;
2301			cpumask_clear(cs->subparts_cpus);
2302			compute_effective_cpumask(cs->effective_cpus, cs, parent);
2303			spin_unlock_irq(&callback_lock);
2304		}
2305
2306		/* Turning off CS_CPU_EXCLUSIVE will not return error */
2307		update_flag(CS_CPU_EXCLUSIVE, cs, 0);
2308
2309		if (!is_sched_load_balance(cs)) {
2310			/* Make sure load balance is on */
2311			update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
2312			sched_domain_rebuilt = true;
2313		}
2314	}
2315
2316	update_tasks_cpumask(parent, tmpmask.new_cpus);
2317
2318	if (parent->child_ecpus_count)
2319		update_sibling_cpumasks(parent, cs, &tmpmask);
2320
2321	if (!sched_domain_rebuilt)
2322		rebuild_sched_domains_locked();
2323out:
2324	/*
2325	 * Make the partition invalid if an error happens.
2326	 */
2327	if (err)
2328		new_prs = -new_prs;
2329	spin_lock_irq(&callback_lock);
2330	cs->partition_root_state = new_prs;
2331	WRITE_ONCE(cs->prs_err, err);
2332	spin_unlock_irq(&callback_lock);
2333	/*
2334	 * Update child cpusets, if present.
2335	 * Force update if switching back to member.
2336	 */
2337	if (!list_empty(&cs->css.children))
2338		update_cpumasks_hier(cs, &tmpmask, !new_prs);
2339
2340	notify_partition_change(cs, old_prs);
2341	free_cpumasks(NULL, &tmpmask);
2342	return 0;
2343}
2344
2345/*
2346 * Frequency meter - How fast is some event occurring?
2347 *
2348 * These routines manage a digitally filtered, constant time based,
2349 * event frequency meter.  There are four routines:
2350 *   fmeter_init() - initialize a frequency meter.
2351 *   fmeter_markevent() - called each time the event happens.
2352 *   fmeter_getrate() - returns the recent rate of such events.
2353 *   fmeter_update() - internal routine used to update fmeter.
2354 *
2355 * A common data structure is passed to each of these routines,
2356 * which is used to keep track of the state required to manage the
2357 * frequency meter and its digital filter.
2358 *
2359 * The filter works on the number of events marked per unit time.
2360 * The filter is single-pole low-pass recursive (IIR).  The time unit
2361 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
2362 * simulate 3 decimal digits of precision (multiplied by 1000).
2363 *
2364 * With an FM_COEF of 933, and a time base of 1 second, the filter
2365 * has a half-life of 10 seconds, meaning that if the events quit
2366 * happening, then the rate returned from the fmeter_getrate()
2367 * will be cut in half each 10 seconds, until it converges to zero.
2368 *
2369 * It is not worth doing a real infinitely recursive filter.  If more
2370 * than FM_MAXTICKS ticks have elapsed since the last filter event,
2371 * just compute FM_MAXTICKS ticks worth, by which point the level
2372 * will be stable.
2373 *
2374 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2375 * arithmetic overflow in the fmeter_update() routine.
2376 *
2377 * Given the simple 32 bit integer arithmetic used, this meter works
2378 * best for reporting rates between one per millisecond (msec) and
2379 * one per 32 (approx) seconds.  At constant rates faster than one
2380 * per msec it maxes out at values just under 1,000,000.  At constant
2381 * rates between one per msec, and one per second it will stabilize
2382 * to a value N*1000, where N is the rate of events per second.
2383 * At constant rates between one per second and one per 32 seconds,
2384 * it will be choppy, moving up on the seconds that have an event,
2385 * and then decaying until the next event.  At rates slower than
2386 * about one in 32 seconds, it decays all the way back to zero between
2387 * each event.
2388 */
2389
2390#define FM_COEF 933		/* coefficient for half-life of 10 secs */
2391#define FM_MAXTICKS ((u32)99)   /* useless computing more ticks than this */
2392#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
2393#define FM_SCALE 1000		/* faux fixed point scale */
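
/*
 * Worked example of the numbers above: each elapsed second scales val by
 * FM_COEF/FM_SCALE = 0.933, and 0.933^10 is roughly 0.5, which is where
 * the 10 second half-life comes from.  At a steady rate of N events per
 * second the recursion val = 0.933*val + 0.067*(N*1000) settles at
 * val = N*1000, matching the "N*1000" stabilization described above.
 */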
2394
2395/* Initialize a frequency meter */
2396static void fmeter_init(struct fmeter *fmp)
2397{
2398	fmp->cnt = 0;
2399	fmp->val = 0;
2400	fmp->time = 0;
2401	spin_lock_init(&fmp->lock);
2402}
2403
2404/* Internal meter update - process cnt events and update value */
2405static void fmeter_update(struct fmeter *fmp)
2406{
2407	time64_t now;
2408	u32 ticks;
2409
2410	now = ktime_get_seconds();
2411	ticks = now - fmp->time;
2412
2413	if (ticks == 0)
2414		return;
2415
2416	ticks = min(FM_MAXTICKS, ticks);
2417	while (ticks-- > 0)
2418		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2419	fmp->time = now;
2420
2421	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2422	fmp->cnt = 0;
2423}
2424
2425/* Process any previous ticks, then bump cnt by one (times scale). */
2426static void fmeter_markevent(struct fmeter *fmp)
2427{
2428	spin_lock(&fmp->lock);
2429	fmeter_update(fmp);
2430	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2431	spin_unlock(&fmp->lock);
2432}
2433
2434/* Process any previous ticks, then return current value. */
2435static int fmeter_getrate(struct fmeter *fmp)
2436{
2437	int val;
2438
2439	spin_lock(&fmp->lock);
2440	fmeter_update(fmp);
2441	val = fmp->val;
2442	spin_unlock(&fmp->lock);
2443	return val;
2444}
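
/*
 * For context: these fmeter helpers back the per-cpuset "memory_pressure"
 * file.  The memory pressure bump path (__cpuset_memory_pressure_bump(),
 * elsewhere in this file) calls fmeter_markevent() on the current task's
 * cpuset, and reads of FILE_MEMORY_PRESSURE report fmeter_getrate() via
 * cpuset_read_u64() below.
 */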
2445
2446static struct cpuset *cpuset_attach_old_cs;
2447
2448/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */
2449static int cpuset_can_attach(struct cgroup_taskset *tset)
2450{
2451	struct cgroup_subsys_state *css;
2452	struct cpuset *cs;
2453	struct task_struct *task;
2454	int ret;
2455
2456	/* used later by cpuset_attach() */
2457	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2458	cs = css_cs(css);
2459
2460	percpu_down_write(&cpuset_rwsem);
2461
2462	/* allow moving tasks into an empty cpuset if on default hierarchy */
2463	ret = -ENOSPC;
2464	if (!is_in_v2_mode() &&
2465	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
2466		goto out_unlock;
2467
2468	/*
2469	 * Task cannot be moved to a cpuset with empty effective cpus.
2470	 */
2471	if (cpumask_empty(cs->effective_cpus))
2472		goto out_unlock;
2473
2474	cgroup_taskset_for_each(task, css, tset) {
2475		ret = task_can_attach(task, cs->effective_cpus);
2476		if (ret)
2477			goto out_unlock;
2478		ret = security_task_setscheduler(task);
2479		if (ret)
2480			goto out_unlock;
2481	}
2482
2483	/*
2484	 * Mark attach is in progress.  This makes validate_change() fail
2485	 * changes which zero cpus/mems_allowed.
2486	 */
2487	cs->attach_in_progress++;
2488	ret = 0;
2489out_unlock:
2490	percpu_up_write(&cpuset_rwsem);
2491	return ret;
2492}
2493
2494static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2495{
2496	struct cgroup_subsys_state *css;
2497
2498	cgroup_taskset_first(tset, &css);
2499
2500	percpu_down_write(&cpuset_rwsem);
2501	css_cs(css)->attach_in_progress--;
2502	percpu_up_write(&cpuset_rwsem);
2503}
2504
2505/*
2506 * Protected by cpuset_rwsem.  cpus_attach is used only by cpuset_attach()
2507 * but we can't allocate it dynamically there.  Define it global and
2508 * allocate from cpuset_init().
2509 */
2510static cpumask_var_t cpus_attach;
2511
2512static void cpuset_attach(struct cgroup_taskset *tset)
2513{
2514	/* static buf protected by cpuset_rwsem */
2515	static nodemask_t cpuset_attach_nodemask_to;
2516	struct task_struct *task;
2517	struct task_struct *leader;
2518	struct cgroup_subsys_state *css;
2519	struct cpuset *cs;
2520	struct cpuset *oldcs = cpuset_attach_old_cs;
2521	bool cpus_updated, mems_updated;
2522
2523	cgroup_taskset_first(tset, &css);
2524	cs = css_cs(css);
2525
2526	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
2527	percpu_down_write(&cpuset_rwsem);
2528	cpus_updated = !cpumask_equal(cs->effective_cpus,
2529				      oldcs->effective_cpus);
2530	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
2531
2532	/*
2533	 * In the default hierarchy, enabling cpuset in the child cgroups
2534	 * will trigger a number of cpuset_attach() calls with no change
2535	 * in effective cpus and mems. In that case, we can optimize out
2536	 * by skipping the task iteration and update.
2537	 */
2538	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2539	    !cpus_updated && !mems_updated) {
2540		cpuset_attach_nodemask_to = cs->effective_mems;
2541		goto out;
2542	}
2543
2544	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2545
2546	cgroup_taskset_for_each(task, css, tset) {
2547		if (cs != &top_cpuset)
2548			guarantee_online_cpus(task, cpus_attach);
2549		else
2550			cpumask_copy(cpus_attach, task_cpu_possible_mask(task));
2551		/*
2552		 * can_attach beforehand should guarantee that this doesn't
2553		 * fail.  TODO: have a better way to handle failure here
2554		 */
2555		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2556
2557		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2558		cpuset_update_task_spread_flags(cs, task);
2559	}
2560
2561	/*
2562	 * Change mm for all threadgroup leaders. This is expensive and may
2563	 * sleep and should be moved outside migration path proper. Skip it
2564	 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
2565	 * not set.
2566	 */
2567	cpuset_attach_nodemask_to = cs->effective_mems;
2568	if (!is_memory_migrate(cs) && !mems_updated)
2569		goto out;
2570
2571	cgroup_taskset_for_each_leader(leader, css, tset) {
2572		struct mm_struct *mm = get_task_mm(leader);
2573
2574		if (mm) {
2575			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2576
2577			/*
2578			 * old_mems_allowed is the same as mems_allowed
2579			 * here, except if this task is being moved
2580			 * automatically due to hotplug.  In that case
2581			 * @mems_allowed has been updated and is empty, so
2582			 * @old_mems_allowed is the right nodesets that we
2583			 * migrate mm from.
2584			 */
2585			if (is_memory_migrate(cs))
2586				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2587						  &cpuset_attach_nodemask_to);
2588			else
2589				mmput(mm);
2590		}
2591	}
2592
2593out:
2594	cs->old_mems_allowed = cpuset_attach_nodemask_to;
2595
2596	cs->attach_in_progress--;
2597	if (!cs->attach_in_progress)
2598		wake_up(&cpuset_attach_wq);
2599
2600	percpu_up_write(&cpuset_rwsem);
2601}
2602
2603/* The various types of files and directories in a cpuset file system */
2604
2605typedef enum {
2606	FILE_MEMORY_MIGRATE,
2607	FILE_CPULIST,
2608	FILE_MEMLIST,
2609	FILE_EFFECTIVE_CPULIST,
2610	FILE_EFFECTIVE_MEMLIST,
2611	FILE_SUBPARTS_CPULIST,
2612	FILE_CPU_EXCLUSIVE,
2613	FILE_MEM_EXCLUSIVE,
2614	FILE_MEM_HARDWALL,
2615	FILE_SCHED_LOAD_BALANCE,
2616	FILE_PARTITION_ROOT,
2617	FILE_SCHED_RELAX_DOMAIN_LEVEL,
2618	FILE_MEMORY_PRESSURE_ENABLED,
2619	FILE_MEMORY_PRESSURE,
2620	FILE_SPREAD_PAGE,
2621	FILE_SPREAD_SLAB,
2622} cpuset_filetype_t;
2623
2624static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2625			    u64 val)
2626{
2627	struct cpuset *cs = css_cs(css);
2628	cpuset_filetype_t type = cft->private;
2629	int retval = 0;
2630
2631	cpus_read_lock();
2632	percpu_down_write(&cpuset_rwsem);
2633	if (!is_cpuset_online(cs)) {
2634		retval = -ENODEV;
2635		goto out_unlock;
2636	}
2637
2638	switch (type) {
2639	case FILE_CPU_EXCLUSIVE:
2640		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2641		break;
2642	case FILE_MEM_EXCLUSIVE:
2643		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2644		break;
2645	case FILE_MEM_HARDWALL:
2646		retval = update_flag(CS_MEM_HARDWALL, cs, val);
2647		break;
2648	case FILE_SCHED_LOAD_BALANCE:
2649		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2650		break;
2651	case FILE_MEMORY_MIGRATE:
2652		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2653		break;
2654	case FILE_MEMORY_PRESSURE_ENABLED:
2655		cpuset_memory_pressure_enabled = !!val;
2656		break;
2657	case FILE_SPREAD_PAGE:
2658		retval = update_flag(CS_SPREAD_PAGE, cs, val);
2659		break;
2660	case FILE_SPREAD_SLAB:
2661		retval = update_flag(CS_SPREAD_SLAB, cs, val);
2662		break;
2663	default:
2664		retval = -EINVAL;
2665		break;
2666	}
2667out_unlock:
2668	percpu_up_write(&cpuset_rwsem);
2669	cpus_read_unlock();
2670	return retval;
2671}
2672
2673static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2674			    s64 val)
2675{
2676	struct cpuset *cs = css_cs(css);
2677	cpuset_filetype_t type = cft->private;
2678	int retval = -ENODEV;
2679
2680	cpus_read_lock();
2681	percpu_down_write(&cpuset_rwsem);
2682	if (!is_cpuset_online(cs))
2683		goto out_unlock;
2684
2685	switch (type) {
2686	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2687		retval = update_relax_domain_level(cs, val);
2688		break;
2689	default:
2690		retval = -EINVAL;
2691		break;
2692	}
2693out_unlock:
2694	percpu_up_write(&cpuset_rwsem);
2695	cpus_read_unlock();
2696	return retval;
2697}
2698
2699/*
2700 * Common handling for a write to a "cpus" or "mems" file.
2701 */
2702static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2703				    char *buf, size_t nbytes, loff_t off)
2704{
2705	struct cpuset *cs = css_cs(of_css(of));
2706	struct cpuset *trialcs;
2707	int retval = -ENODEV;
2708
2709	buf = strstrip(buf);
2710
2711	/*
2712	 * CPU or memory hotunplug may leave @cs w/o any execution
2713	 * resources, in which case the hotplug code asynchronously updates
2714	 * configuration and transfers all tasks to the nearest ancestor
2715	 * which can execute.
2716	 *
2717	 * As writes to "cpus" or "mems" may restore @cs's execution
2718	 * resources, wait for the previously scheduled operations before
2719	 * proceeding, so that we don't end up keep removing tasks added
2720	 * proceeding, so that we don't end up repeatedly removing tasks added
2721	 *
2722	 * cpuset_hotplug_work calls back into cgroup core via
2723	 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2724	 * operation like this one can lead to a deadlock through kernfs
2725	 * active_ref protection.  Let's break the protection.  Losing the
2726	 * protection is okay as we check whether @cs is online after
2727	 * grabbing cpuset_rwsem anyway.  This only happens on the legacy
2728	 * hierarchies.
2729	 */
2730	css_get(&cs->css);
2731	kernfs_break_active_protection(of->kn);
2732	flush_work(&cpuset_hotplug_work);
2733
2734	cpus_read_lock();
2735	percpu_down_write(&cpuset_rwsem);
2736	if (!is_cpuset_online(cs))
2737		goto out_unlock;
2738
2739	trialcs = alloc_trial_cpuset(cs);
2740	if (!trialcs) {
2741		retval = -ENOMEM;
2742		goto out_unlock;
2743	}
2744
2745	switch (of_cft(of)->private) {
2746	case FILE_CPULIST:
2747		retval = update_cpumask(cs, trialcs, buf);
2748		break;
2749	case FILE_MEMLIST:
2750		retval = update_nodemask(cs, trialcs, buf);
2751		break;
2752	default:
2753		retval = -EINVAL;
2754		break;
2755	}
2756
2757	free_cpuset(trialcs);
2758out_unlock:
2759	percpu_up_write(&cpuset_rwsem);
2760	cpus_read_unlock();
2761	kernfs_unbreak_active_protection(of->kn);
2762	css_put(&cs->css);
2763	flush_workqueue(cpuset_migrate_mm_wq);
2764	return retval ?: nbytes;
2765}
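
/*
 * Illustrative usage, assuming a cgroup v2 mount at /sys/fs/cgroup and a
 * child cgroup "A" (other layouts differ):
 *
 *	echo 0-3 > /sys/fs/cgroup/A/cpuset.cpus		invokes update_cpumask()
 *	echo 0-1 > /sys/fs/cgroup/A/cpuset.mems		invokes update_nodemask()
 *
 * Writing an empty string hits the special case documented in those
 * functions: the mask is cleared, which validate_change() only allows
 * while the cpuset has no tasks.
 */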
2766
2767/*
2768 * These ascii lists should be read in a single call, by using a user
2769 * buffer large enough to hold the entire map.  If read in smaller
2770 * chunks, there is no guarantee of atomicity.  Since the display format
2771 * used, list of ranges of sequential numbers, is variable length,
2772 * and since these maps can change value dynamically, one could read
2773 * gibberish by doing partial reads while a list was changing.
2774 */
2775static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2776{
2777	struct cpuset *cs = css_cs(seq_css(sf));
2778	cpuset_filetype_t type = seq_cft(sf)->private;
2779	int ret = 0;
2780
2781	spin_lock_irq(&callback_lock);
2782
2783	switch (type) {
2784	case FILE_CPULIST:
2785		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
2786		break;
2787	case FILE_MEMLIST:
2788		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2789		break;
2790	case FILE_EFFECTIVE_CPULIST:
2791		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2792		break;
2793	case FILE_EFFECTIVE_MEMLIST:
2794		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2795		break;
2796	case FILE_SUBPARTS_CPULIST:
2797		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2798		break;
2799	default:
2800		ret = -EINVAL;
2801	}
2802
2803	spin_unlock_irq(&callback_lock);
2804	return ret;
2805}
2806
2807static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2808{
2809	struct cpuset *cs = css_cs(css);
2810	cpuset_filetype_t type = cft->private;
2811	switch (type) {
2812	case FILE_CPU_EXCLUSIVE:
2813		return is_cpu_exclusive(cs);
2814	case FILE_MEM_EXCLUSIVE:
2815		return is_mem_exclusive(cs);
2816	case FILE_MEM_HARDWALL:
2817		return is_mem_hardwall(cs);
2818	case FILE_SCHED_LOAD_BALANCE:
2819		return is_sched_load_balance(cs);
2820	case FILE_MEMORY_MIGRATE:
2821		return is_memory_migrate(cs);
2822	case FILE_MEMORY_PRESSURE_ENABLED:
2823		return cpuset_memory_pressure_enabled;
2824	case FILE_MEMORY_PRESSURE:
2825		return fmeter_getrate(&cs->fmeter);
2826	case FILE_SPREAD_PAGE:
2827		return is_spread_page(cs);
2828	case FILE_SPREAD_SLAB:
2829		return is_spread_slab(cs);
2830	default:
2831		BUG();
2832	}
2833
2834	/* Unreachable but makes gcc happy */
2835	return 0;
2836}
2837
2838static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2839{
2840	struct cpuset *cs = css_cs(css);
2841	cpuset_filetype_t type = cft->private;
2842	switch (type) {
2843	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2844		return cs->relax_domain_level;
2845	default:
2846		BUG();
2847	}
2848
2849	/* Unreachable but makes gcc happy */
2850	return 0;
2851}
2852
2853static int sched_partition_show(struct seq_file *seq, void *v)
2854{
2855	struct cpuset *cs = css_cs(seq_css(seq));
2856	const char *err, *type = NULL;
2857
2858	switch (cs->partition_root_state) {
2859	case PRS_ROOT:
2860		seq_puts(seq, "root\n");
2861		break;
2862	case PRS_ISOLATED:
2863		seq_puts(seq, "isolated\n");
2864		break;
2865	case PRS_MEMBER:
2866		seq_puts(seq, "member\n");
2867		break;
2868	case PRS_INVALID_ROOT:
2869		type = "root";
2870		fallthrough;
2871	case PRS_INVALID_ISOLATED:
2872		if (!type)
2873			type = "isolated";
2874		err = perr_strings[READ_ONCE(cs->prs_err)];
2875		if (err)
2876			seq_printf(seq, "%s invalid (%s)\n", type, err);
2877		else
2878			seq_printf(seq, "%s invalid\n", type);
2879		break;
2880	}
2881	return 0;
2882}
2883
2884static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
2885				     size_t nbytes, loff_t off)
2886{
2887	struct cpuset *cs = css_cs(of_css(of));
2888	int val;
2889	int retval = -ENODEV;
2890
2891	buf = strstrip(buf);
2892
2893	/*
2894	 * Convert "root"/"member"/"isolated" to the matching PRS_* value.
2895	 */
2896	if (!strcmp(buf, "root"))
2897		val = PRS_ROOT;
2898	else if (!strcmp(buf, "member"))
2899		val = PRS_MEMBER;
2900	else if (!strcmp(buf, "isolated"))
2901		val = PRS_ISOLATED;
2902	else
2903		return -EINVAL;
2904
2905	css_get(&cs->css);
2906	cpus_read_lock();
2907	percpu_down_write(&cpuset_rwsem);
2908	if (!is_cpuset_online(cs))
2909		goto out_unlock;
2910
2911	retval = update_prstate(cs, val);
2912out_unlock:
2913	percpu_up_write(&cpuset_rwsem);
2914	cpus_read_unlock();
2915	css_put(&cs->css);
2916	return retval ?: nbytes;
2917}
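
/*
 * Illustrative usage: on the default hierarchy this file is named
 * "cpuset.cpus.partition" (see dfl_files[] below), so for example
 *
 *	echo root     > cpuset.cpus.partition	-> update_prstate(cs, PRS_ROOT)
 *	echo isolated > cpuset.cpus.partition	-> update_prstate(cs, PRS_ISOLATED)
 *	echo member   > cpuset.cpus.partition	-> update_prstate(cs, PRS_MEMBER)
 *
 * Reads go through sched_partition_show() above and, for an invalid
 * partition, append the matching perr_strings[] reason.
 */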
2918
2919/*
2920 * for the common functions, 'private' gives the type of file
2921 */
2922
2923static struct cftype legacy_files[] = {
2924	{
2925		.name = "cpus",
2926		.seq_show = cpuset_common_seq_show,
2927		.write = cpuset_write_resmask,
2928		.max_write_len = (100U + 6 * NR_CPUS),
2929		.private = FILE_CPULIST,
2930	},
2931
2932	{
2933		.name = "mems",
2934		.seq_show = cpuset_common_seq_show,
2935		.write = cpuset_write_resmask,
2936		.max_write_len = (100U + 6 * MAX_NUMNODES),
2937		.private = FILE_MEMLIST,
2938	},
2939
2940	{
2941		.name = "effective_cpus",
2942		.seq_show = cpuset_common_seq_show,
2943		.private = FILE_EFFECTIVE_CPULIST,
2944	},
2945
2946	{
2947		.name = "effective_mems",
2948		.seq_show = cpuset_common_seq_show,
2949		.private = FILE_EFFECTIVE_MEMLIST,
2950	},
2951
2952	{
2953		.name = "cpu_exclusive",
2954		.read_u64 = cpuset_read_u64,
2955		.write_u64 = cpuset_write_u64,
2956		.private = FILE_CPU_EXCLUSIVE,
2957	},
2958
2959	{
2960		.name = "mem_exclusive",
2961		.read_u64 = cpuset_read_u64,
2962		.write_u64 = cpuset_write_u64,
2963		.private = FILE_MEM_EXCLUSIVE,
2964	},
2965
2966	{
2967		.name = "mem_hardwall",
2968		.read_u64 = cpuset_read_u64,
2969		.write_u64 = cpuset_write_u64,
2970		.private = FILE_MEM_HARDWALL,
2971	},
2972
2973	{
2974		.name = "sched_load_balance",
2975		.read_u64 = cpuset_read_u64,
2976		.write_u64 = cpuset_write_u64,
2977		.private = FILE_SCHED_LOAD_BALANCE,
2978	},
2979
2980	{
2981		.name = "sched_relax_domain_level",
2982		.read_s64 = cpuset_read_s64,
2983		.write_s64 = cpuset_write_s64,
2984		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
2985	},
2986
2987	{
2988		.name = "memory_migrate",
2989		.read_u64 = cpuset_read_u64,
2990		.write_u64 = cpuset_write_u64,
2991		.private = FILE_MEMORY_MIGRATE,
2992	},
2993
2994	{
2995		.name = "memory_pressure",
2996		.read_u64 = cpuset_read_u64,
2997		.private = FILE_MEMORY_PRESSURE,
2998	},
2999
3000	{
3001		.name = "memory_spread_page",
3002		.read_u64 = cpuset_read_u64,
3003		.write_u64 = cpuset_write_u64,
3004		.private = FILE_SPREAD_PAGE,
3005	},
3006
3007	{
3008		.name = "memory_spread_slab",
3009		.read_u64 = cpuset_read_u64,
3010		.write_u64 = cpuset_write_u64,
3011		.private = FILE_SPREAD_SLAB,
3012	},
3013
3014	{
3015		.name = "memory_pressure_enabled",
3016		.flags = CFTYPE_ONLY_ON_ROOT,
3017		.read_u64 = cpuset_read_u64,
3018		.write_u64 = cpuset_write_u64,
3019		.private = FILE_MEMORY_PRESSURE_ENABLED,
3020	},
3021
3022	{ }	/* terminate */
3023};
3024
3025/*
3026 * This is currently a minimal set for the default hierarchy. It can be
3027 * expanded later on by migrating more features and control files from v1.
3028 */
3029static struct cftype dfl_files[] = {
3030	{
3031		.name = "cpus",
3032		.seq_show = cpuset_common_seq_show,
3033		.write = cpuset_write_resmask,
3034		.max_write_len = (100U + 6 * NR_CPUS),
3035		.private = FILE_CPULIST,
3036		.flags = CFTYPE_NOT_ON_ROOT,
3037	},
3038
3039	{
3040		.name = "mems",
3041		.seq_show = cpuset_common_seq_show,
3042		.write = cpuset_write_resmask,
3043		.max_write_len = (100U + 6 * MAX_NUMNODES),
3044		.private = FILE_MEMLIST,
3045		.flags = CFTYPE_NOT_ON_ROOT,
3046	},
3047
3048	{
3049		.name = "cpus.effective",
3050		.seq_show = cpuset_common_seq_show,
3051		.private = FILE_EFFECTIVE_CPULIST,
3052	},
3053
3054	{
3055		.name = "mems.effective",
3056		.seq_show = cpuset_common_seq_show,
3057		.private = FILE_EFFECTIVE_MEMLIST,
3058	},
3059
3060	{
3061		.name = "cpus.partition",
3062		.seq_show = sched_partition_show,
3063		.write = sched_partition_write,
3064		.private = FILE_PARTITION_ROOT,
3065		.flags = CFTYPE_NOT_ON_ROOT,
3066		.file_offset = offsetof(struct cpuset, partition_file),
3067	},
3068
3069	{
3070		.name = "cpus.subpartitions",
3071		.seq_show = cpuset_common_seq_show,
3072		.private = FILE_SUBPARTS_CPULIST,
3073		.flags = CFTYPE_DEBUG,
3074	},
3075
3076	{ }	/* terminate */
3077};
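
/*
 * Note that cgroup core prefixes these names with the subsystem name, so on
 * the default hierarchy they appear as "cpuset.cpus", "cpuset.mems",
 * "cpuset.cpus.effective", "cpuset.mems.effective", "cpuset.cpus.partition"
 * and, when debug files are enabled (CFTYPE_DEBUG), "cpuset.cpus.subpartitions".
 */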
3078
3079
3080/**
3081 * cpuset_css_alloc - Allocate a cpuset css
3082 * @parent_css: Parent css of the control group that the new cpuset will be
3083 *              part of
3084 * Return: cpuset css on success, -ENOMEM on failure.
3085 *
3086 * Allocate and initialize a new cpuset css for a non-NULL @parent_css; return
3087 * the top cpuset css otherwise.
3088 */
3089static struct cgroup_subsys_state *
3090cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
3091{
3092	struct cpuset *cs;
3093
3094	if (!parent_css)
3095		return &top_cpuset.css;
3096
3097	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
3098	if (!cs)
3099		return ERR_PTR(-ENOMEM);
3100
3101	if (alloc_cpumasks(cs, NULL)) {
3102		kfree(cs);
3103		return ERR_PTR(-ENOMEM);
3104	}
3105
3106	__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
3107	nodes_clear(cs->mems_allowed);
3108	nodes_clear(cs->effective_mems);
3109	fmeter_init(&cs->fmeter);
3110	cs->relax_domain_level = -1;
3111
3112	/* Set CS_MEMORY_MIGRATE for default hierarchy */
3113	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
3114		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
3115
3116	return &cs->css;
3117}
3118
3119static int cpuset_css_online(struct cgroup_subsys_state *css)
3120{
3121	struct cpuset *cs = css_cs(css);
3122	struct cpuset *parent = parent_cs(cs);
3123	struct cpuset *tmp_cs;
3124	struct cgroup_subsys_state *pos_css;
3125
3126	if (!parent)
3127		return 0;
3128
3129	cpus_read_lock();
3130	percpu_down_write(&cpuset_rwsem);
3131
3132	set_bit(CS_ONLINE, &cs->flags);
3133	if (is_spread_page(parent))
3134		set_bit(CS_SPREAD_PAGE, &cs->flags);
3135	if (is_spread_slab(parent))
3136		set_bit(CS_SPREAD_SLAB, &cs->flags);
3137
3138	cpuset_inc();
3139
3140	spin_lock_irq(&callback_lock);
3141	if (is_in_v2_mode()) {
3142		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
3143		cs->effective_mems = parent->effective_mems;
3144		cs->use_parent_ecpus = true;
3145		parent->child_ecpus_count++;
3146	}
3147	spin_unlock_irq(&callback_lock);
3148
3149	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
3150		goto out_unlock;
3151
3152	/*
3153	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
3154	 * set.  This flag handling is implemented in cgroup core for
3155	 * historical reasons - the flag may be specified during mount.
3156	 *
3157	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
3158	 * refuse to clone the configuration - thereby refusing the task to
3159	 * be entered, and as a result refusing the sys_unshare() or
3160	 * clone() which initiated it.  If this becomes a problem for some
3161	 * users who wish to allow that scenario, then this could be
3162	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
3163	 * (and likewise for mems) to the new cgroup.
3164	 */
3165	rcu_read_lock();
3166	cpuset_for_each_child(tmp_cs, pos_css, parent) {
3167		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
3168			rcu_read_unlock();
3169			goto out_unlock;
3170		}
3171	}
3172	rcu_read_unlock();
3173
3174	spin_lock_irq(&callback_lock);
3175	cs->mems_allowed = parent->mems_allowed;
3176	cs->effective_mems = parent->mems_allowed;
3177	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
3178	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
3179	spin_unlock_irq(&callback_lock);
3180out_unlock:
3181	percpu_up_write(&cpuset_rwsem);
3182	cpus_read_unlock();
3183	return 0;
3184}
3185
3186/*
3187 * If the cpuset being removed has its flag 'sched_load_balance'
3188 * enabled, then simulate turning sched_load_balance off, which
3189 * will call rebuild_sched_domains_locked(). That is not needed
3190 * in the default hierarchy where only changes in partition
3191 * will cause repartitioning.
3192 *
3193 * If the cpuset has the 'sched.partition' flag enabled, simulate
3194 * turning 'sched.partition' off.
3195 */
3196
3197static void cpuset_css_offline(struct cgroup_subsys_state *css)
3198{
3199	struct cpuset *cs = css_cs(css);
3200
3201	cpus_read_lock();
3202	percpu_down_write(&cpuset_rwsem);
3203
3204	if (is_partition_valid(cs))
3205		update_prstate(cs, 0);
3206
3207	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3208	    is_sched_load_balance(cs))
3209		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
3210
3211	if (cs->use_parent_ecpus) {
3212		struct cpuset *parent = parent_cs(cs);
3213
3214		cs->use_parent_ecpus = false;
3215		parent->child_ecpus_count--;
3216	}
3217
3218	cpuset_dec();
3219	clear_bit(CS_ONLINE, &cs->flags);
3220
3221	percpu_up_write(&cpuset_rwsem);
3222	cpus_read_unlock();
3223}
3224
3225static void cpuset_css_free(struct cgroup_subsys_state *css)
3226{
3227	struct cpuset *cs = css_cs(css);
3228
3229	free_cpuset(cs);
3230}
3231
3232static void cpuset_bind(struct cgroup_subsys_state *root_css)
3233{
3234	percpu_down_write(&cpuset_rwsem);
3235	spin_lock_irq(&callback_lock);
3236
3237	if (is_in_v2_mode()) {
3238		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
3239		top_cpuset.mems_allowed = node_possible_map;
3240	} else {
3241		cpumask_copy(top_cpuset.cpus_allowed,
3242			     top_cpuset.effective_cpus);
3243		top_cpuset.mems_allowed = top_cpuset.effective_mems;
3244	}
3245
3246	spin_unlock_irq(&callback_lock);
3247	percpu_up_write(&cpuset_rwsem);
3248}
3249
3250/*
3251 * Make sure the new task conforms to the current state of its parent,
3252 * which could have been changed by cpuset just after it inherits the
3253 * state from the parent and before it sits on the cgroup's task list.
3254 */
3255static void cpuset_fork(struct task_struct *task)
3256{
3257	if (task_css_is_root(task, cpuset_cgrp_id))
3258		return;
3259
3260	set_cpus_allowed_ptr(task, current->cpus_ptr);
3261	task->mems_allowed = current->mems_allowed;
3262}
3263
3264struct cgroup_subsys cpuset_cgrp_subsys = {
3265	.css_alloc	= cpuset_css_alloc,
3266	.css_online	= cpuset_css_online,
3267	.css_offline	= cpuset_css_offline,
3268	.css_free	= cpuset_css_free,
3269	.can_attach	= cpuset_can_attach,
3270	.cancel_attach	= cpuset_cancel_attach,
3271	.attach		= cpuset_attach,
3272	.post_attach	= cpuset_post_attach,
3273	.bind		= cpuset_bind,
3274	.fork		= cpuset_fork,
3275	.legacy_cftypes	= legacy_files,
3276	.dfl_cftypes	= dfl_files,
3277	.early_init	= true,
3278	.threaded	= true,
3279};
3280
3281/**
3282 * cpuset_init - initialize cpusets at system boot
3283 *
3284 * Description: Initialize top_cpuset
3285 **/
3286
3287int __init cpuset_init(void)
3288{
3289	BUG_ON(percpu_init_rwsem(&cpuset_rwsem));
3290
3291	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
3292	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
3293	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
3294
3295	cpumask_setall(top_cpuset.cpus_allowed);
3296	nodes_setall(top_cpuset.mems_allowed);
3297	cpumask_setall(top_cpuset.effective_cpus);
3298	nodes_setall(top_cpuset.effective_mems);
3299
3300	fmeter_init(&top_cpuset.fmeter);
3301	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
3302	top_cpuset.relax_domain_level = -1;
3303
3304	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
3305
3306	return 0;
3307}
3308
3309/*
3310 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
3311 * or memory nodes, we need to walk over the cpuset hierarchy,
3312 * removing that CPU or node from all cpusets.  If this removes the
3313 * last CPU or node from a cpuset, then move the tasks in the empty
3314 * cpuset to its next-highest non-empty parent.
3315 */
3316static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
3317{
3318	struct cpuset *parent;
3319
3320	/*
3321	 * Find its next-highest non-empty parent (the top cpuset
3322	 * has online cpus, so it can't be empty).
3323	 */
3324	parent = parent_cs(cs);
3325	while (cpumask_empty(parent->cpus_allowed) ||
3326			nodes_empty(parent->mems_allowed))
3327		parent = parent_cs(parent);
3328
3329	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
3330		pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
3331		pr_cont_cgroup_name(cs->css.cgroup);
3332		pr_cont("\n");
3333	}
3334}
3335
3336static void
3337hotplug_update_tasks_legacy(struct cpuset *cs,
3338			    struct cpumask *new_cpus, nodemask_t *new_mems,
3339			    bool cpus_updated, bool mems_updated)
3340{
3341	bool is_empty;
3342
3343	spin_lock_irq(&callback_lock);
3344	cpumask_copy(cs->cpus_allowed, new_cpus);
3345	cpumask_copy(cs->effective_cpus, new_cpus);
3346	cs->mems_allowed = *new_mems;
3347	cs->effective_mems = *new_mems;
3348	spin_unlock_irq(&callback_lock);
3349
3350	/*
3351	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
3352	 * as the tasks will be migrated to an ancestor.
3353	 */
3354	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
3355		update_tasks_cpumask(cs, new_cpus);
3356	if (mems_updated && !nodes_empty(cs->mems_allowed))
3357		update_tasks_nodemask(cs);
3358
3359	is_empty = cpumask_empty(cs->cpus_allowed) ||
3360		   nodes_empty(cs->mems_allowed);
3361
3362	percpu_up_write(&cpuset_rwsem);
3363
3364	/*
3365	 * Move tasks to the nearest ancestor with execution resources.
3366	 * This is a full cgroup operation which will also call back into
3367	 * cpuset. It should be done outside any lock.
3368	 */
3369	if (is_empty)
3370		remove_tasks_in_empty_cpuset(cs);
3371
3372	percpu_down_write(&cpuset_rwsem);
3373}
3374
3375static void
3376hotplug_update_tasks(struct cpuset *cs,
3377		     struct cpumask *new_cpus, nodemask_t *new_mems,
3378		     bool cpus_updated, bool mems_updated)
3379{
3380	/* A partition root is allowed to have empty effective cpus */
3381	if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
3382		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
3383	if (nodes_empty(*new_mems))
3384		*new_mems = parent_cs(cs)->effective_mems;
3385
3386	spin_lock_irq(&callback_lock);
3387	cpumask_copy(cs->effective_cpus, new_cpus);
3388	cs->effective_mems = *new_mems;
3389	spin_unlock_irq(&callback_lock);
3390
3391	if (cpus_updated)
3392		update_tasks_cpumask(cs, new_cpus);
3393	if (mems_updated)
3394		update_tasks_nodemask(cs);
3395}
3396
3397static bool force_rebuild;
3398
3399void cpuset_force_rebuild(void)
3400{
3401	force_rebuild = true;
3402}
3403
3404/**
3405 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3406 * @cs: cpuset in interest
3407 * @tmp: the tmpmasks structure pointer
3408 *
3409 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3410 * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
3411 * all its tasks are moved to the nearest ancestor with both resources.
3412 */
3413static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3414{
3415	static cpumask_t new_cpus;
3416	static nodemask_t new_mems;
3417	bool cpus_updated;
3418	bool mems_updated;
3419	struct cpuset *parent;
3420retry:
3421	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3422
3423	percpu_down_write(&cpuset_rwsem);
3424
3425	/*
3426	 * We have raced with task attaching. We wait until attaching
3427	 * is finished, so we won't attach a task to an empty cpuset.
3428	 */
3429	if (cs->attach_in_progress) {
3430		percpu_up_write(&cpuset_rwsem);
3431		goto retry;
3432	}
3433
3434	parent = parent_cs(cs);
3435	compute_effective_cpumask(&new_cpus, cs, parent);
3436	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3437
3438	if (cs->nr_subparts_cpus)
3439		/*
3440		 * Make sure that CPUs allocated to child partitions
3441		 * do not show up in effective_cpus.
3442		 */
3443		cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3444
3445	if (!tmp || !cs->partition_root_state)
3446		goto update_tasks;
3447
3448	/*
3449	 * In the unlikely event that a partition root has empty
3450	 * effective_cpus with tasks, we will have to invalidate child
3451	 * partitions, if present, by setting nr_subparts_cpus to 0 to
3452	 * reclaim their cpus.
3453	 */
3454	if (cs->nr_subparts_cpus && is_partition_valid(cs) &&
3455	    cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)) {
3456		spin_lock_irq(&callback_lock);
3457		cs->nr_subparts_cpus = 0;
3458		cpumask_clear(cs->subparts_cpus);
3459		spin_unlock_irq(&callback_lock);
3460		compute_effective_cpumask(&new_cpus, cs, parent);
3461	}
3462
3463	/*
3464	 * Force the partition to become invalid if either one of
3465	 * the following conditions hold:
3466	 * 1) empty effective cpus but not valid empty partition.
3467	 * 2) parent is invalid or doesn't grant any cpus to child
3468	 *    partitions.
3469	 */
3470	if (is_partition_valid(cs) && (!parent->nr_subparts_cpus ||
3471	   (cpumask_empty(&new_cpus) && partition_is_populated(cs, NULL)))) {
3472		int old_prs, parent_prs;
3473
3474		update_parent_subparts_cpumask(cs, partcmd_disable, NULL, tmp);
3475		if (cs->nr_subparts_cpus) {
3476			spin_lock_irq(&callback_lock);
3477			cs->nr_subparts_cpus = 0;
3478			cpumask_clear(cs->subparts_cpus);
3479			spin_unlock_irq(&callback_lock);
3480			compute_effective_cpumask(&new_cpus, cs, parent);
3481		}
3482
3483		old_prs = cs->partition_root_state;
3484		parent_prs = parent->partition_root_state;
3485		if (is_partition_valid(cs)) {
3486			spin_lock_irq(&callback_lock);
3487			make_partition_invalid(cs);
3488			spin_unlock_irq(&callback_lock);
3489			if (is_prs_invalid(parent_prs))
3490				WRITE_ONCE(cs->prs_err, PERR_INVPARENT);
3491			else if (!parent_prs)
3492				WRITE_ONCE(cs->prs_err, PERR_NOTPART);
3493			else
3494				WRITE_ONCE(cs->prs_err, PERR_HOTPLUG);
3495			notify_partition_change(cs, old_prs);
3496		}
3497		cpuset_force_rebuild();
3498	}
3499
3500	/*
3501	 * On the other hand, an invalid partition root may be transitioned
3502	 * back to a regular one.
3503	 */
3504	else if (is_partition_valid(parent) && is_partition_invalid(cs)) {
3505		update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp);
3506		if (is_partition_valid(cs))
3507			cpuset_force_rebuild();
 
3508	}
3509
3510update_tasks:
3511	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3512	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3513
3514	if (mems_updated)
3515		check_insane_mems_config(&new_mems);
3516
3517	if (is_in_v2_mode())
3518		hotplug_update_tasks(cs, &new_cpus, &new_mems,
3519				     cpus_updated, mems_updated);
3520	else
3521		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3522					    cpus_updated, mems_updated);
3523
3524	percpu_up_write(&cpuset_rwsem);
 
3525}
3526
3527/**
3528 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
 
3529 *
3530 * This function is called after either CPU or memory configuration has
3531 * changed and updates cpuset accordingly.  The top_cpuset is always
3532 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3533 * order to make cpusets transparent (of no effect) on systems that are
3534 * actively using CPU hotplug but making no active use of cpusets.
3535 *
3536 * Non-root cpusets are only affected by offlining.  If any CPUs or memory
3537 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3538 * all descendants.
3539 *
3540 * Note that CPU offlining during suspend is ignored.  We don't modify
3541 * cpusets across suspend/resume cycles at all.
3542 */
3543static void cpuset_hotplug_workfn(struct work_struct *work)
3544{
3545	static cpumask_t new_cpus;
3546	static nodemask_t new_mems;
3547	bool cpus_updated, mems_updated;
3548	bool on_dfl = is_in_v2_mode();
3549	struct tmpmasks tmp, *ptmp = NULL;
3550
3551	if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3552		ptmp = &tmp;
3553
3554	percpu_down_write(&cpuset_rwsem);
3555
3556	/* fetch the available cpus/mems and find out which changed how */
3557	cpumask_copy(&new_cpus, cpu_active_mask);
3558	new_mems = node_states[N_MEMORY];
3559
3560	/*
3561	 * If subparts_cpus is populated, it is likely that the check below
3562	 * will produce a false positive on cpus_updated when the cpu list
3563	 * isn't changed. It is extra work, but it is better to be safe.
3564	 */
3565	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
 
3566	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3567
3568	/*
3569	 * In the rare case that hotplug removes all the cpus in subparts_cpus,
3570	 * we assume that cpus are updated.
3571	 */
3572	if (!cpus_updated && top_cpuset.nr_subparts_cpus)
3573		cpus_updated = true;
3574
3575	/* synchronize cpus_allowed to cpu_active_mask */
3576	if (cpus_updated) {
3577		spin_lock_irq(&callback_lock);
3578		if (!on_dfl)
3579			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3580		/*
3581		 * Make sure that CPUs allocated to child partitions
3582		 * do not show up in effective_cpus. If no CPU is left,
3583		 * we clear the subparts_cpus & let the child partitions
3584		 * fight for the CPUs again.
3585		 */
3586		if (top_cpuset.nr_subparts_cpus) {
3587			if (cpumask_subset(&new_cpus,
3588					   top_cpuset.subparts_cpus)) {
3589				top_cpuset.nr_subparts_cpus = 0;
3590				cpumask_clear(top_cpuset.subparts_cpus);
3591			} else {
3592				cpumask_andnot(&new_cpus, &new_cpus,
3593					       top_cpuset.subparts_cpus);
3594			}
3595		}
3596		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3597		spin_unlock_irq(&callback_lock);
3598		/* we don't mess with cpumasks of tasks in top_cpuset */
3599	}
3600
3601	/* synchronize mems_allowed to N_MEMORY */
3602	if (mems_updated) {
3603		spin_lock_irq(&callback_lock);
3604		if (!on_dfl)
3605			top_cpuset.mems_allowed = new_mems;
3606		top_cpuset.effective_mems = new_mems;
3607		spin_unlock_irq(&callback_lock);
3608		update_tasks_nodemask(&top_cpuset);
3609	}
3610
3611	percpu_up_write(&cpuset_rwsem);
3612
3613	/* if cpus or mems changed, we need to propagate to descendants */
3614	if (cpus_updated || mems_updated) {
3615		struct cpuset *cs;
3616		struct cgroup_subsys_state *pos_css;
3617
3618		rcu_read_lock();
3619		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3620			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3621				continue;
3622			rcu_read_unlock();
3623
3624			cpuset_hotplug_update_tasks(cs, ptmp);
3625
3626			rcu_read_lock();
3627			css_put(&cs->css);
3628		}
3629		rcu_read_unlock();
3630	}
3631
3632	/* rebuild sched domains if cpus_allowed has changed */
3633	if (cpus_updated || force_rebuild) {
3634		force_rebuild = false;
3635		rebuild_sched_domains();
3636	}
3637
3638	free_cpumasks(NULL, ptmp);
3639}
3640
3641void cpuset_update_active_cpus(void)
3642{
3643	/*
3644	 * We're inside cpu hotplug critical region which usually nests
3645	 * inside cgroup synchronization.  Bounce actual hotplug processing
3646	 * to a work item to avoid reverse locking order.
3647	 */
3648	schedule_work(&cpuset_hotplug_work);
3649}
3650
3651void cpuset_wait_for_hotplug(void)
3652{
3653	flush_work(&cpuset_hotplug_work);
3654}
3655
3656/*
3657 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3658 * Call this routine anytime after node_states[N_MEMORY] changes.
3659 * See cpuset_update_active_cpus() for CPU hotplug handling.
3660 */
3661static int cpuset_track_online_nodes(struct notifier_block *self,
3662				unsigned long action, void *arg)
3663{
3664	schedule_work(&cpuset_hotplug_work);
3665	return NOTIFY_OK;
3666}
3667
3668/**
3669 * cpuset_init_smp - initialize cpus_allowed
3670 *
3671 * Description: Finish top cpuset after cpu, node maps are initialized
3672 */
3673void __init cpuset_init_smp(void)
3674{
3675	/*
3676	 * cpus_allowed/mems_allowed set to v2 values in the initial
3677	 * cpuset_bind() call will be reset to v1 values in another
3678	 * cpuset_bind() call when v1 cpuset is mounted.
3679	 */
3680	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3681
3682	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3683	top_cpuset.effective_mems = node_states[N_MEMORY];
3684
3685	hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
3686
3687	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3688	BUG_ON(!cpuset_migrate_mm_wq);
3689}
3690
3691/**
3692 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3693 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3694 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3695 *
3696 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3697 * attached to the specified @tsk.  Guaranteed to return some non-empty
3698 * subset of cpu_online_mask, even if this means going outside the
3699 * task's cpuset, except when the task is in the top cpuset.
3700 **/
3701
3702void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3703{
3704	unsigned long flags;
3705	struct cpuset *cs;
3706
3707	spin_lock_irqsave(&callback_lock, flags);
3708	rcu_read_lock();
3709
3710	cs = task_cs(tsk);
3711	if (cs != &top_cpuset)
3712		guarantee_online_cpus(tsk, pmask);
3713	/*
3714	 * Tasks in the top cpuset won't get updates to their cpumasks
3715	 * when a hotplug online/offline event happens. So we include all
3716	 * offline cpus in the allowed cpu list.
3717	 */
3718	if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
3719		const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3720
3721		/*
3722		 * We first exclude cpus allocated to partitions. If there is no
3723		 * allowable online cpu left, we fall back to all possible cpus.
3724		 */
3725		cpumask_andnot(pmask, possible_mask, top_cpuset.subparts_cpus);
3726		if (!cpumask_intersects(pmask, cpu_online_mask))
3727			cpumask_copy(pmask, possible_mask);
3728	}
3729
3730	rcu_read_unlock();
3731	spin_unlock_irqrestore(&callback_lock, flags);
3732}
3733
3734/**
3735 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3736 * @tsk: pointer to task_struct with which the scheduler is struggling
3737 *
3738 * Description: In the case that the scheduler cannot find an allowed cpu in
3739 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3740 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3741 * which will not contain a sane cpumask during cases such as cpu hotplugging.
3742 * This is the absolute last resort for the scheduler and it is only used if
3743 * _every_ other avenue has been traveled.
3744 *
3745 * Returns true if the affinity of @tsk was changed, false otherwise.
3746 **/
3747
3748bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3749{
3750	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
3751	const struct cpumask *cs_mask;
3752	bool changed = false;
3753
3754	rcu_read_lock();
3755	cs_mask = task_cs(tsk)->cpus_allowed;
3756	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
3757		do_set_cpus_allowed(tsk, cs_mask);
3758		changed = true;
3759	}
3760	rcu_read_unlock();
3761
3762	/*
3763	 * We own tsk->cpus_allowed, nobody can change it under us.
3764	 *
3765	 * But we used cs && cs->cpus_allowed locklessly and thus can
3766	 * race with cgroup_attach_task() or update_cpumask() and get
3767	 * the wrong tsk->cpus_allowed. However, both cases imply the
3768	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
3769	 * which takes task_rq_lock().
3770	 *
3771	 * If we are called after it dropped the lock we must see all
3772	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
3773	 * set any mask even if it is not right from task_cs() pov,
3774	 * the pending set_cpus_allowed_ptr() will fix things.
3775	 *
3776	 * select_fallback_rq() will fix things up and set cpu_possible_mask
3777	 * if required.
3778	 */
3779	return changed;
3780}
3781
3782void __init cpuset_init_current_mems_allowed(void)
3783{
3784	nodes_setall(current->mems_allowed);
3785}
3786
3787/**
3788 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
3789 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3790 *
3791 * Description: Returns the nodemask_t mems_allowed of the cpuset
3792 * attached to the specified @tsk.  Guaranteed to return some non-empty
3793 * subset of node_states[N_MEMORY], even if this means going outside the
3794 * task's cpuset.
3795 **/
3796
3797nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
3798{
3799	nodemask_t mask;
3800	unsigned long flags;
3801
3802	spin_lock_irqsave(&callback_lock, flags);
3803	rcu_read_lock();
3804	guarantee_online_mems(task_cs(tsk), &mask);
3805	rcu_read_unlock();
3806	spin_unlock_irqrestore(&callback_lock, flags);
3807
3808	return mask;
3809}
3810
3811/**
3812 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3813 * @nodemask: the nodemask to be checked
3814 *
3815 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
3816 */
3817int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
3818{
3819	return nodes_intersects(*nodemask, current->mems_allowed);
3820}
3821
3822/*
3823 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3824 * mem_hardwall ancestor to the specified cpuset.  Call holding
3825 * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
3826 * (an unusual configuration), then returns the root cpuset.
3827 */
3828static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
3829{
3830	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
3831		cs = parent_cs(cs);
3832	return cs;
3833}
3834
3835/*
3836 * __cpuset_node_allowed - Can we allocate on a memory node?
3837 * @node: is this an allowed node?
3838 * @gfp_mask: memory allocation flags
3839 *
3840 * If we're in interrupt, yes, we can always allocate.  If @node is set in
3841 * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
3842 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
3843 * yes.  If current has access to memory reserves as an oom victim, yes.
3844 * Otherwise, no.
3845 *
3846 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
3847 * and do not allow allocations outside the current task's cpuset
3848 * unless the task has been OOM killed.
3849 * GFP_KERNEL allocations are not so marked, so can escape to the
3850 * nearest enclosing hardwalled ancestor cpuset.
3851 *
3852 * Scanning up parent cpusets requires callback_lock.  The
3853 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
3854 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
3855 * current task's mems_allowed came up empty on the first pass over
3856 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
3857 * cpuset are short of memory, might require taking the callback_lock.
3858 *
3859 * The first call here from mm/page_alloc:get_page_from_freelist()
3860 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
3861 * so no allocation on a node outside the cpuset is allowed (unless
3862 * in interrupt, of course).
3863 *
3864 * The second pass through get_page_from_freelist() doesn't even call
3865 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
3866 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
3867 * in alloc_flags.  That logic and the checks below have the combined
3868 * effect that:
3869 *	in_interrupt - any node ok (current task context irrelevant)
3870 *	GFP_ATOMIC   - any node ok
3871 *	tsk_is_oom_victim   - any node ok
3872 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
3873 *	GFP_USER     - only nodes in current task's mems_allowed ok.
3874 */
3875bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
3876{
3877	struct cpuset *cs;		/* current cpuset ancestors */
3878	bool allowed;			/* is allocation in zone z allowed? */
3879	unsigned long flags;
3880
3881	if (in_interrupt())
3882		return true;
3883	if (node_isset(node, current->mems_allowed))
3884		return true;
3885	/*
3886	 * Allow tasks that have access to memory reserves because they have
3887	 * been OOM killed to get memory anywhere.
3888	 */
3889	if (unlikely(tsk_is_oom_victim(current)))
3890		return true;
3891	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
3892		return false;
3893
3894	if (current->flags & PF_EXITING) /* Let dying task have memory */
3895		return true;
3896
3897	/* Not hardwall and node outside mems_allowed: scan up cpusets */
3898	spin_lock_irqsave(&callback_lock, flags);
3899
3900	rcu_read_lock();
3901	cs = nearest_hardwall_ancestor(task_cs(current));
3902	allowed = node_isset(node, cs->mems_allowed);
3903	rcu_read_unlock();
3904
3905	spin_unlock_irqrestore(&callback_lock, flags);
3906	return allowed;
3907}
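/*
 * Illustrative usage sketch (not part of this file): pick the first node
 * from a candidate nodemask that the current task may allocate on, going
 * through the cpuset_node_allowed() wrapper from <linux/cpuset.h>.  The
 * helper name and its caller are hypothetical; the real page allocator
 * applies these checks internally while walking the zonelist.
 */
static int example_first_allowed_node(nodemask_t *candidates, gfp_t gfp_mask)
{
	int nid;

	for_each_node_mask(nid, *candidates)
		if (cpuset_node_allowed(nid, gfp_mask))	/* hardwall-aware check */
			return nid;

	return NUMA_NO_NODE;	/* no candidate node is allowed */
}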
3908
3909/**
3910 * cpuset_mem_spread_node() - On which node to begin search for a file page
3911 * cpuset_slab_spread_node() - On which node to begin search for a slab page
3912 *
3913 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
3914 * tasks in a cpuset with is_spread_page or is_spread_slab set),
3915 * and if the memory allocation used cpuset_mem_spread_node()
3916 * to determine on which node to start looking, as it will for
3917 * certain page cache or slab cache pages such as used for file
3918 * system buffers and inode caches, then instead of starting on the
3919 * local node to look for a free page, rather spread the starting
3920 * node around the task's mems_allowed nodes.
3921 *
3922 * We don't have to worry about the returned node being offline
3923 * because "it can't happen", and even if it did, it would be ok.
3924 *
3925 * The routines calling guarantee_online_mems() are careful to
3926 * only set nodes in task->mems_allowed that are online.  So it
3927 * should not be possible for the following code to return an
3928 * offline node.  But if it did, that would be ok, as this routine
3929 * is not returning the node where the allocation must be, only
3930 * the node where the search should start.  The zonelist passed to
3931 * __alloc_pages() will include all nodes.  If the slab allocator
3932 * is passed an offline node, it will fall back to the local node.
3933 * See kmem_cache_alloc_node().
3934 */
3935
3936static int cpuset_spread_node(int *rotor)
3937{
3938	return *rotor = next_node_in(*rotor, current->mems_allowed);
3939}
3940
3941int cpuset_mem_spread_node(void)
3942{
3943	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
3944		current->cpuset_mem_spread_rotor =
3945			node_random(&current->mems_allowed);
3946
3947	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
3948}
3949
3950int cpuset_slab_spread_node(void)
3951{
3952	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
3953		current->cpuset_slab_spread_rotor =
3954			node_random(&current->mems_allowed);
3955
3956	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
3957}
3958
3959EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
3960
3961/**
3962 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
3963 * @tsk1: pointer to task_struct of some task.
3964 * @tsk2: pointer to task_struct of some other task.
3965 *
3966 * Description: Return true if @tsk1's mems_allowed intersects the
3967 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
3968 * one of the task's memory usage might impact the memory available
3969 * to the other.
3970 **/
3971
3972int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
3973				   const struct task_struct *tsk2)
3974{
3975	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
3976}
3977
3978/**
3979 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
3980 *
3981 * Description: Prints current's name, cpuset name, and cached copy of its
3982 * mems_allowed to the kernel log.
3983 */
3984void cpuset_print_current_mems_allowed(void)
3985{
3986	struct cgroup *cgrp;
3987
3988	rcu_read_lock();
3989
3990	cgrp = task_cs(current)->css.cgroup;
3991	pr_cont(",cpuset=");
3992	pr_cont_cgroup_name(cgrp);
3993	pr_cont(",mems_allowed=%*pbl",
3994		nodemask_pr_args(&current->mems_allowed));
3995
3996	rcu_read_unlock();
3997}
3998
3999/*
4000 * Collection of memory_pressure is suppressed unless
4001 * this flag is enabled by writing "1" to the special
4002 * cpuset file 'memory_pressure_enabled' in the root cpuset.
4003 */
4004
4005int cpuset_memory_pressure_enabled __read_mostly;
4006
4007/*
4008 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
4009 *
4010 * Keep a running average of the rate of synchronous (direct)
4011 * page reclaim efforts initiated by tasks in each cpuset.
4012 *
4013 * This represents the rate at which some task in the cpuset
4014 * ran low on memory on all nodes it was allowed to use, and
4015 * had to enter the kernel's page reclaim code in an effort to
4016 * create more free memory by tossing clean pages or swapping
4017 * or writing dirty pages.
4018 *
4019 * Display to user space in the per-cpuset read-only file
4020 * "memory_pressure".  Value displayed is an integer
4021 * representing the recent rate of entry into the synchronous
4022 * (direct) page reclaim by any task attached to the cpuset.
4023 */
4024
4025void __cpuset_memory_pressure_bump(void)
4026{
4027	rcu_read_lock();
4028	fmeter_markevent(&task_cs(current)->fmeter);
4029	rcu_read_unlock();
4030}
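/*
 * Purely illustrative model (not part of this file) of a decaying
 * event-rate meter in the spirit of the fmeter used above; the names,
 * decay rule and constants below are invented for illustration and do
 * not match the kernel's actual fmeter implementation (see the
 * "Frequency meter" comments elsewhere in this file).
 */
struct example_meter {
	int val;		/* decayed event rate */
	int cnt;		/* events recorded since the last update */
	time64_t time;		/* seconds at the last update */
};

static void example_meter_update(struct example_meter *m, time64_t now)
{
	time64_t ticks = now - m->time;

	if (ticks > 32)		/* after a long idle spell the old rate is ~0 */
		ticks = 32;
	while (ticks-- > 0)	/* halve the rate once per elapsed second */
		m->val /= 2;

	m->val += m->cnt;	/* fold in the newly recorded events */
	m->cnt = 0;
	m->time = now;
}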
4031
4032#ifdef CONFIG_PROC_PID_CPUSET
4033/*
4034 * proc_cpuset_show()
4035 *  - Print task's cpuset path into seq_file.
4036 *  - Used for /proc/<pid>/cpuset.
4037 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
4038 *    doesn't really matter if tsk->cpuset changes after we read it,
4039 *    and we take cpuset_rwsem, keeping cpuset_attach() from changing it
4040 *    anyway.
4041 */
4042int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
4043		     struct pid *pid, struct task_struct *tsk)
4044{
4045	char *buf;
4046	struct cgroup_subsys_state *css;
4047	int retval;
4048
4049	retval = -ENOMEM;
4050	buf = kmalloc(PATH_MAX, GFP_KERNEL);
4051	if (!buf)
4052		goto out;
4053
4054	css = task_get_css(tsk, cpuset_cgrp_id);
4055	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
4056				current->nsproxy->cgroup_ns);
4057	css_put(css);
4058	if (retval >= PATH_MAX)
4059		retval = -ENAMETOOLONG;
4060	if (retval < 0)
4061		goto out_free;
4062	seq_puts(m, buf);
4063	seq_putc(m, '\n');
4064	retval = 0;
4065out_free:
4066	kfree(buf);
4067out:
4068	return retval;
4069}
4070#endif /* CONFIG_PROC_PID_CPUSET */
4071
4072/* Display task mems_allowed in /proc/<pid>/status file. */
4073void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
4074{
4075	seq_printf(m, "Mems_allowed:\t%*pb\n",
4076		   nodemask_pr_args(&task->mems_allowed));
4077	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
4078		   nodemask_pr_args(&task->mems_allowed));
4079}
v6.8
   1/*
   2 *  kernel/cpuset.c
   3 *
   4 *  Processor and Memory placement constraints for sets of tasks.
   5 *
   6 *  Copyright (C) 2003 BULL SA.
   7 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
   8 *  Copyright (C) 2006 Google, Inc
   9 *
  10 *  Portions derived from Patrick Mochel's sysfs code.
  11 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
  12 *
  13 *  2003-10-10 Written by Simon Derr.
  14 *  2003-10-22 Updates by Stephen Hemminger.
  15 *  2004 May-July Rework by Paul Jackson.
  16 *  2006 Rework by Paul Menage to use generic cgroups
  17 *  2008 Rework of the scheduler domains and CPU hotplug handling
  18 *       by Max Krasnyansky
  19 *
  20 *  This file is subject to the terms and conditions of the GNU General Public
  21 *  License.  See the file COPYING in the main directory of the Linux
  22 *  distribution for more details.
  23 */
  24
  25#include <linux/cpu.h>
  26#include <linux/cpumask.h>
  27#include <linux/cpuset.h>
  28#include <linux/delay.h>
  29#include <linux/init.h>
  30#include <linux/interrupt.h>
  31#include <linux/kernel.h>
  32#include <linux/mempolicy.h>
  33#include <linux/mm.h>
  34#include <linux/memory.h>
  35#include <linux/export.h>
  36#include <linux/rcupdate.h>
  37#include <linux/sched.h>
  38#include <linux/sched/deadline.h>
  39#include <linux/sched/mm.h>
  40#include <linux/sched/task.h>
 
  41#include <linux/security.h>
 
  42#include <linux/spinlock.h>
  43#include <linux/oom.h>
  44#include <linux/sched/isolation.h>
  45#include <linux/cgroup.h>
  46#include <linux/wait.h>
  47#include <linux/workqueue.h>
  48
  49DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
  50DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
  51
  52/*
  53 * There could be abnormal cpuset configurations for cpu or memory
  54 * node binding, add this key to provide a quick low-cost judgment
  55 * of the situation.
  56 */
  57DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
  58
  59/* See "Frequency meter" comments, below. */
  60
  61struct fmeter {
  62	int cnt;		/* unprocessed events count */
  63	int val;		/* most recent output value */
  64	time64_t time;		/* clock (secs) when val computed */
  65	spinlock_t lock;	/* guards read or write of above */
  66};
  67
  68/*
  69 * Invalid partition error code
  70 */
  71enum prs_errcode {
  72	PERR_NONE = 0,
  73	PERR_INVCPUS,
  74	PERR_INVPARENT,
  75	PERR_NOTPART,
  76	PERR_NOTEXCL,
  77	PERR_NOCPUS,
  78	PERR_HOTPLUG,
  79	PERR_CPUSEMPTY,
  80	PERR_HKEEPING,
  81};
  82
  83static const char * const perr_strings[] = {
  84	[PERR_INVCPUS]   = "Invalid cpu list in cpuset.cpus.exclusive",
  85	[PERR_INVPARENT] = "Parent is an invalid partition root",
  86	[PERR_NOTPART]   = "Parent is not a partition root",
  87	[PERR_NOTEXCL]   = "Cpu list in cpuset.cpus not exclusive",
  88	[PERR_NOCPUS]    = "Parent unable to distribute cpu downstream",
  89	[PERR_HOTPLUG]   = "No cpu available due to hotplug",
  90	[PERR_CPUSEMPTY] = "cpuset.cpus is empty",
  91	[PERR_HKEEPING]  = "partition config conflicts with housekeeping setup",
  92};
  93
  94struct cpuset {
  95	struct cgroup_subsys_state css;
  96
  97	unsigned long flags;		/* "unsigned long" so bitops work */
  98
  99	/*
 100	 * On default hierarchy:
 101	 *
 102	 * The user-configured masks can only be changed by writing to
 103	 * cpuset.cpus and cpuset.mems, and won't be limited by the
 104	 * parent masks.
 105	 *
 106	 * The effective masks are the real masks that apply to the tasks
 107	 * in the cpuset. They may be changed if the configured masks are
 108	 * changed or hotplug happens.
 109	 *
 110	 * effective_mask == configured_mask & parent's effective_mask,
 111	 * and if it ends up empty, it will inherit the parent's mask.
 112	 *
 113	 *
 114	 * On legacy hierarchy:
 115	 *
 116	 * The user-configured masks are always the same with effective masks.
 117	 */
 118
 119	/* user-configured CPUs and Memory Nodes allowed to tasks */
 120	cpumask_var_t cpus_allowed;
 121	nodemask_t mems_allowed;
 122
 123	/* effective CPUs and Memory Nodes allowed to tasks */
 124	cpumask_var_t effective_cpus;
 125	nodemask_t effective_mems;
 126
 127	/*
 128	 * Exclusive CPUs dedicated to current cgroup (default hierarchy only)
 129	 *
 130	 * These exclusive CPUs must be a subset of cpus_allowed. A parent
 131	 * cgroup can only grant exclusive CPUs to one of its children.
 132	 *
 133	 * When the cgroup becomes a valid partition root, effective_xcpus
 134	 * defaults to cpus_allowed if not set. The effective_cpus of a valid
 135	 * partition root comes solely from its effective_xcpus and some of the
 136	 * effective_xcpus may be distributed to sub-partitions below & hence
 137	 * excluded from its effective_cpus.
 138	 */
 139	cpumask_var_t effective_xcpus;
 140
 141	/*
 142	 * Exclusive CPUs as requested by the user (default hierarchy only)
 143	 */
 144	cpumask_var_t exclusive_cpus;
 145
 146	/*
 147	 * These are the old Memory Nodes tasks took on.
 148	 *
 149	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
 150	 * - A new cpuset's old_mems_allowed is initialized when some
 151	 *   task is moved into it.
 152	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
 153	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
 154	 *   then old_mems_allowed is updated to mems_allowed.
 155	 */
 156	nodemask_t old_mems_allowed;
 157
 158	struct fmeter fmeter;		/* memory_pressure filter */
 159
 160	/*
 161	 * Tasks are being attached to this cpuset.  Used to prevent
 162	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
 163	 */
 164	int attach_in_progress;
 165
 166	/* partition number for rebuild_sched_domains() */
 167	int pn;
 168
 169	/* for custom sched domain */
 170	int relax_domain_level;
 171
 172	/* number of valid sub-partitions */
 173	int nr_subparts;
 174
 175	/* partition root state */
 176	int partition_root_state;
 177
 178	/*
 179	 * Default hierarchy only:
 180	 * use_parent_ecpus - set if using parent's effective_cpus
 181	 * child_ecpus_count - # of children with use_parent_ecpus set
 182	 */
 183	int use_parent_ecpus;
 184	int child_ecpus_count;
 185
 186	/*
 187	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
 188	 * know when to rebuild associated root domain bandwidth information.
 189	 */
 190	int nr_deadline_tasks;
 191	int nr_migrate_dl_tasks;
 192	u64 sum_migrate_dl_bw;
 193
 194	/* Invalid partition error code, not lock protected */
 195	enum prs_errcode prs_err;
 196
 197	/* Handle for cpuset.cpus.partition */
 198	struct cgroup_file partition_file;
 199
 200	/* Remote partition sibling list anchored at remote_children */
 201	struct list_head remote_sibling;
 202};
 203
 204/*
 205 * Exclusive CPUs distributed out to sub-partitions of top_cpuset
 206 */
 207static cpumask_var_t	subpartitions_cpus;
 208
 209/*
 210 * Exclusive CPUs in isolated partitions
 211 */
 212static cpumask_var_t	isolated_cpus;
 213
 214/* List of remote partition root children */
 215static struct list_head remote_children;
 216
 217/*
 218 * Partition root states:
 219 *
 220 *   0 - member (not a partition root)
 221 *   1 - partition root
 222 *   2 - partition root without load balancing (isolated)
 223 *  -1 - invalid partition root
 224 *  -2 - invalid isolated partition root
 225 */
 226#define PRS_MEMBER		0
 227#define PRS_ROOT		1
 228#define PRS_ISOLATED		2
 229#define PRS_INVALID_ROOT	-1
 230#define PRS_INVALID_ISOLATED	-2
 231
 232static inline bool is_prs_invalid(int prs_state)
 233{
 234	return prs_state < 0;
 235}
 236
 237/*
 238 * Temporary cpumasks for working with partitions that are passed among
 239 * functions to avoid memory allocation in inner functions.
 240 */
 241struct tmpmasks {
 242	cpumask_var_t addmask, delmask;	/* For partition root */
 243	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
 244};
 245
 246static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
 247{
 248	return css ? container_of(css, struct cpuset, css) : NULL;
 249}
 250
 251/* Retrieve the cpuset for a task */
 252static inline struct cpuset *task_cs(struct task_struct *task)
 253{
 254	return css_cs(task_css(task, cpuset_cgrp_id));
 255}
 256
 257static inline struct cpuset *parent_cs(struct cpuset *cs)
 258{
 259	return css_cs(cs->css.parent);
 260}
 261
 262void inc_dl_tasks_cs(struct task_struct *p)
 263{
 264	struct cpuset *cs = task_cs(p);
 265
 266	cs->nr_deadline_tasks++;
 267}
 268
 269void dec_dl_tasks_cs(struct task_struct *p)
 270{
 271	struct cpuset *cs = task_cs(p);
 272
 273	cs->nr_deadline_tasks--;
 274}
 275
 276/* bits in struct cpuset flags field */
 277typedef enum {
 278	CS_ONLINE,
 279	CS_CPU_EXCLUSIVE,
 280	CS_MEM_EXCLUSIVE,
 281	CS_MEM_HARDWALL,
 282	CS_MEMORY_MIGRATE,
 283	CS_SCHED_LOAD_BALANCE,
 284	CS_SPREAD_PAGE,
 285	CS_SPREAD_SLAB,
 286} cpuset_flagbits_t;
 287
 288/* convenient tests for these bits */
 289static inline bool is_cpuset_online(struct cpuset *cs)
 290{
 291	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
 292}
 293
 294static inline int is_cpu_exclusive(const struct cpuset *cs)
 295{
 296	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
 297}
 298
 299static inline int is_mem_exclusive(const struct cpuset *cs)
 300{
 301	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 302}
 303
 304static inline int is_mem_hardwall(const struct cpuset *cs)
 305{
 306	return test_bit(CS_MEM_HARDWALL, &cs->flags);
 307}
 308
 309static inline int is_sched_load_balance(const struct cpuset *cs)
 310{
 311	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 312}
 313
 314static inline int is_memory_migrate(const struct cpuset *cs)
 315{
 316	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
 317}
 318
 319static inline int is_spread_page(const struct cpuset *cs)
 320{
 321	return test_bit(CS_SPREAD_PAGE, &cs->flags);
 322}
 323
 324static inline int is_spread_slab(const struct cpuset *cs)
 325{
 326	return test_bit(CS_SPREAD_SLAB, &cs->flags);
 327}
 328
 329static inline int is_partition_valid(const struct cpuset *cs)
 330{
 331	return cs->partition_root_state > 0;
 332}
 333
 334static inline int is_partition_invalid(const struct cpuset *cs)
 335{
 336	return cs->partition_root_state < 0;
 337}
 338
 339/*
 340 * Callers should hold callback_lock to modify partition_root_state.
 341 */
 342static inline void make_partition_invalid(struct cpuset *cs)
 343{
 344	if (cs->partition_root_state > 0)
 345		cs->partition_root_state = -cs->partition_root_state;
 346}
 347
 348/*
 349 * Send a notification event whenever partition_root_state changes.
 350 */
 351static inline void notify_partition_change(struct cpuset *cs, int old_prs)
 352{
 353	if (old_prs == cs->partition_root_state)
 354		return;
 355	cgroup_file_notify(&cs->partition_file);
 356
 357	/* Reset prs_err if not invalid */
 358	if (is_partition_valid(cs))
 359		WRITE_ONCE(cs->prs_err, PERR_NONE);
 360}
 361
 362static struct cpuset top_cpuset = {
 363	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
 364		  (1 << CS_MEM_EXCLUSIVE)),
 365	.partition_root_state = PRS_ROOT,
 366	.remote_sibling = LIST_HEAD_INIT(top_cpuset.remote_sibling),
 367};
 368
 369/**
 370 * cpuset_for_each_child - traverse online children of a cpuset
 371 * @child_cs: loop cursor pointing to the current child
 372 * @pos_css: used for iteration
 373 * @parent_cs: target cpuset to walk children of
 374 *
 375 * Walk @child_cs through the online children of @parent_cs.  Must be used
 376 * with RCU read locked.
 377 */
 378#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
 379	css_for_each_child((pos_css), &(parent_cs)->css)		\
 380		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
 381
 382/**
 383 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 384 * @des_cs: loop cursor pointing to the current descendant
 385 * @pos_css: used for iteration
 386 * @root_cs: target cpuset to walk descendants of
 387 *
 388 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 389 * with RCU read locked.  The caller may modify @pos_css by calling
 390 * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
 391 * iteration and the first node to be visited.
 392 */
 393#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
 394	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
 395		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
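/*
 * Usage sketch (hypothetical helper, not part of this file): both
 * iterators above must run under rcu_read_lock(), e.g. to count the
 * online children of a cpuset.
 */
static int example_count_online_children(struct cpuset *parent)
{
	struct cgroup_subsys_state *pos_css;
	struct cpuset *child;
	int n = 0;

	rcu_read_lock();
	cpuset_for_each_child(child, pos_css, parent)
		n++;
	rcu_read_unlock();

	return n;
}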
 396
 397/*
 398 * There are two global locks guarding cpuset structures - cpuset_mutex and
 399 * callback_lock. We also require taking task_lock() when dereferencing a
 400 * task's cpuset pointer. See "The task_lock() exception", at the end of this
 401 * comment.  The cpuset code uses only cpuset_mutex. Other kernel subsystems
 402 * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset
 403 * structures. Note that cpuset_mutex needs to be a mutex as it is used in
 404 * paths that rely on priority inheritance (e.g. scheduler - on RT) for
 405 * correctness.
 406 *
 407 * A task must hold both locks to modify cpusets.  If a task holds
 408 * cpuset_mutex, it blocks others, ensuring that it is the only task able to
 409 * also acquire callback_lock and be able to modify cpusets.  It can perform
 410 * various checks on the cpuset structure first, knowing nothing will change.
 411 * It can also allocate memory while just holding cpuset_mutex.  While it is
 412 * performing these checks, various callback routines can briefly acquire
 413 * callback_lock to query cpusets.  Once it is ready to make the changes, it
 414 * takes callback_lock, blocking everyone else.
 
 415 *
 416 * Calls to the kernel memory allocator can not be made while holding
 417 * callback_lock, as that would risk double tripping on callback_lock
 418 * from one of the callbacks into the cpuset code from within
 419 * __alloc_pages().
 420 *
 421 * If a task is only holding callback_lock, then it has read-only
 422 * access to cpusets.
 423 *
 424 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 425 * by another task, so we use alloc_lock in the task_struct fields to protect
 426 * them.
 427 *
 428 * The cpuset_common_file_read() handlers only hold callback_lock across
 429 * small pieces of code, such as when reading out possibly multi-word
 430 * cpumasks and nodemasks.
 431 *
 432 * Accessing a task's cpuset should be done in accordance with the
 433 * guidelines for accessing subsystem state in kernel/cgroup.c
 434 */
 435
 436static DEFINE_MUTEX(cpuset_mutex);
 437
 438void cpuset_lock(void)
 439{
 440	mutex_lock(&cpuset_mutex);
 441}
 442
 443void cpuset_unlock(void)
 444{
 445	mutex_unlock(&cpuset_mutex);
 446}
 447
 448static DEFINE_SPINLOCK(callback_lock);
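/*
 * Illustrative write-side sketch (not part of this file) of the locking
 * rules documented above: take cpuset_mutex for the whole update, do any
 * validation or memory allocation before touching callback_lock, then
 * publish the new values under callback_lock with interrupts disabled.
 * The function name and the update it performs are hypothetical.
 */
static void example_publish_effective_cpus(struct cpuset *cs,
					   const struct cpumask *new_cpus)
{
	mutex_lock(&cpuset_mutex);		/* serialize against other writers */
	/* ... validate the change, allocate temporary masks, etc. ... */
	spin_lock_irq(&callback_lock);		/* readers are blocked only briefly */
	cpumask_copy(cs->effective_cpus, new_cpus);
	spin_unlock_irq(&callback_lock);
	mutex_unlock(&cpuset_mutex);
}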
 449
 450static struct workqueue_struct *cpuset_migrate_mm_wq;
 451
 452/*
 453 * CPU / memory hotplug is handled asynchronously.
 454 */
 455static void cpuset_hotplug_workfn(struct work_struct *work);
 456static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
 457
 458static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
 459
 460static inline void check_insane_mems_config(nodemask_t *nodes)
 461{
 462	if (!cpusets_insane_config() &&
 463		movable_only_nodes(nodes)) {
 464		static_branch_enable(&cpusets_insane_config_key);
 465		pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
 466			"Cpuset allocations might fail even with a lot of memory available.\n",
 467			nodemask_pr_args(nodes));
 468	}
 469}
 470
 471/*
 472 * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
 473 * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
 474 * the v1 cpuset cgroup filesystem with the "cpuset_v2_mode" mount option.
 475 * With v2 behavior, "cpus" and "mems" are always what the users have
 476 * requested and won't be changed by hotplug events. Only the effective
 477 * cpus or mems will be affected.
 478 */
 479static inline bool is_in_v2_mode(void)
 480{
 481	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
 482	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
 483}
 484
 485/**
 486 * partition_is_populated - check if partition has tasks
 487 * @cs: partition root to be checked
 488 * @excluded_child: a child cpuset to be excluded in task checking
 489 * Return: true if there are tasks, false otherwise
 490 *
 491 * It is assumed that @cs is a valid partition root. @excluded_child should
 492 * be non-NULL when this cpuset is going to become a partition itself.
 493 */
 494static inline bool partition_is_populated(struct cpuset *cs,
 495					  struct cpuset *excluded_child)
 496{
 497	struct cgroup_subsys_state *css;
 498	struct cpuset *child;
 499
 500	if (cs->css.cgroup->nr_populated_csets)
 501		return true;
 502	if (!excluded_child && !cs->nr_subparts)
 503		return cgroup_is_populated(cs->css.cgroup);
 504
 505	rcu_read_lock();
 506	cpuset_for_each_child(child, css, cs) {
 507		if (child == excluded_child)
 508			continue;
 509		if (is_partition_valid(child))
 510			continue;
 511		if (cgroup_is_populated(child->css.cgroup)) {
 512			rcu_read_unlock();
 513			return true;
 514		}
 515	}
 516	rcu_read_unlock();
 517	return false;
 518}
 519
 520/*
 521 * Return in pmask the portion of a task's cpuset's cpus_allowed that
 522 * are online and are capable of running the task.  If none are found,
 523 * walk up the cpuset hierarchy until we find one that does have some
 524 * appropriate cpus.
 525 *
 526 * One way or another, we guarantee to return some non-empty subset
 527 * of cpu_online_mask.
 528 *
 529 * Call with callback_lock or cpuset_mutex held.
 530 */
 531static void guarantee_online_cpus(struct task_struct *tsk,
 532				  struct cpumask *pmask)
 533{
 534	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
 535	struct cpuset *cs;
 536
 537	if (WARN_ON(!cpumask_and(pmask, possible_mask, cpu_online_mask)))
 538		cpumask_copy(pmask, cpu_online_mask);
 539
 540	rcu_read_lock();
 541	cs = task_cs(tsk);
 542
 543	while (!cpumask_intersects(cs->effective_cpus, pmask)) {
 544		cs = parent_cs(cs);
 545		if (unlikely(!cs)) {
 546			/*
 547			 * The top cpuset doesn't have any online cpu as a
 548			 * consequence of a race between cpuset_hotplug_work
 549			 * and cpu hotplug notifier.  But we know the top
 550			 * cpuset's effective_cpus is on its way to be
 551			 * identical to cpu_online_mask.
 552			 */
 553			goto out_unlock;
 554		}
 555	}
 556	cpumask_and(pmask, pmask, cs->effective_cpus);
 557
 558out_unlock:
 559	rcu_read_unlock();
 560}
 561
 562/*
 563 * Return in *pmask the portion of a cpuset's mems_allowed that
 564 * are online, with memory.  If none are online with memory, walk
 565 * up the cpuset hierarchy until we find one that does have some
 566 * online mems.  The top cpuset always has some mems online.
 567 *
 568 * One way or another, we guarantee to return some non-empty subset
 569 * of node_states[N_MEMORY].
 570 *
 571 * Call with callback_lock or cpuset_mutex held.
 572 */
 573static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 574{
 575	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
 576		cs = parent_cs(cs);
 577	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
 578}
 579
 580/*
 581 * update task's spread flag if cpuset's page/slab spread flag is set
 582 *
 583 * Call with callback_lock or cpuset_mutex held. The check can be skipped
 584 * if on default hierarchy.
 585 */
 586static void cpuset_update_task_spread_flags(struct cpuset *cs,
 587					struct task_struct *tsk)
 588{
 589	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
 590		return;
 591
 592	if (is_spread_page(cs))
 593		task_set_spread_page(tsk);
 594	else
 595		task_clear_spread_page(tsk);
 596
 597	if (is_spread_slab(cs))
 598		task_set_spread_slab(tsk);
 599	else
 600		task_clear_spread_slab(tsk);
 601}
 602
 603/*
 604 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 605 *
 606 * One cpuset is a subset of another if all its allowed CPUs and
 607 * Memory Nodes are a subset of the other, and its exclusive flags
 608 * are only set if the other's are set.  Call holding cpuset_mutex.
 609 */
 610
 611static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 612{
 613	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
 614		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 615		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 616		is_mem_exclusive(p) <= is_mem_exclusive(q);
 617}
 618
 619/**
 620 * alloc_cpumasks - allocate cpumasks for a cpuset or a tmpmasks structure
 621 * @cs:  the cpuset that has cpumasks to be allocated.
 622 * @tmp: the tmpmasks structure pointer
 623 * Return: 0 if successful, -ENOMEM otherwise.
 624 *
 625 * Only one of the two input arguments should be non-NULL.
 626 */
 627static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
 628{
 629	cpumask_var_t *pmask1, *pmask2, *pmask3, *pmask4;
 630
 631	if (cs) {
 632		pmask1 = &cs->cpus_allowed;
 633		pmask2 = &cs->effective_cpus;
 634		pmask3 = &cs->effective_xcpus;
 635		pmask4 = &cs->exclusive_cpus;
 636	} else {
 637		pmask1 = &tmp->new_cpus;
 638		pmask2 = &tmp->addmask;
 639		pmask3 = &tmp->delmask;
 640		pmask4 = NULL;
 641	}
 642
 643	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
 644		return -ENOMEM;
 645
 646	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
 647		goto free_one;
 648
 649	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
 650		goto free_two;
 651
 652	if (pmask4 && !zalloc_cpumask_var(pmask4, GFP_KERNEL))
 653		goto free_three;
 654
 655
 656	return 0;
 657
 658free_three:
 659	free_cpumask_var(*pmask3);
 660free_two:
 661	free_cpumask_var(*pmask2);
 662free_one:
 663	free_cpumask_var(*pmask1);
 664	return -ENOMEM;
 665}
 666
 667/**
 668 * free_cpumasks - free cpumasks in a tmpmasks structure
 669 * @cs:  the cpuset that has cpumasks to be freed.
 670 * @tmp: the tmpmasks structure pointer
 671 */
 672static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
 673{
 674	if (cs) {
 675		free_cpumask_var(cs->cpus_allowed);
 676		free_cpumask_var(cs->effective_cpus);
 677		free_cpumask_var(cs->effective_xcpus);
 678		free_cpumask_var(cs->exclusive_cpus);
 679	}
 680	if (tmp) {
 681		free_cpumask_var(tmp->new_cpus);
 682		free_cpumask_var(tmp->addmask);
 683		free_cpumask_var(tmp->delmask);
 684	}
 685}
 686
 687/**
 688 * alloc_trial_cpuset - allocate a trial cpuset
 689 * @cs: the cpuset that the trial cpuset duplicates
 690 */
 691static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
 692{
 693	struct cpuset *trial;
 694
 695	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
 696	if (!trial)
 697		return NULL;
 698
 699	if (alloc_cpumasks(trial, NULL)) {
 700		kfree(trial);
 701		return NULL;
 702	}
 703
 704	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
 705	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
 706	cpumask_copy(trial->effective_xcpus, cs->effective_xcpus);
 707	cpumask_copy(trial->exclusive_cpus, cs->exclusive_cpus);
 708	return trial;
 709}
 710
 711/**
 712 * free_cpuset - free the cpuset
 713 * @cs: the cpuset to be freed
 714 */
 715static inline void free_cpuset(struct cpuset *cs)
 716{
 717	free_cpumasks(cs, NULL);
 718	kfree(cs);
 719}
 720
 721static inline struct cpumask *fetch_xcpus(struct cpuset *cs)
 722{
 723	return !cpumask_empty(cs->exclusive_cpus) ? cs->exclusive_cpus :
 724	       cpumask_empty(cs->effective_xcpus) ? cs->cpus_allowed
 725						  : cs->effective_xcpus;
 726}
 727
 728/*
 729 * cpusets_are_exclusive() - check if two cpusets are exclusive
 730 *
 731 * Return true if exclusive, false if not
 732 */
 733static inline bool cpusets_are_exclusive(struct cpuset *cs1, struct cpuset *cs2)
 734{
 735	struct cpumask *xcpus1 = fetch_xcpus(cs1);
 736	struct cpumask *xcpus2 = fetch_xcpus(cs2);
 737
 738	if (cpumask_intersects(xcpus1, xcpus2))
 739		return false;
 740	return true;
 741}
 742
 743/*
 744 * validate_change_legacy() - Validate conditions specific to legacy (v1)
 745 *                            behavior.
 746 */
 747static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial)
 748{
 749	struct cgroup_subsys_state *css;
 750	struct cpuset *c, *par;
 751	int ret;
 752
 753	WARN_ON_ONCE(!rcu_read_lock_held());
 754
 755	/* Each of our child cpusets must be a subset of us */
 756	ret = -EBUSY;
 757	cpuset_for_each_child(c, css, cur)
 758		if (!is_cpuset_subset(c, trial))
 759			goto out;
 760
 761	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
 762	ret = -EACCES;
 763	par = parent_cs(cur);
 764	if (par && !is_cpuset_subset(trial, par))
 765		goto out;
 766
 767	ret = 0;
 768out:
 769	return ret;
 770}
 771
 772/*
 773 * validate_change() - Used to validate that any proposed cpuset change
 774 *		       follows the structural rules for cpusets.
 775 *
 776 * If we replaced the flag and mask values of the current cpuset
 777 * (cur) with those values in the trial cpuset (trial), would
 778 * our various subset and exclusive rules still be valid?  Presumes
 779 * cpuset_mutex held.
 780 *
 781 * 'cur' is the address of an actual, in-use cpuset.  Operations
 782 * such as list traversal that depend on the actual address of the
 783 * cpuset in the list must use cur below, not trial.
 784 *
 785 * 'trial' is the address of bulk structure copy of cur, with
 786 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 787 * or flags changed to new, trial values.
 788 *
 789 * Return 0 if valid, -errno if not.
 790 */
 791
 792static int validate_change(struct cpuset *cur, struct cpuset *trial)
 793{
 794	struct cgroup_subsys_state *css;
 795	struct cpuset *c, *par;
 796	int ret = 0;
 797
 798	rcu_read_lock();
 799
 800	if (!is_in_v2_mode())
 801		ret = validate_change_legacy(cur, trial);
 802	if (ret)
 803		goto out;
 804
 805	/* Remaining checks don't apply to root cpuset */
 806	if (cur == &top_cpuset)
 807		goto out;
 808
 809	par = parent_cs(cur);
 810
 811	/*
 812	 * Cpusets with tasks - existing or newly being attached - can't
 813	 * be changed to have empty cpus_allowed or mems_allowed.
 814	 */
 815	ret = -ENOSPC;
 816	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
 817		if (!cpumask_empty(cur->cpus_allowed) &&
 818		    cpumask_empty(trial->cpus_allowed))
 819			goto out;
 820		if (!nodes_empty(cur->mems_allowed) &&
 821		    nodes_empty(trial->mems_allowed))
 822			goto out;
 823	}
 824
 825	/*
 826	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
 827	 * tasks.
 828	 */
 829	ret = -EBUSY;
 830	if (is_cpu_exclusive(cur) &&
 831	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
 832				       trial->cpus_allowed))
 833		goto out;
 834
 835	/*
 836	 * If either I or some sibling (!= me) is exclusive, we can't
 837	 * overlap
 838	 */
 839	ret = -EINVAL;
 840	cpuset_for_each_child(c, css, par) {
 841		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 842		    c != cur) {
 843			if (!cpusets_are_exclusive(trial, c))
 844				goto out;
 845		}
 846		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 847		    c != cur &&
 848		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
 849			goto out;
 850	}
 851
 852	ret = 0;
 853out:
 854	rcu_read_unlock();
 855	return ret;
 856}
 857
 858#ifdef CONFIG_SMP
 859/*
 860 * Helper routine for generate_sched_domains().
 861 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 862 */
 863static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 864{
 865	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
 866}
 867
 868static void
 869update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
 870{
 871	if (dattr->relax_domain_level < c->relax_domain_level)
 872		dattr->relax_domain_level = c->relax_domain_level;
 873	return;
 874}
 875
 876static void update_domain_attr_tree(struct sched_domain_attr *dattr,
 877				    struct cpuset *root_cs)
 878{
 879	struct cpuset *cp;
 880	struct cgroup_subsys_state *pos_css;
 881
 882	rcu_read_lock();
 883	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
 884		/* skip the whole subtree if @cp doesn't have any CPU */
 885		if (cpumask_empty(cp->cpus_allowed)) {
 886			pos_css = css_rightmost_descendant(pos_css);
 887			continue;
 888		}
 889
 890		if (is_sched_load_balance(cp))
 891			update_domain_attr(dattr, cp);
 892	}
 893	rcu_read_unlock();
 894}
 895
 896/* Must be called with cpuset_mutex held.  */
 897static inline int nr_cpusets(void)
 898{
 899	/* jump label reference count + the top-level cpuset */
 900	return static_key_count(&cpusets_enabled_key.key) + 1;
 901}
 902
 903/*
 904 * generate_sched_domains()
 905 *
 906 * This function builds a partial partition of the system's CPUs.
 907 * A 'partial partition' is a set of non-overlapping subsets whose
 908 * union is a subset of that set.
 909 * The output of this function needs to be passed to kernel/sched/core.c
 910 * partition_sched_domains() routine, which will rebuild the scheduler's
 911 * load balancing domains (sched domains) as specified by that partial
 912 * partition.
 913 *
 914 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
 915 * for a background explanation of this.
 916 *
 917 * Does not return errors, on the theory that the callers of this
 918 * routine would rather not worry about failures to rebuild sched
 919 * domains when operating in the severe memory shortage situations
 920 * that could cause allocation failures below.
 921 *
 922 * Must be called with cpuset_mutex held.
 923 *
 924 * The three key local variables below are:
 925 *    cp - cpuset pointer, used (together with pos_css) to perform a
 926 *	   top-down scan of all cpusets. For our purposes, rebuilding
 927 *	   the scheduler's sched domains, we can ignore !is_sched_load_
 928 *	   balance cpusets.
 929 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 930 *	   that need to be load balanced, for convenient iterative
 931 *	   access by the subsequent code that finds the best partition,
 932 *	   i.e. the set of domains (subsets) of CPUs such that the
 933 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 934 *	   is a subset of one of these domains, while there are as
 935 *	   many such domains as possible, each as small as possible.
 936 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
 937 *	   the kernel/sched/core.c routine partition_sched_domains() in a
 938 *	   convenient format, that can be easily compared to the prior
 939 *	   value to determine what partition elements (sched domains)
 940 *	   were changed (added or removed.)
 941 *
 942 * Finding the best partition (set of domains):
 943 *	The triple nested loops below over i, j, k scan over the
 944 *	load balanced cpusets (using the array of cpuset pointers in
 945 *	csa[]) looking for pairs of cpusets that have overlapping
 946 *	cpus_allowed, but which don't have the same 'pn' partition
 947 *	number and gives them the same partition number.  It keeps
 948 *	looping on the 'restart' label until it can no longer find
 949 *	any such pairs.
 950 *
 951 *	The union of the cpus_allowed masks from the set of
 952 *	all cpusets having the same 'pn' value then form the one
 953 *	element of the partition (one sched domain) to be passed to
 954 *	partition_sched_domains().
 955 */
 956static int generate_sched_domains(cpumask_var_t **domains,
 957			struct sched_domain_attr **attributes)
 958{
 959	struct cpuset *cp;	/* top-down scan of cpusets */
 960	struct cpuset **csa;	/* array of all cpuset ptrs */
 961	int csn;		/* how many cpuset ptrs in csa so far */
 962	int i, j, k;		/* indices for partition finding loops */
 963	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
 964	struct sched_domain_attr *dattr;  /* attributes for custom domains */
 965	int ndoms = 0;		/* number of sched domains in result */
 966	int nslot;		/* next empty doms[] struct cpumask slot */
 967	struct cgroup_subsys_state *pos_css;
 968	bool root_load_balance = is_sched_load_balance(&top_cpuset);
 969
 970	doms = NULL;
 971	dattr = NULL;
 972	csa = NULL;
 973
 974	/* Special case for the 99% of systems with one, full, sched domain */
 975	if (root_load_balance && !top_cpuset.nr_subparts) {
 976		ndoms = 1;
 977		doms = alloc_sched_domains(ndoms);
 978		if (!doms)
 979			goto done;
 980
 981		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
 982		if (dattr) {
 983			*dattr = SD_ATTR_INIT;
 984			update_domain_attr_tree(dattr, &top_cpuset);
 985		}
 986		cpumask_and(doms[0], top_cpuset.effective_cpus,
 987			    housekeeping_cpumask(HK_TYPE_DOMAIN));
 988
 989		goto done;
 990	}
 991
 992	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
 993	if (!csa)
 994		goto done;
 995	csn = 0;
 996
 997	rcu_read_lock();
 998	if (root_load_balance)
 999		csa[csn++] = &top_cpuset;
1000	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
1001		if (cp == &top_cpuset)
1002			continue;
1003		/*
1004		 * Continue traversing beyond @cp iff @cp has some CPUs and
1005		 * isn't load balancing.  The former is obvious.  The
1006		 * latter: All child cpusets contain a subset of the
1007		 * parent's cpus, so just skip them, and then we call
1008		 * update_domain_attr_tree() to calc relax_domain_level of
1009		 * the corresponding sched domain.
1010		 *
1011		 * If root is load-balancing, we can skip @cp if it
1012		 * is a subset of the root's effective_cpus.
1013		 */
1014		if (!cpumask_empty(cp->cpus_allowed) &&
1015		    !(is_sched_load_balance(cp) &&
1016		      cpumask_intersects(cp->cpus_allowed,
1017					 housekeeping_cpumask(HK_TYPE_DOMAIN))))
1018			continue;
1019
1020		if (root_load_balance &&
1021		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
1022			continue;
1023
1024		if (is_sched_load_balance(cp) &&
1025		    !cpumask_empty(cp->effective_cpus))
1026			csa[csn++] = cp;
1027
1028		/* skip @cp's subtree if not a partition root */
1029		if (!is_partition_valid(cp))
1030			pos_css = css_rightmost_descendant(pos_css);
1031	}
1032	rcu_read_unlock();
1033
1034	for (i = 0; i < csn; i++)
1035		csa[i]->pn = i;
1036	ndoms = csn;
1037
1038restart:
1039	/* Find the best partition (set of sched domains) */
1040	for (i = 0; i < csn; i++) {
1041		struct cpuset *a = csa[i];
1042		int apn = a->pn;
1043
1044		for (j = 0; j < csn; j++) {
1045			struct cpuset *b = csa[j];
1046			int bpn = b->pn;
1047
1048			if (apn != bpn && cpusets_overlap(a, b)) {
1049				for (k = 0; k < csn; k++) {
1050					struct cpuset *c = csa[k];
1051
1052					if (c->pn == bpn)
1053						c->pn = apn;
1054				}
1055				ndoms--;	/* one less element */
1056				goto restart;
1057			}
1058		}
1059	}
1060
1061	/*
1062	 * Now we know how many domains to create.
1063	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
1064	 */
1065	doms = alloc_sched_domains(ndoms);
1066	if (!doms)
1067		goto done;
1068
1069	/*
1070	 * The rest of the code, including the scheduler, can deal with
1071	 * dattr==NULL case. No need to abort if alloc fails.
1072	 */
1073	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
1074			      GFP_KERNEL);
1075
1076	for (nslot = 0, i = 0; i < csn; i++) {
1077		struct cpuset *a = csa[i];
1078		struct cpumask *dp;
1079		int apn = a->pn;
1080
1081		if (apn < 0) {
1082			/* Skip completed partitions */
1083			continue;
1084		}
1085
1086		dp = doms[nslot];
1087
1088		if (nslot == ndoms) {
1089			static int warnings = 10;
1090			if (warnings) {
1091				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
1092					nslot, ndoms, csn, i, apn);
1093				warnings--;
1094			}
1095			continue;
1096		}
1097
1098		cpumask_clear(dp);
1099		if (dattr)
1100			*(dattr + nslot) = SD_ATTR_INIT;
1101		for (j = i; j < csn; j++) {
1102			struct cpuset *b = csa[j];
1103
1104			if (apn == b->pn) {
1105				cpumask_or(dp, dp, b->effective_cpus);
1106				cpumask_and(dp, dp, housekeeping_cpumask(HK_TYPE_DOMAIN));
1107				if (dattr)
1108					update_domain_attr_tree(dattr + nslot, b);
1109
1110				/* Done with this partition */
1111				b->pn = -1;
1112			}
1113		}
1114		nslot++;
1115	}
1116	BUG_ON(nslot != ndoms);
1117
1118done:
1119	kfree(csa);
1120
1121	/*
1122	 * Fall back to the default domain if kmalloc() failed.
1123	 * See comments in partition_sched_domains().
1124	 */
1125	if (doms == NULL)
1126		ndoms = 1;
1127
1128	*domains    = doms;
1129	*attributes = dattr;
1130	return ndoms;
1131}
1132
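/*
 * dl_update_tasks_root_domain - re-account deadline tasks of a cpuset
 * @cs: the cpuset whose tasks are walked
 *
 * Walk every task attached to @cs and let dl_add_task_root_domain() add
 * the bandwidth of each deadline task back to its current root domain.
 * A no-op if the cpuset has no deadline tasks.
 */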
1133static void dl_update_tasks_root_domain(struct cpuset *cs)
1134{
1135	struct css_task_iter it;
1136	struct task_struct *task;
1137
1138	if (cs->nr_deadline_tasks == 0)
1139		return;
1140
1141	css_task_iter_start(&cs->css, 0, &it);
1142
1143	while ((task = css_task_iter_next(&it)))
1144		dl_add_task_root_domain(task);
1145
1146	css_task_iter_end(&it);
1147}
1148
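/*
 * dl_rebuild_rd_accounting - rebuild deadline bandwidth accounting
 *
 * Called after the scheduler domains have been repartitioned: clear the
 * default root domain's accounting (it is recomputed if any task still
 * belongs to it) and walk the cpuset hierarchy, re-accounting the tasks
 * of every cpuset with a non-empty effective_cpus.
 */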
1149static void dl_rebuild_rd_accounting(void)
1150{
1151	struct cpuset *cs = NULL;
1152	struct cgroup_subsys_state *pos_css;
1153
1154	lockdep_assert_held(&cpuset_mutex);
1155	lockdep_assert_cpus_held();
1156	lockdep_assert_held(&sched_domains_mutex);
1157
1158	rcu_read_lock();
1159
1160	/*
1161	 * Clear the default root domain's DL accounting; it will be computed again
1162	 * if a task belongs to it.
1163	 */
1164	dl_clear_root_domain(&def_root_domain);
1165
1166	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1167
1168		if (cpumask_empty(cs->effective_cpus)) {
1169			pos_css = css_rightmost_descendant(pos_css);
1170			continue;
1171		}
1172
1173		css_get(&cs->css);
1174
1175		rcu_read_unlock();
1176
1177		dl_update_tasks_root_domain(cs);
1178
1179		rcu_read_lock();
1180		css_put(&cs->css);
1181	}
1182	rcu_read_unlock();
1183}
1184
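/*
 * Repartition the scheduler domains from the newly generated domain masks
 * and attributes, then redo the deadline bandwidth accounting, all while
 * holding sched_domains_mutex.
 */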
1185static void
1186partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1187				    struct sched_domain_attr *dattr_new)
1188{
1189	mutex_lock(&sched_domains_mutex);
1190	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
1191	dl_rebuild_rd_accounting();
1192	mutex_unlock(&sched_domains_mutex);
1193}
1194
1195/*
1196 * Rebuild scheduler domains.
1197 *
1198 * If the flag 'sched_load_balance' of any cpuset with non-empty
1199 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
1200 * which has that flag enabled, or if any cpuset with a non-empty
1201 * 'cpus' is removed, then call this routine to rebuild the
1202 * scheduler's dynamic sched domains.
1203 *
1204 * Call with cpuset_mutex held.  Takes cpus_read_lock().
1205 */
1206static void rebuild_sched_domains_locked(void)
1207{
1208	struct cgroup_subsys_state *pos_css;
1209	struct sched_domain_attr *attr;
1210	cpumask_var_t *doms;
1211	struct cpuset *cs;
1212	int ndoms;
1213
1214	lockdep_assert_cpus_held();
1215	lockdep_assert_held(&cpuset_mutex);
1216
1217	/*
1218	 * If we have raced with CPU hotplug, return early to avoid
1219	 * passing doms with an offlined cpu to partition_sched_domains().
1220	 * Anyway, cpuset_hotplug_workfn() will rebuild the sched domains.
1221	 *
1222	 * With no CPUs in any subpartitions, top_cpuset's effective CPUs
1223	 * should be the same as the active CPUs, so checking only top_cpuset
1224	 * is enough to detect racing CPU offlines.
1225	 */
1226	if (cpumask_empty(subpartitions_cpus) &&
1227	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
1228		return;
1229
1230	/*
1231	 * With subpartition CPUs, however, the effective CPUs of a partition
1232	 * root should be only a subset of the active CPUs.  Since a CPU in any
1233	 * partition root could be offlined, all must be checked.
1234	 */
1235	if (top_cpuset.nr_subparts) {
1236		rcu_read_lock();
1237		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
1238			if (!is_partition_valid(cs)) {
1239				pos_css = css_rightmost_descendant(pos_css);
1240				continue;
1241			}
1242			if (!cpumask_subset(cs->effective_cpus,
1243					    cpu_active_mask)) {
1244				rcu_read_unlock();
1245				return;
1246			}
1247		}
1248		rcu_read_unlock();
1249	}
1250
1251	/* Generate domain masks and attrs */
1252	ndoms = generate_sched_domains(&doms, &attr);
1253
1254	/* Have scheduler rebuild the domains */
1255	partition_and_rebuild_sched_domains(ndoms, doms, attr);
1256}
1257#else /* !CONFIG_SMP */
1258static void rebuild_sched_domains_locked(void)
1259{
1260}
1261#endif /* CONFIG_SMP */
1262
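/*
 * Helper that takes cpus_read_lock and cpuset_mutex in that order before
 * calling rebuild_sched_domains_locked().
 */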
1263void rebuild_sched_domains(void)
1264{
1265	cpus_read_lock();
1266	mutex_lock(&cpuset_mutex);
1267	rebuild_sched_domains_locked();
1268	mutex_unlock(&cpuset_mutex);
1269	cpus_read_unlock();
1270}
1271
1272/**
1273 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
1274 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
1275 * @new_cpus: the temp variable for the new effective_cpus mask
1276 *
1277 * Iterate through each task of @cs updating its cpus_allowed to the
1278 * effective cpuset's.  As this function is called with cpuset_mutex held,
1279 * cpuset membership stays stable. For top_cpuset, task_cpu_possible_mask()
1280 * is used instead of effective_cpus to make sure all offline CPUs are also
1281 * included as hotplug code won't update cpumasks for tasks in top_cpuset.
1282 */
1283static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
1284{
1285	struct css_task_iter it;
1286	struct task_struct *task;
1287	bool top_cs = cs == &top_cpuset;
1288
1289	css_task_iter_start(&cs->css, 0, &it);
1290	while ((task = css_task_iter_next(&it))) {
1291		const struct cpumask *possible_mask = task_cpu_possible_mask(task);
1292
1293		if (top_cs) {
1294			/*
1295			 * Percpu kthreads in top_cpuset are ignored
1296			 */
1297			if (kthread_is_per_cpu(task))
1298				continue;
1299			cpumask_andnot(new_cpus, possible_mask, subpartitions_cpus);
1300		} else {
1301			cpumask_and(new_cpus, possible_mask, cs->effective_cpus);
1302		}
1303		set_cpus_allowed_ptr(task, new_cpus);
1304	}
1305	css_task_iter_end(&it);
1306}
1307
1308/**
1309 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
1310 * @new_cpus: the temp variable for the new effective_cpus mask
1311 * @cs: the cpuset that needs its new effective_cpus mask recomputed
1312 * @parent: the parent cpuset
1313 *
1314 * The result is valid only if the given cpuset isn't a partition root.
1315 */
1316static void compute_effective_cpumask(struct cpumask *new_cpus,
1317				      struct cpuset *cs, struct cpuset *parent)
1318{
1319	cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
1320}
1321
1322/*
1323 * Commands for update_parent_effective_cpumask
1324 */
1325enum partition_cmd {
1326	partcmd_enable,		/* Enable partition root	  */
1327	partcmd_enablei,	/* Enable isolated partition root */
1328	partcmd_disable,	/* Disable partition root	  */
1329	partcmd_update,		/* Update parent's effective_cpus */
1330	partcmd_invalidate,	/* Make partition invalid	  */
1331};
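/*
 * Illustrative example (userspace view, assuming a cgroup v2 hierarchy
 * mounted at /sys/fs/cgroup): a local partition root is typically set up
 * with
 *
 *	# echo "2-5" > /sys/fs/cgroup/A/cpuset.cpus
 *	# echo root  > /sys/fs/cgroup/A/cpuset.cpus.partition
 *
 * Writing "isolated" instead of "root" requests an isolated partition.
 * The cpuset.cpus.partition write reaches update_prstate(), which uses
 * partcmd_enable or partcmd_enablei.
 */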
1332
1333static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1334		       int turning_on);
1335static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
1336				    struct tmpmasks *tmp);
1337
1338/*
1339 * Update partition exclusive flag
1340 *
1341 * Return: 0 if successful, an error code otherwise
1342 */
1343static int update_partition_exclusive(struct cpuset *cs, int new_prs)
1344{
1345	bool exclusive = (new_prs > 0);
1346
1347	if (exclusive && !is_cpu_exclusive(cs)) {
1348		if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
1349			return PERR_NOTEXCL;
1350	} else if (!exclusive && is_cpu_exclusive(cs)) {
1351		/* Turning off CS_CPU_EXCLUSIVE will not return error */
1352		update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1353	}
1354	return 0;
1355}
1356
1357/*
1358 * Update partition load balance flag and/or rebuild sched domain
1359 *
1360 * Changing load balance flag will automatically call
1361 * rebuild_sched_domains_locked().
1362 * This function is for cgroup v2 only.
1363 */
1364static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
1365{
1366	int new_prs = cs->partition_root_state;
1367	bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
1368	bool new_lb;
1369
1370	/*
1371	 * If cs is not a valid partition root, the load balance state
1372	 * will follow its parent.
1373	 */
1374	if (new_prs > 0) {
1375		new_lb = (new_prs != PRS_ISOLATED);
1376	} else {
1377		new_lb = is_sched_load_balance(parent_cs(cs));
1378	}
1379	if (new_lb != !!is_sched_load_balance(cs)) {
1380		rebuild_domains = true;
1381		if (new_lb)
1382			set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1383		else
1384			clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
1385	}
1386
1387	if (rebuild_domains)
1388		rebuild_sched_domains_locked();
1389}
1390
1391/*
1392 * tasks_nocpu_error - Return true if tasks will have no effective_cpus
1393 */
1394static bool tasks_nocpu_error(struct cpuset *parent, struct cpuset *cs,
1395			      struct cpumask *xcpus)
1396{
1397	/*
1398	 * A populated partition (cs or parent) can't have empty effective_cpus
1399	 */
1400	return (cpumask_subset(parent->effective_cpus, xcpus) &&
1401		partition_is_populated(parent, cs)) ||
1402	       (!cpumask_intersects(xcpus, cpu_active_mask) &&
1403		partition_is_populated(cs, NULL));
1404}
1405
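/*
 * reset_partition_data - reset partition state of a no-longer-valid root
 * @cs: the cpuset to reset
 *
 * On the default hierarchy only: clear nr_subparts, drop effective_xcpus
 * (and the CS_CPU_EXCLUSIVE flag) when exclusive_cpus is empty, and fall
 * back to the parent's effective_cpus if the intersection of cpus_allowed
 * with the parent's effective_cpus is empty.  Caller must hold
 * callback_lock.
 */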
1406static void reset_partition_data(struct cpuset *cs)
1407{
1408	struct cpuset *parent = parent_cs(cs);
1409
1410	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
1411		return;
1412
1413	lockdep_assert_held(&callback_lock);
1414
1415	cs->nr_subparts = 0;
1416	if (cpumask_empty(cs->exclusive_cpus)) {
1417		cpumask_clear(cs->effective_xcpus);
1418		if (is_cpu_exclusive(cs))
1419			clear_bit(CS_CPU_EXCLUSIVE, &cs->flags);
1420	}
1421	if (!cpumask_and(cs->effective_cpus,
1422			 parent->effective_cpus, cs->cpus_allowed)) {
1423		cs->use_parent_ecpus = true;
1424		parent->child_ecpus_count++;
1425		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1426	}
1427}
1428
1429/*
1430 * partition_xcpus_newstate - Exclusive CPUs state change
1431 * @old_prs: old partition_root_state
1432 * @new_prs: new partition_root_state
1433 * @xcpus: exclusive CPUs with state change
1434 */
1435static void partition_xcpus_newstate(int old_prs, int new_prs, struct cpumask *xcpus)
1436{
1437	WARN_ON_ONCE(old_prs == new_prs);
1438	if (new_prs == PRS_ISOLATED)
1439		cpumask_or(isolated_cpus, isolated_cpus, xcpus);
1440	else
1441		cpumask_andnot(isolated_cpus, isolated_cpus, xcpus);
1442}
1443
1444/*
1445 * partition_xcpus_add - Add new exclusive CPUs to partition
1446 * @new_prs: new partition_root_state
1447 * @parent: parent cpuset
1448 * @xcpus: exclusive CPUs to be added
1449 * Return: true if isolated_cpus modified, false otherwise
1450 *
1451 * Remote partition if parent == NULL
1452 */
1453static bool partition_xcpus_add(int new_prs, struct cpuset *parent,
1454				struct cpumask *xcpus)
1455{
1456	bool isolcpus_updated;
1457
1458	WARN_ON_ONCE(new_prs < 0);
1459	lockdep_assert_held(&callback_lock);
1460	if (!parent)
1461		parent = &top_cpuset;
1462
1463
1464	if (parent == &top_cpuset)
1465		cpumask_or(subpartitions_cpus, subpartitions_cpus, xcpus);
1466
1467	isolcpus_updated = (new_prs != parent->partition_root_state);
1468	if (isolcpus_updated)
1469		partition_xcpus_newstate(parent->partition_root_state, new_prs,
1470					 xcpus);
1471
1472	cpumask_andnot(parent->effective_cpus, parent->effective_cpus, xcpus);
1473	return isolcpus_updated;
1474}
1475
1476/*
1477 * partition_xcpus_del - Remove exclusive CPUs from partition
1478 * @old_prs: old partition_root_state
1479 * @parent: parent cpuset
1480 * @xcpus: exclusive CPUs to be removed
1481 * Return: true if isolated_cpus modified, false otherwise
1482 *
1483 * Remote partition if parent == NULL
1484 */
1485static bool partition_xcpus_del(int old_prs, struct cpuset *parent,
1486				struct cpumask *xcpus)
1487{
1488	bool isolcpus_updated;
1489
1490	WARN_ON_ONCE(old_prs < 0);
1491	lockdep_assert_held(&callback_lock);
1492	if (!parent)
1493		parent = &top_cpuset;
1494
1495	if (parent == &top_cpuset)
1496		cpumask_andnot(subpartitions_cpus, subpartitions_cpus, xcpus);
1497
1498	isolcpus_updated = (old_prs != parent->partition_root_state);
1499	if (isolcpus_updated)
1500		partition_xcpus_newstate(old_prs, parent->partition_root_state,
1501					 xcpus);
1502
1503	cpumask_and(xcpus, xcpus, cpu_active_mask);
1504	cpumask_or(parent->effective_cpus, parent->effective_cpus, xcpus);
1505	return isolcpus_updated;
1506}
1507
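/*
 * Propagate an update of isolated_cpus to the workqueue subsystem so that
 * unbound workqueues exclude the isolated CPUs.  A no-op when
 * @isolcpus_updated is false.  Must be called with cpus_read_lock held.
 */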
1508static void update_unbound_workqueue_cpumask(bool isolcpus_updated)
1509{
1510	int ret;
1511
1512	lockdep_assert_cpus_held();
1513
1514	if (!isolcpus_updated)
1515		return;
1516
1517	ret = workqueue_unbound_exclude_cpumask(isolated_cpus);
1518	WARN_ON_ONCE(ret < 0);
1519}
1520
1521/**
1522 * cpuset_cpu_is_isolated - Check if the given CPU is isolated
1523 * @cpu: the CPU number to be checked
1524 * Return: true if CPU is used in an isolated partition, false otherwise
1525 */
1526bool cpuset_cpu_is_isolated(int cpu)
1527{
1528	return cpumask_test_cpu(cpu, isolated_cpus);
1529}
1530EXPORT_SYMBOL_GPL(cpuset_cpu_is_isolated);
1531
1532/*
1533 * compute_effective_exclusive_cpumask - compute effective exclusive CPUs
1534 * @cs: cpuset
1535 * @xcpus: effective exclusive CPUs value to be set
1536 * Return: true if xcpus is not empty, false otherwise.
1537 *
1538 * Starting with exclusive_cpus (cpus_allowed if exclusive_cpus is not set),
1539 * it must be a subset of cpus_allowed and parent's effective_xcpus.
1540 */
1541static bool compute_effective_exclusive_cpumask(struct cpuset *cs,
1542						struct cpumask *xcpus)
1543{
1544	struct cpuset *parent = parent_cs(cs);
1545
1546	if (!xcpus)
1547		xcpus = cs->effective_xcpus;
1548
1549	if (!cpumask_empty(cs->exclusive_cpus))
1550		cpumask_and(xcpus, cs->exclusive_cpus, cs->cpus_allowed);
1551	else
1552		cpumask_copy(xcpus, cs->cpus_allowed);
1553
1554	return cpumask_and(xcpus, xcpus, parent->effective_xcpus);
1555}
1556
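/*
 * A remote partition takes its exclusive CPUs directly from the top
 * cpuset and sits on the remote_children list; a local partition is any
 * other valid partition root, whose CPUs are granted by its parent.
 */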
1557static inline bool is_remote_partition(struct cpuset *cs)
1558{
1559	return !list_empty(&cs->remote_sibling);
1560}
1561
1562static inline bool is_local_partition(struct cpuset *cs)
1563{
1564	return is_partition_valid(cs) && !is_remote_partition(cs);
1565}
1566
1567/*
1568 * remote_partition_enable - Enable current cpuset as a remote partition root
1569 * @cs: the cpuset to update
1570 * @new_prs: new partition_root_state
1571 * @tmp: temporary masks
1572 * Return: 1 if successful, 0 if error
1573 *
1574 * Enable the current cpuset to become a remote partition root taking CPUs
1575 * directly from the top cpuset. cpuset_mutex must be held by the caller.
1576 */
1577static int remote_partition_enable(struct cpuset *cs, int new_prs,
1578				   struct tmpmasks *tmp)
1579{
1580	bool isolcpus_updated;
1581
1582	/*
1583	 * The user must have sysadmin privilege.
1584	 */
1585	if (!capable(CAP_SYS_ADMIN))
1586		return 0;
1587
1588	/*
1589	 * The requested exclusive_cpus must not be allocated to other
1590	 * partitions and it can't use up all the root's effective_cpus.
1591	 *
1592	 * Note that if there is any local partition root above it or
1593	 * remote partition root underneath it, its exclusive_cpus must
1594	 * have overlapped with subpartitions_cpus.
1595	 */
1596	compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
1597	if (cpumask_empty(tmp->new_cpus) ||
1598	    cpumask_intersects(tmp->new_cpus, subpartitions_cpus) ||
1599	    cpumask_subset(top_cpuset.effective_cpus, tmp->new_cpus))
1600		return 0;
1601
1602	spin_lock_irq(&callback_lock);
1603	isolcpus_updated = partition_xcpus_add(new_prs, NULL, tmp->new_cpus);
1604	list_add(&cs->remote_sibling, &remote_children);
1605	if (cs->use_parent_ecpus) {
1606		struct cpuset *parent = parent_cs(cs);
1607
1608		cs->use_parent_ecpus = false;
1609		parent->child_ecpus_count--;
1610	}
1611	spin_unlock_irq(&callback_lock);
1612	update_unbound_workqueue_cpumask(isolcpus_updated);
1613
1614	/*
1615	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1616	 */
1617	update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1618	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1619	return 1;
1620}
1621
1622/*
1623 * remote_partition_disable - Remove current cpuset from remote partition list
1624 * @cs: the cpuset to update
1625 * @tmp: temporary masks
1626 *
1627 * The effective_cpus is also updated.
1628 *
1629 * cpuset_mutex must be held by the caller.
1630 */
1631static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
1632{
1633	bool isolcpus_updated;
1634
1635	compute_effective_exclusive_cpumask(cs, tmp->new_cpus);
1636	WARN_ON_ONCE(!is_remote_partition(cs));
1637	WARN_ON_ONCE(!cpumask_subset(tmp->new_cpus, subpartitions_cpus));
1638
1639	spin_lock_irq(&callback_lock);
1640	list_del_init(&cs->remote_sibling);
1641	isolcpus_updated = partition_xcpus_del(cs->partition_root_state,
1642					       NULL, tmp->new_cpus);
1643	cs->partition_root_state = -cs->partition_root_state;
1644	if (!cs->prs_err)
1645		cs->prs_err = PERR_INVCPUS;
1646	reset_partition_data(cs);
1647	spin_unlock_irq(&callback_lock);
1648	update_unbound_workqueue_cpumask(isolcpus_updated);
1649
1650	/*
1651	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1652	 */
1653	update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1654	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1655}
1656
1657/*
1658 * remote_cpus_update - cpus_exclusive change of remote partition
1659 * @cs: the cpuset to be updated
1660 * @newmask: the new effective_xcpus mask
1661 * @tmp: temporary masks
1662 *
1663 * top_cpuset and subpartitions_cpus will be updated, or the partition may
1664 * be invalidated.
1665 */
1666static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask,
1667			       struct tmpmasks *tmp)
1668{
1669	bool adding, deleting;
1670	int prs = cs->partition_root_state;
1671	int isolcpus_updated = 0;
1672
1673	if (WARN_ON_ONCE(!is_remote_partition(cs)))
1674		return;
1675
1676	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
1677
1678	if (cpumask_empty(newmask))
1679		goto invalidate;
1680
1681	adding   = cpumask_andnot(tmp->addmask, newmask, cs->effective_xcpus);
1682	deleting = cpumask_andnot(tmp->delmask, cs->effective_xcpus, newmask);
1683
1684	/*
1685	 * Adding remote CPUs is only allowed if those CPUs are
1686	 * not allocated to other partitions and there are effective_cpus
1687	 * left in the top cpuset.
1688	 */
1689	if (adding && (!capable(CAP_SYS_ADMIN) ||
1690		       cpumask_intersects(tmp->addmask, subpartitions_cpus) ||
1691		       cpumask_subset(top_cpuset.effective_cpus, tmp->addmask)))
1692		goto invalidate;
1693
1694	spin_lock_irq(&callback_lock);
1695	if (adding)
1696		isolcpus_updated += partition_xcpus_add(prs, NULL, tmp->addmask);
1697	if (deleting)
1698		isolcpus_updated += partition_xcpus_del(prs, NULL, tmp->delmask);
1699	spin_unlock_irq(&callback_lock);
1700	update_unbound_workqueue_cpumask(isolcpus_updated);
1701
1702	/*
1703	 * Propagate changes in top_cpuset's effective_cpus down the hierarchy.
1704	 */
1705	update_tasks_cpumask(&top_cpuset, tmp->new_cpus);
1706	update_sibling_cpumasks(&top_cpuset, NULL, tmp);
1707	return;
1708
1709invalidate:
1710	remote_partition_disable(cs, tmp);
1711}
1712
1713/*
1714 * remote_partition_check - check if a child remote partition needs update
1715 * @cs: the cpuset to be updated
1716 * @newmask: the new effective_xcpus mask
1717 * @delmask: temporary mask for deletion (not in tmp)
1718 * @tmp: temporary masks
1719 *
1720 * This should be called before the given cs has updated its cpus_allowed
1721 * and/or effective_xcpus.
1722 */
1723static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask,
1724				   struct cpumask *delmask, struct tmpmasks *tmp)
1725{
1726	struct cpuset *child, *next;
1727	int disable_cnt = 0;
1728
1729	/*
1730	 * Compute the effective exclusive CPUs that will be deleted.
1731	 */
1732	if (!cpumask_andnot(delmask, cs->effective_xcpus, newmask) ||
1733	    !cpumask_intersects(delmask, subpartitions_cpus))
1734		return;	/* No deletion of exclusive CPUs in partitions */
1735
1736	/*
1737	 * Search the remote children list for partitions that will be
1738	 * impacted by the deletion of exclusive CPUs.
1739	 *
1740	 * A cpuset must be removed from the remote children list before it
1741	 * can go offline, and holding cpuset_mutex prevents any change in
1742	 * cpuset status, so the RCU read lock isn't needed.
1743	 */
1744	lockdep_assert_held(&cpuset_mutex);
1745	list_for_each_entry_safe(child, next, &remote_children, remote_sibling)
1746		if (cpumask_intersects(child->effective_cpus, delmask)) {
1747			remote_partition_disable(child, tmp);
1748			disable_cnt++;
1749		}
1750	if (disable_cnt)
1751		rebuild_sched_domains_locked();
1752}
1753
1754/*
1755 * prstate_housekeeping_conflict - check for partition & housekeeping conflicts
1756 * @prstate: partition root state to be checked
1757 * @new_cpus: cpu mask
1758 * Return: true if there is conflict, false otherwise
1759 *
1760 * CPUs outside of housekeeping_cpumask(HK_TYPE_DOMAIN) can only be used in
1761 * an isolated partition.
1762 */
1763static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
1764{
1765	const struct cpumask *hk_domain = housekeeping_cpumask(HK_TYPE_DOMAIN);
1766	bool all_in_hk = cpumask_subset(new_cpus, hk_domain);
1767
1768	if (!all_in_hk && (prstate != PRS_ISOLATED))
1769		return true;
1770
1771	return false;
1772}
1773
1774/**
1775 * update_parent_effective_cpumask - update effective_cpus mask of parent cpuset
1776 * @cs:      The cpuset that requests change in partition root state
1777 * @cmd:     Partition root state change command
1778 * @newmask: Optional new cpumask for partcmd_update
1779 * @tmp:     Temporary addmask and delmask
1780 * Return:   0 or a partition root state error code
1781 *
1782 * For partcmd_enable*, the cpuset is being transformed from a non-partition
1783 * root to a partition root. The effective_xcpus (cpus_allowed if
1784 * effective_xcpus not set) mask of the given cpuset will be taken away from
1785 * parent's effective_cpus. The function will return 0 if all the CPUs listed
1786 * in effective_xcpus can be granted or an error code will be returned.
1787 *
1788 * For partcmd_disable, the cpuset is being transformed from a partition
1789 * root back to a non-partition root. Any CPUs in effective_xcpus will be
1790 * given back to parent's effective_cpus. 0 will always be returned.
1791 *
1792 * For partcmd_update, if the optional newmask is specified, the cpu list is
1793 * to be changed from effective_xcpus to newmask. Otherwise, effective_xcpus is
1794 * assumed to remain the same. The cpuset should either be a valid or invalid
1795 * partition root. The partition root state may change from valid to invalid
1796 * or vice versa. An error code will be returned if transitioning from
1797 * invalid to valid violates the exclusivity rule.
1798 *
1799 * For partcmd_invalidate, the current partition will be made invalid.
1800 *
1801 * The partcmd_enable* and partcmd_disable commands are used by
1802 * update_prstate(). An error code may be returned and the caller will check
1803 * for error.
1804 *
1805 * The partcmd_update command is used by update_cpumasks_hier() with newmask
1806 * NULL and update_cpumask() with newmask set. The partcmd_invalidate is used
1807 * by update_cpumask() with NULL newmask. In both cases, the callers won't
1808 * check for error and so partition_root_state and prs_error will be updated
1809 * directly.
1810 */
1811static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
1812					   struct cpumask *newmask,
1813					   struct tmpmasks *tmp)
1814{
1815	struct cpuset *parent = parent_cs(cs);
1816	int adding;	/* Adding cpus to parent's effective_cpus	*/
1817	int deleting;	/* Deleting cpus from parent's effective_cpus	*/
1818	int old_prs, new_prs;
1819	int part_error = PERR_NONE;	/* Partition error? */
1820	int subparts_delta = 0;
1821	struct cpumask *xcpus;		/* cs effective_xcpus */
1822	int isolcpus_updated = 0;
1823	bool nocpu;
1824
1825	lockdep_assert_held(&cpuset_mutex);
1826
1827	/*
1828	 * new_prs will only be changed for the partcmd_update and
1829	 * partcmd_invalidate commands.
1830	 */
1831	adding = deleting = false;
1832	old_prs = new_prs = cs->partition_root_state;
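	/*
	 * Operate on effective_xcpus if the user has set exclusive_cpus,
	 * otherwise fall back to cpus_allowed.
	 */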
1833	xcpus = !cpumask_empty(cs->exclusive_cpus)
1834		? cs->effective_xcpus : cs->cpus_allowed;
1835
1836	if (cmd == partcmd_invalidate) {
1837		if (is_prs_invalid(old_prs))
1838			return 0;
1839
1840		/*
1841		 * Make the current partition invalid.
1842		 */
1843		if (is_partition_valid(parent))
1844			adding = cpumask_and(tmp->addmask,
1845					     xcpus, parent->effective_xcpus);
1846		if (old_prs > 0) {
1847			new_prs = -old_prs;
1848			subparts_delta--;
1849		}
1850		goto write_error;
1851	}
1852
1853	/*
1854	 * The parent must be a partition root.
1855	 * The new cpumask, if present, or the current cpus_allowed must
1856	 * not be empty.
1857	 */
1858	if (!is_partition_valid(parent)) {
1859		return is_partition_invalid(parent)
1860		       ? PERR_INVPARENT : PERR_NOTPART;
1861	}
1862	if (!newmask && cpumask_empty(cs->cpus_allowed))
1863		return PERR_CPUSEMPTY;
1864
1865	nocpu = tasks_nocpu_error(parent, cs, xcpus);
1866
1867	if ((cmd == partcmd_enable) || (cmd == partcmd_enablei)) {
1868		/*
1869		 * Enabling partition root is not allowed if its
1870		 * effective_xcpus is empty or doesn't overlap with
1871		 * parent's effective_xcpus.
1872		 */
1873		if (cpumask_empty(xcpus) ||
1874		    !cpumask_intersects(xcpus, parent->effective_xcpus))
1875			return PERR_INVCPUS;
1876
1877		if (prstate_housekeeping_conflict(new_prs, xcpus))
1878			return PERR_HKEEPING;
1879
1880		/*
1881		 * A parent can be left with no CPU as long as there is no
1882		 * task directly associated with the parent partition.
1883		 */
1884		if (nocpu)
1885			return PERR_NOCPUS;
1886
1887		cpumask_copy(tmp->delmask, xcpus);
1888		deleting = true;
1889		subparts_delta++;
1890		new_prs = (cmd == partcmd_enable) ? PRS_ROOT : PRS_ISOLATED;
1891	} else if (cmd == partcmd_disable) {
1892		/*
1893		 * May need to add cpus to parent's effective_cpus for
1894		 * valid partition root.
1895		 */
1896		adding = !is_prs_invalid(old_prs) &&
1897			  cpumask_and(tmp->addmask, xcpus, parent->effective_xcpus);
1898		if (adding)
1899			subparts_delta--;
1900		new_prs = PRS_MEMBER;
1901	} else if (newmask) {
1902		/*
1903		 * Empty cpumask is not allowed
1904		 */
1905		if (cpumask_empty(newmask)) {
1906			part_error = PERR_CPUSEMPTY;
1907			goto write_error;
1908		}
1909
1910		/*
1911		 * partcmd_update with newmask:
1912		 *
1913		 * Compute add/delete mask to/from effective_cpus
1914		 *
1915		 * For valid partition:
1916		 *   addmask = exclusive_cpus & ~newmask
1917		 *			      & parent->effective_xcpus
1918		 *   delmask = newmask & ~exclusive_cpus
1919		 *		       & parent->effective_xcpus
1920		 *
1921		 * For invalid partition:
1922		 *   delmask = newmask & parent->effective_xcpus
1923		 */
1924		if (is_prs_invalid(old_prs)) {
1925			adding = false;
1926			deleting = cpumask_and(tmp->delmask,
1927					newmask, parent->effective_xcpus);
1928		} else {
1929			cpumask_andnot(tmp->addmask, xcpus, newmask);
1930			adding = cpumask_and(tmp->addmask, tmp->addmask,
1931					     parent->effective_xcpus);
1932
1933			cpumask_andnot(tmp->delmask, newmask, xcpus);
1934			deleting = cpumask_and(tmp->delmask, tmp->delmask,
1935					       parent->effective_xcpus);
1936		}
1937		/*
1938		 * Make partition invalid if parent's effective_cpus could
1939		 * become empty and there are tasks in the parent.
1940		 */
1941		if (nocpu && (!adding ||
1942		    !cpumask_intersects(tmp->addmask, cpu_active_mask))) {
1943			part_error = PERR_NOCPUS;
1944			deleting = false;
1945			adding = cpumask_and(tmp->addmask,
1946					     xcpus, parent->effective_xcpus);
1947		}
1948	} else {
1949		/*
1950		 * partcmd_update w/o newmask
1951		 *
1952		 * delmask = effective_xcpus & parent->effective_cpus
1953		 *
1954		 * This can be called from:
1955		 * 1) update_cpumasks_hier()
1956		 * 2) cpuset_hotplug_update_tasks()
1957		 *
1958		 * Check to see if it can be transitioned from valid to
1959		 * invalid partition or vice versa.
1960		 *
1961		 * A partition error happens when parent has tasks and all
1962		 * its effective CPUs will have to be distributed out.
1963		 */
1964		WARN_ON_ONCE(!is_partition_valid(parent));
1965		if (nocpu) {
1966			part_error = PERR_NOCPUS;
1967			if (is_partition_valid(cs))
1968				adding = cpumask_and(tmp->addmask,
1969						xcpus, parent->effective_xcpus);
1970		} else if (is_partition_invalid(cs) &&
1971			   cpumask_subset(xcpus, parent->effective_xcpus)) {
1972			struct cgroup_subsys_state *css;
1973			struct cpuset *child;
1974			bool exclusive = true;
1975
1976			/*
1977			 * Converting an invalid partition to a valid one
1978			 * has to pass the cpu exclusivity test.
1979			 */
1980			rcu_read_lock();
1981			cpuset_for_each_child(child, css, parent) {
1982				if (child == cs)
1983					continue;
1984				if (!cpusets_are_exclusive(cs, child)) {
1985					exclusive = false;
1986					break;
1987				}
1988			}
1989			rcu_read_unlock();
1990			if (exclusive)
1991				deleting = cpumask_and(tmp->delmask,
1992						xcpus, parent->effective_cpus);
1993			else
1994				part_error = PERR_NOTEXCL;
1995		}
1996	}
1997
1998write_error:
1999	if (part_error)
2000		WRITE_ONCE(cs->prs_err, part_error);
2001
2002	if (cmd == partcmd_update) {
2003		/*
2004		 * Check for possible transition between valid and invalid
2005		 * partition root.
2006		 */
2007		switch (cs->partition_root_state) {
2008		case PRS_ROOT:
2009		case PRS_ISOLATED:
2010			if (part_error) {
2011				new_prs = -old_prs;
2012				subparts_delta--;
2013			}
2014			break;
2015		case PRS_INVALID_ROOT:
2016		case PRS_INVALID_ISOLATED:
2017			if (!part_error) {
2018				new_prs = -old_prs;
2019				subparts_delta++;
2020			}
2021			break;
2022		}
2023	}
2024
2025	if (!adding && !deleting && (new_prs == old_prs))
2026		return 0;
2027
2028	/*
2029	 * Transitioning from invalid to valid or vice versa may require
2030	 * changing CS_CPU_EXCLUSIVE. In the case of partcmd_update,
2031	 * validate_change() has already been successfully called and
2032	 * CPU lists in cs haven't been updated yet. So defer it to later.
2033	 */
2034	if ((old_prs != new_prs) && (cmd != partcmd_update))  {
2035		int err = update_partition_exclusive(cs, new_prs);
2036
2037		if (err)
2038			return err;
2039	}
2040
2041	/*
2042	 * Change the parent's effective_cpus & effective_xcpus (top cpuset
2043	 * only).
2044	 *
2045	 * Newly added CPUs will be removed from effective_cpus and
2046	 * newly deleted ones will be added back to effective_cpus.
2047	 */
2048	spin_lock_irq(&callback_lock);
2049	if (old_prs != new_prs) {
2050		cs->partition_root_state = new_prs;
2051		if (new_prs <= 0)
2052			cs->nr_subparts = 0;
2053	}
2054	/*
2055	 * Adding CPUs to the parent's effective_cpus means deleting them from cs
2056	 * and vice versa.
2057	 */
2058	if (adding)
2059		isolcpus_updated += partition_xcpus_del(old_prs, parent,
2060							tmp->addmask);
2061	if (deleting)
2062		isolcpus_updated += partition_xcpus_add(new_prs, parent,
2063							tmp->delmask);
2064
2065	if (is_partition_valid(parent)) {
2066		parent->nr_subparts += subparts_delta;
2067		WARN_ON_ONCE(parent->nr_subparts < 0);
2068	}
2069	spin_unlock_irq(&callback_lock);
2070	update_unbound_workqueue_cpumask(isolcpus_updated);
2071
2072	if ((old_prs != new_prs) && (cmd == partcmd_update))
2073		update_partition_exclusive(cs, new_prs);
2074
2075	if (adding || deleting) {
2076		update_tasks_cpumask(parent, tmp->addmask);
2077		update_sibling_cpumasks(parent, cs, tmp);
2078	}
2079
2080	/*
2081	 * For partcmd_update without newmask, it is being called from
2082	 * cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken.
2083	 * Update the load balance flag and scheduling domain if
2084	 * cpus_read_trylock() is successful.
2085	 */
2086	if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) {
2087		update_partition_sd_lb(cs, old_prs);
2088		cpus_read_unlock();
2089	}
2090
2091	notify_partition_change(cs, old_prs);
2092	return 0;
2093}
2094
2095/**
2096 * compute_partition_effective_cpumask - compute effective_cpus for partition
2097 * @cs: partition root cpuset
2098 * @new_ecpus: previously computed effective_cpus to be updated
2099 *
2100 * Compute the effective_cpus of a partition root by scanning effective_xcpus
2101 * of child partition roots and excluding their effective_xcpus.
2102 *
2103 * This has the side effect of invalidating valid child partition roots,
2104 * if necessary. Since it is called from either cpuset_hotplug_update_tasks()
2105 * or update_cpumasks_hier() where parent and children are modified
2106 * successively, we don't need to call update_parent_effective_cpumask()
2107 * and the child's effective_cpus will be updated in later iterations.
2108 *
2109 * Note that rcu_read_lock() is assumed to be held.
2110 */
2111static void compute_partition_effective_cpumask(struct cpuset *cs,
2112						struct cpumask *new_ecpus)
2113{
2114	struct cgroup_subsys_state *css;
2115	struct cpuset *child;
2116	bool populated = partition_is_populated(cs, NULL);
2117
2118	/*
2119	 * Check child partition roots to see if they should be
2120	 * invalidated when
2121		 *  1) child effective_xcpus not a subset of new
2122		 *     exclusive_cpus
2123		 *  2) all the effective_cpus will be used up and cs
2124		 *     has tasks
2125	 */
2126	compute_effective_exclusive_cpumask(cs, new_ecpus);
2127	cpumask_and(new_ecpus, new_ecpus, cpu_active_mask);
2128
2129	rcu_read_lock();
2130	cpuset_for_each_child(child, css, cs) {
2131		if (!is_partition_valid(child))
2132			continue;
2133
2134		child->prs_err = 0;
2135		if (!cpumask_subset(child->effective_xcpus,
2136				    cs->effective_xcpus))
2137			child->prs_err = PERR_INVCPUS;
2138		else if (populated &&
2139			 cpumask_subset(new_ecpus, child->effective_xcpus))
2140			child->prs_err = PERR_NOCPUS;
2141
2142		if (child->prs_err) {
2143			int old_prs = child->partition_root_state;
2144
2145			/*
2146			 * Invalidate child partition
2147			 */
2148			spin_lock_irq(&callback_lock);
2149			make_partition_invalid(child);
2150			cs->nr_subparts--;
2151			child->nr_subparts = 0;
2152			spin_unlock_irq(&callback_lock);
2153			notify_partition_change(child, old_prs);
2154			continue;
2155		}
2156		cpumask_andnot(new_ecpus, new_ecpus,
2157			       child->effective_xcpus);
2158	}
2159	rcu_read_unlock();
2160}
2161
2162/*
2163 * update_cpumasks_hier() flags
2164 */
2165#define HIER_CHECKALL		0x01	/* Check all cpusets with no skipping */
2166#define HIER_NO_SD_REBUILD	0x02	/* Don't rebuild sched domains */
2167
2168/*
2169 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
2170 * @cs:  the cpuset to consider
2171 * @tmp: temp variables for calculating effective_cpus & partition setup
2172 * @flags: HIER_CHECKALL to check all cpusets without skipping, HIER_NO_SD_REBUILD to suppress the sched domain rebuild
2173 *
2174 * When configured cpumask is changed, the effective cpumasks of this cpuset
2175 * and all its descendants need to be updated.
2176 *
2177 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
2178 *
2179 * Called with cpuset_mutex held
2180 */
2181static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
2182				 int flags)
2183{
2184	struct cpuset *cp;
2185	struct cgroup_subsys_state *pos_css;
2186	bool need_rebuild_sched_domains = false;
2187	int old_prs, new_prs;
2188
2189	rcu_read_lock();
2190	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2191		struct cpuset *parent = parent_cs(cp);
2192		bool remote = is_remote_partition(cp);
2193		bool update_parent = false;
2194
2195		/*
2196		 * Skip a descendant remote partition that acquires CPUs
2197		 * directly from the top cpuset, unless it is cs itself.
2198		 */
2199		if (remote && (cp != cs)) {
2200			pos_css = css_rightmost_descendant(pos_css);
2201			continue;
2202		}
2203
2204		/*
2205		 * Update effective_xcpus if exclusive_cpus set.
2206		 * The case when exclusive_cpus isn't set is handled later.
2207		 */
2208		if (!cpumask_empty(cp->exclusive_cpus) && (cp != cs)) {
2209			spin_lock_irq(&callback_lock);
2210			compute_effective_exclusive_cpumask(cp, NULL);
2211			spin_unlock_irq(&callback_lock);
2212		}
2213
2214		old_prs = new_prs = cp->partition_root_state;
2215		if (remote || (is_partition_valid(parent) &&
2216			       is_partition_valid(cp)))
2217			compute_partition_effective_cpumask(cp, tmp->new_cpus);
2218		else
2219			compute_effective_cpumask(tmp->new_cpus, cp, parent);
2220
2221		/*
2222		 * A partition with no effective_cpus is allowed as long as
2223		 * there is no task associated with it. Call
2224		 * update_parent_effective_cpumask() to check it.
2225		 */
2226		if (is_partition_valid(cp) && cpumask_empty(tmp->new_cpus)) {
2227			update_parent = true;
2228			goto update_parent_effective;
2229		}
2230
2231		/*
2232		 * If it becomes empty, inherit the effective mask of the
2233		 * parent, which is guaranteed to have some CPUs unless
2234		 * it is a partition root that has explicitly distributed
2235		 * out all its CPUs.
2236		 */
2237		if (is_in_v2_mode() && !remote && cpumask_empty(tmp->new_cpus)) {
2238			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
2239			if (!cp->use_parent_ecpus) {
2240				cp->use_parent_ecpus = true;
2241				parent->child_ecpus_count++;
2242			}
2243		} else if (cp->use_parent_ecpus) {
2244			cp->use_parent_ecpus = false;
2245			WARN_ON_ONCE(!parent->child_ecpus_count);
2246			parent->child_ecpus_count--;
2247		}
2248
2249		if (remote)
2250			goto get_css;
2251
2252		/*
2253		 * Skip the whole subtree if
2254		 * 1) the cpumask remains the same,
2255		 * 2) has no partition root state,
2256		 * 3) HIER_CHECKALL flag not set, and
2257		 * 4) for v2 load balance state same as its parent.
2258		 */
2259		if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
2260		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
2261		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
2262		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
2263			pos_css = css_rightmost_descendant(pos_css);
2264			continue;
2265		}
2266
2267update_parent_effective:
2268		/*
2269		 * update_parent_effective_cpumask() should have been called
2270		 * for cs already in update_cpumask(). We should also call
2271		 * update_tasks_cpumask() again for tasks in the parent
2272		 * cpuset if the parent's effective_cpus changes.
2273		 */
2274		if ((cp != cs) && old_prs) {
2275			switch (parent->partition_root_state) {
2276			case PRS_ROOT:
2277			case PRS_ISOLATED:
2278				update_parent = true;
2279				break;
2280
2281			default:
2282				/*
2283				 * When parent is not a partition root or is
2284				 * invalid, child partition roots become
2285				 * invalid too.
2286				 */
2287				if (is_partition_valid(cp))
2288					new_prs = -cp->partition_root_state;
2289				WRITE_ONCE(cp->prs_err,
2290					   is_partition_invalid(parent)
2291					   ? PERR_INVPARENT : PERR_NOTPART);
2292				break;
2293			}
2294		}
2295get_css:
2296		if (!css_tryget_online(&cp->css))
2297			continue;
2298		rcu_read_unlock();
2299
2300		if (update_parent) {
2301			update_parent_effective_cpumask(cp, partcmd_update, NULL, tmp);
2302			/*
2303			 * The cpuset partition_root_state may become
2304			 * invalid. Capture it.
2305			 */
2306			new_prs = cp->partition_root_state;
2307		}
2308
2309		spin_lock_irq(&callback_lock);
2310		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
2311		cp->partition_root_state = new_prs;
2312		/*
2313		 * Make sure effective_xcpus is properly set for a valid
2314		 * partition root.
2315		 */
2316		if ((new_prs > 0) && cpumask_empty(cp->exclusive_cpus))
2317			cpumask_and(cp->effective_xcpus,
2318				    cp->cpus_allowed, parent->effective_xcpus);
2319		else if (new_prs < 0)
2320			reset_partition_data(cp);
2321		spin_unlock_irq(&callback_lock);
2322
2323		notify_partition_change(cp, old_prs);
2324
2325		WARN_ON(!is_in_v2_mode() &&
2326			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
2327
2328		update_tasks_cpumask(cp, cp->effective_cpus);
2329
2330		/*
2331		 * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
2332		 * from parent if current cpuset isn't a valid partition root
2333		 * and their load balance states differ.
2334		 */
2335		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2336		    !is_partition_valid(cp) &&
2337		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
2338			if (is_sched_load_balance(parent))
2339				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2340			else
2341				clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
2342		}
2343
2344		/*
2345		 * On legacy hierarchy, if the effective cpumask of any non-
2346		 * empty cpuset is changed, we need to rebuild sched domains.
2347		 * On default hierarchy, the cpuset needs to be a partition
2348		 * root as well.
2349		 */
2350		if (!cpumask_empty(cp->cpus_allowed) &&
2351		    is_sched_load_balance(cp) &&
2352		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
2353		    is_partition_valid(cp)))
2354			need_rebuild_sched_domains = true;
2355
2356		rcu_read_lock();
2357		css_put(&cp->css);
2358	}
2359	rcu_read_unlock();
2360
2361	if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
2362		rebuild_sched_domains_locked();
2363}
2364
2365/**
2366 * update_sibling_cpumasks - Update siblings cpumasks
2367 * @parent:  Parent cpuset
2368 * @cs:      Current cpuset
2369 * @tmp:     Temp variables
2370 */
2371static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
2372				    struct tmpmasks *tmp)
2373{
2374	struct cpuset *sibling;
2375	struct cgroup_subsys_state *pos_css;
2376
2377	lockdep_assert_held(&cpuset_mutex);
2378
2379	/*
2380	 * Check all its siblings and call update_cpumasks_hier()
2381	 * if their effective_cpus will need to be changed.
2382	 *
2383	 * With the addition of effective_xcpus which is a subset of
2384	 * cpus_allowed. It is possible a change in parent's effective_cpus
2385	 * due to a change in a child partition's effective_xcpus will impact
2386	 * its siblings even if they do not inherit parent's effective_cpus
2387	 * directly.
2388	 *
2389	 * The update_cpumasks_hier() function may sleep. So we have to
2390	 * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
2391	 * flag is used to suppress rebuild of sched domains as the callers
2392	 * will take care of that.
2393	 */
2394	rcu_read_lock();
2395	cpuset_for_each_child(sibling, pos_css, parent) {
2396		if (sibling == cs)
2397			continue;
2398		if (!sibling->use_parent_ecpus &&
2399		    !is_partition_valid(sibling)) {
2400			compute_effective_cpumask(tmp->new_cpus, sibling,
2401						  parent);
2402			if (cpumask_equal(tmp->new_cpus, sibling->effective_cpus))
2403				continue;
2404		}
2405		if (!css_tryget_online(&sibling->css))
2406			continue;
2407
2408		rcu_read_unlock();
2409		update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
2410		rcu_read_lock();
2411		css_put(&sibling->css);
2412	}
2413	rcu_read_unlock();
2414}
2415
2416/**
2417 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
2418 * @cs: the cpuset to consider
2419 * @trialcs: trial cpuset
2420 * @buf: buffer of cpu numbers written to this cpuset
2421 */
2422static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2423			  const char *buf)
2424{
2425	int retval;
2426	struct tmpmasks tmp;
2427	struct cpuset *parent = parent_cs(cs);
2428	bool invalidate = false;
2429	int hier_flags = 0;
2430	int old_prs = cs->partition_root_state;
2431
2432	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
2433	if (cs == &top_cpuset)
2434		return -EACCES;
2435
2436	/*
2437	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
2438	 * Since cpulist_parse() fails on an empty mask, we special case
2439	 * that parsing.  The validate_change() call ensures that cpusets
2440	 * with tasks have cpus.
2441	 */
2442	if (!*buf) {
2443		cpumask_clear(trialcs->cpus_allowed);
2444		cpumask_clear(trialcs->effective_xcpus);
2445	} else {
2446		retval = cpulist_parse(buf, trialcs->cpus_allowed);
2447		if (retval < 0)
2448			return retval;
2449
2450		if (!cpumask_subset(trialcs->cpus_allowed,
2451				    top_cpuset.cpus_allowed))
2452			return -EINVAL;
2453
2454		/*
2455		 * When exclusive_cpus isn't explicitly set, it is constrained
2456		 * by cpus_allowed and parent's effective_xcpus. Otherwise,
2457		 * trialcs->effective_xcpus is used as a temporary cpumask
2458		 * for checking validity of the partition root.
2459		 */
2460		if (!cpumask_empty(trialcs->exclusive_cpus) || is_partition_valid(cs))
2461			compute_effective_exclusive_cpumask(trialcs, NULL);
2462	}
2463
2464	/* Nothing to do if the cpus didn't change */
2465	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
2466		return 0;
2467
2468	if (alloc_cpumasks(NULL, &tmp))
2469		return -ENOMEM;
2470
2471	if (old_prs) {
2472		if (is_partition_valid(cs) &&
2473		    cpumask_empty(trialcs->effective_xcpus)) {
2474			invalidate = true;
2475			cs->prs_err = PERR_INVCPUS;
2476		} else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2477			invalidate = true;
2478			cs->prs_err = PERR_HKEEPING;
2479		} else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2480			invalidate = true;
2481			cs->prs_err = PERR_NOCPUS;
2482		}
2483	}
2484
2485	/*
2486	 * Check all the descendants in update_cpumasks_hier() if
2487	 * effective_xcpus is to be changed.
2488	 */
2489	if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
2490		hier_flags = HIER_CHECKALL;
2491
2492	retval = validate_change(cs, trialcs);
2493
2494	if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
2495		struct cgroup_subsys_state *css;
2496		struct cpuset *cp;
2497
2498		/*
2499		 * The -EINVAL error code indicates that the partition sibling
2500		 * CPU exclusivity rule has been violated. We still allow
2501		 * the cpumask change to proceed while invalidating the
2502		 * partition. However, any conflicting sibling partitions
2503		 * have to be marked as invalid too.
2504		 */
2505		invalidate = true;
2506		rcu_read_lock();
2507		cpuset_for_each_child(cp, css, parent) {
2508			struct cpumask *xcpus = fetch_xcpus(trialcs);
2509
2510			if (is_partition_valid(cp) &&
2511			    cpumask_intersects(xcpus, cp->effective_xcpus)) {
2512				rcu_read_unlock();
2513				update_parent_effective_cpumask(cp, partcmd_invalidate, NULL, &tmp);
2514				rcu_read_lock();
2515			}
2516		}
2517		rcu_read_unlock();
2518		retval = 0;
2519	}
2520
2521	if (retval < 0)
2522		goto out_free;
2523
2524	if (is_partition_valid(cs) ||
2525	   (is_partition_invalid(cs) && !invalidate)) {
2526		struct cpumask *xcpus = trialcs->effective_xcpus;
2527
2528		if (cpumask_empty(xcpus) && is_partition_invalid(cs))
2529			xcpus = trialcs->cpus_allowed;
2530
2531		/*
2532		 * Call remote_cpus_update() to handle valid remote partition
2533		 */
2534		if (is_remote_partition(cs))
2535			remote_cpus_update(cs, xcpus, &tmp);
2536		else if (invalidate)
2537			update_parent_effective_cpumask(cs, partcmd_invalidate,
2538							NULL, &tmp);
2539		else
2540			update_parent_effective_cpumask(cs, partcmd_update,
2541							xcpus, &tmp);
2542	} else if (!cpumask_empty(cs->exclusive_cpus)) {
2543		/*
2544		 * Use trialcs->effective_cpus as a temp cpumask
2545		 */
2546		remote_partition_check(cs, trialcs->effective_xcpus,
2547				       trialcs->effective_cpus, &tmp);
2548	}
2549
2550	spin_lock_irq(&callback_lock);
2551	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
2552	cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2553	if ((old_prs > 0) && !is_partition_valid(cs))
2554		reset_partition_data(cs);
2555	spin_unlock_irq(&callback_lock);
2556
2557	/* effective_cpus/effective_xcpus will be updated here */
2558	update_cpumasks_hier(cs, &tmp, hier_flags);
2559
2560	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2561	if (cs->partition_root_state)
2562		update_partition_sd_lb(cs, old_prs);
2563out_free:
2564	free_cpumasks(NULL, &tmp);
2565	return retval;
2566}
2567
2568/**
2569 * update_exclusive_cpumask - update the exclusive_cpus mask of a cpuset
2570 * @cs: the cpuset to consider
2571 * @trialcs: trial cpuset
2572 * @buf: buffer of cpu numbers written to this cpuset
2573 *
2574 * The tasks' cpumask will be updated if cs is a valid partition root.
2575 */
2576static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs,
2577				    const char *buf)
2578{
2579	int retval;
2580	struct tmpmasks tmp;
2581	struct cpuset *parent = parent_cs(cs);
2582	bool invalidate = false;
2583	int hier_flags = 0;
2584	int old_prs = cs->partition_root_state;
2585
2586	if (!*buf) {
2587		cpumask_clear(trialcs->exclusive_cpus);
2588		cpumask_clear(trialcs->effective_xcpus);
2589	} else {
2590		retval = cpulist_parse(buf, trialcs->exclusive_cpus);
2591		if (retval < 0)
2592			return retval;
2593		if (!is_cpu_exclusive(cs))
2594			set_bit(CS_CPU_EXCLUSIVE, &trialcs->flags);
2595	}
2596
2597	/* Nothing to do if the CPUs didn't change */
2598	if (cpumask_equal(cs->exclusive_cpus, trialcs->exclusive_cpus))
2599		return 0;
2600
2601	if (*buf)
2602		compute_effective_exclusive_cpumask(trialcs, NULL);
2603
2604	/*
2605	 * Check all the descendants in update_cpumasks_hier() if
2606	 * effective_xcpus is to be changed.
2607	 */
2608	if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus))
2609		hier_flags = HIER_CHECKALL;
2610
2611	retval = validate_change(cs, trialcs);
2612	if (retval)
2613		return retval;
2614
2615	if (alloc_cpumasks(NULL, &tmp))
2616		return -ENOMEM;
2617
2618	if (old_prs) {
2619		if (cpumask_empty(trialcs->effective_xcpus)) {
2620			invalidate = true;
2621			cs->prs_err = PERR_INVCPUS;
2622		} else if (prstate_housekeeping_conflict(old_prs, trialcs->effective_xcpus)) {
2623			invalidate = true;
2624			cs->prs_err = PERR_HKEEPING;
2625		} else if (tasks_nocpu_error(parent, cs, trialcs->effective_xcpus)) {
2626			invalidate = true;
2627			cs->prs_err = PERR_NOCPUS;
2628		}
2629
2630		if (is_remote_partition(cs)) {
2631			if (invalidate)
2632				remote_partition_disable(cs, &tmp);
2633			else
2634				remote_cpus_update(cs, trialcs->effective_xcpus,
2635						   &tmp);
2636		} else if (invalidate) {
2637			update_parent_effective_cpumask(cs, partcmd_invalidate,
2638							NULL, &tmp);
2639		} else {
2640			update_parent_effective_cpumask(cs, partcmd_update,
2641						trialcs->effective_xcpus, &tmp);
2642		}
2643	} else if (!cpumask_empty(trialcs->exclusive_cpus)) {
2644		/*
2645		 * Use trialcs->effective_cpus as a temp cpumask
2646		 */
2647		remote_partition_check(cs, trialcs->effective_xcpus,
2648				       trialcs->effective_cpus, &tmp);
2649	}
2650	spin_lock_irq(&callback_lock);
2651	cpumask_copy(cs->exclusive_cpus, trialcs->exclusive_cpus);
2652	cpumask_copy(cs->effective_xcpus, trialcs->effective_xcpus);
2653	if ((old_prs > 0) && !is_partition_valid(cs))
2654		reset_partition_data(cs);
2655	spin_unlock_irq(&callback_lock);
2656
2657	/*
2658	 * Call update_cpumasks_hier() to update effective_cpus/effective_xcpus
2659	 * of the subtree when it is a valid partition root or effective_xcpus
2660	 * is updated.
2661	 */
2662	if (is_partition_valid(cs) || hier_flags)
2663		update_cpumasks_hier(cs, &tmp, hier_flags);
2664
2665	/* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */
2666	if (cs->partition_root_state)
2667		update_partition_sd_lb(cs, old_prs);
2668
2669	free_cpumasks(NULL, &tmp);
2670	return 0;
2671}
2672
2673/*
2674 * Migrate memory region from one set of nodes to another.  This is
2675 * performed asynchronously as it can be called from the process migration path
2676 * holding locks involved in process management.  All mm migrations are
2677 * performed in the queued order and can be waited for by flushing
2678 * cpuset_migrate_mm_wq.
2679 */
2680
2681struct cpuset_migrate_mm_work {
2682	struct work_struct	work;
2683	struct mm_struct	*mm;
2684	nodemask_t		from;
2685	nodemask_t		to;
2686};
2687
2688static void cpuset_migrate_mm_workfn(struct work_struct *work)
2689{
2690	struct cpuset_migrate_mm_work *mwork =
2691		container_of(work, struct cpuset_migrate_mm_work, work);
2692
2693	/* on a wq worker, no need to worry about %current's mems_allowed */
2694	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
2695	mmput(mwork->mm);
2696	kfree(mwork);
2697}
2698
2699static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
2700							const nodemask_t *to)
2701{
2702	struct cpuset_migrate_mm_work *mwork;
2703
2704	if (nodes_equal(*from, *to)) {
2705		mmput(mm);
2706		return;
2707	}
2708
2709	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
2710	if (mwork) {
2711		mwork->mm = mm;
2712		mwork->from = *from;
2713		mwork->to = *to;
2714		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
2715		queue_work(cpuset_migrate_mm_wq, &mwork->work);
2716	} else {
2717		mmput(mm);
2718	}
2719}
2720
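/*
 * Flush any asynchronous memory migrations queued by cpuset_migrate_mm()
 * once an attach operation has completed.
 */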
2721static void cpuset_post_attach(void)
2722{
2723	flush_workqueue(cpuset_migrate_mm_wq);
2724}
2725
2726/*
2727 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
2728 * @tsk: the task to change
2729 * @newmems: new nodes that the task will be set
2730 *
2731 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
2732 * and rebind the task's mempolicy, if it has one. If the task is allocating in
2733 * parallel, it might temporarily see an empty intersection, which results in
2734 * a seqlock check and retry before OOM or allocation failure.
2735 */
2736static void cpuset_change_task_nodemask(struct task_struct *tsk,
2737					nodemask_t *newmems)
2738{
2739	task_lock(tsk);
2740
2741	local_irq_disable();
2742	write_seqcount_begin(&tsk->mems_allowed_seq);
2743
2744	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
2745	mpol_rebind_task(tsk, newmems);
2746	tsk->mems_allowed = *newmems;
2747
2748	write_seqcount_end(&tsk->mems_allowed_seq);
2749	local_irq_enable();
2750
2751	task_unlock(tsk);
2752}
2753
2754static void *cpuset_being_rebound;
2755
2756/**
2757 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
2758 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
2759 *
2760 * Iterate through each task of @cs updating its mems_allowed to the
2761 * effective cpuset's.  As this function is called with cpuset_mutex held,
2762 * cpuset membership stays stable.
2763 */
2764static void update_tasks_nodemask(struct cpuset *cs)
2765{
2766	static nodemask_t newmems;	/* protected by cpuset_mutex */
2767	struct css_task_iter it;
2768	struct task_struct *task;
2769
2770	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
2771
2772	guarantee_online_mems(cs, &newmems);
2773
2774	/*
2775	 * The mpol_rebind_mm() call takes mmap_lock, which we couldn't
2776	 * take while holding tasklist_lock.  Forks can happen - the
2777	 * mpol_dup() cpuset_being_rebound check will catch such forks,
2778	 * and rebind their vma mempolicies too.  Because we still hold
2779	 * the global cpuset_mutex, we know that no other rebind effort
2780	 * will be contending for the global variable cpuset_being_rebound.
2781	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
2782	 * is idempotent.  Also migrate pages in each mm to new nodes.
2783	 */
2784	css_task_iter_start(&cs->css, 0, &it);
2785	while ((task = css_task_iter_next(&it))) {
2786		struct mm_struct *mm;
2787		bool migrate;
2788
2789		cpuset_change_task_nodemask(task, &newmems);
2790
2791		mm = get_task_mm(task);
2792		if (!mm)
2793			continue;
2794
2795		migrate = is_memory_migrate(cs);
2796
2797		mpol_rebind_mm(mm, &cs->mems_allowed);
2798		if (migrate)
2799			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
2800		else
2801			mmput(mm);
2802	}
2803	css_task_iter_end(&it);
2804
2805	/*
2806	 * All the tasks' nodemasks have been updated, update
2807	 * cs->old_mems_allowed.
2808	 */
2809	cs->old_mems_allowed = newmems;
2810
2811	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
2812	cpuset_being_rebound = NULL;
2813}
2814
2815/*
2816 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
2817 * @cs: the cpuset to consider
2818 * @new_mems: a temp variable for calculating new effective_mems
2819 *
2820 * When configured nodemask is changed, the effective nodemasks of this cpuset
2821 * and all its descendants need to be updated.
2822 *
2823 * On the legacy hierarchy, effective_mems will be the same as mems_allowed.
2824 *
2825 * Called with cpuset_mutex held
2826 */
2827static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
2828{
2829	struct cpuset *cp;
2830	struct cgroup_subsys_state *pos_css;
2831
2832	rcu_read_lock();
2833	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
2834		struct cpuset *parent = parent_cs(cp);
2835
2836		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
2837
2838		/*
2839		 * If it becomes empty, inherit the effective mask of the
2840		 * parent, which is guaranteed to have some MEMs.
2841		 */
2842		if (is_in_v2_mode() && nodes_empty(*new_mems))
2843			*new_mems = parent->effective_mems;
2844
2845		/* Skip the whole subtree if the nodemask remains the same. */
2846		if (nodes_equal(*new_mems, cp->effective_mems)) {
2847			pos_css = css_rightmost_descendant(pos_css);
2848			continue;
2849		}
2850
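		/*
		 * Pin the cpuset and drop the RCU read lock; the nodemask
		 * update below may sleep (mmap_lock, memory allocation).
		 */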
2851		if (!css_tryget_online(&cp->css))
2852			continue;
2853		rcu_read_unlock();
2854
2855		spin_lock_irq(&callback_lock);
2856		cp->effective_mems = *new_mems;
2857		spin_unlock_irq(&callback_lock);
2858
2859		WARN_ON(!is_in_v2_mode() &&
2860			!nodes_equal(cp->mems_allowed, cp->effective_mems));
2861
2862		update_tasks_nodemask(cp);
2863
2864		rcu_read_lock();
2865		css_put(&cp->css);
2866	}
2867	rcu_read_unlock();
2868}
2869
2870/*
2871 * Handle user request to change the 'mems' memory placement
2872 * of a cpuset.  Needs to validate the request, update the
2873 * cpuset's mems_allowed, and for each task in the cpuset,
2874 * update mems_allowed, rebind the task's mempolicy and any vma
2875 * mempolicies, and, if the cpuset is marked 'memory_migrate',
2876 * migrate the task's pages to the new memory.
2877 *
2878 * Call with cpuset_mutex held. May take callback_lock during call.
2879 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
2880 * lock each such task's mm->mmap_lock, scan its vmas and rebind
2881 * their mempolicies to the cpuset's new mems_allowed.
2882 */
2883static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
2884			   const char *buf)
2885{
2886	int retval;
2887
2888	/*
2889	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
2890	 * it's read-only
2891	 */
2892	if (cs == &top_cpuset) {
2893		retval = -EACCES;
2894		goto done;
2895	}
2896
2897	/*
2898	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
2899	 * Since nodelist_parse() fails on an empty mask, we special case
2900	 * that parsing.  The validate_change() call ensures that cpusets
2901	 * with tasks have memory.
2902	 */
2903	if (!*buf) {
2904		nodes_clear(trialcs->mems_allowed);
2905	} else {
2906		retval = nodelist_parse(buf, trialcs->mems_allowed);
2907		if (retval < 0)
2908			goto done;
2909
2910		if (!nodes_subset(trialcs->mems_allowed,
2911				  top_cpuset.mems_allowed)) {
2912			retval = -EINVAL;
2913			goto done;
2914		}
2915	}
2916
2917	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
2918		retval = 0;		/* Too easy - nothing to do */
2919		goto done;
2920	}
2921	retval = validate_change(cs, trialcs);
2922	if (retval < 0)
2923		goto done;
2924
2925	check_insane_mems_config(&trialcs->mems_allowed);
2926
2927	spin_lock_irq(&callback_lock);
2928	cs->mems_allowed = trialcs->mems_allowed;
2929	spin_unlock_irq(&callback_lock);
2930
2931	/* use trialcs->mems_allowed as a temp variable */
2932	update_nodemasks_hier(cs, &trialcs->mems_allowed);
2933done:
2934	return retval;
2935}
2936
2937bool current_cpuset_is_being_rebound(void)
2938{
2939	bool ret;
2940
2941	rcu_read_lock();
2942	ret = task_cs(current) == cpuset_being_rebound;
2943	rcu_read_unlock();
2944
2945	return ret;
2946}
2947
2948static int update_relax_domain_level(struct cpuset *cs, s64 val)
2949{
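	/* A relax_domain_level of -1 means no request; use the system default. */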
2950#ifdef CONFIG_SMP
2951	if (val < -1 || val >= sched_domain_level_max)
2952		return -EINVAL;
2953#endif
2954
2955	if (val != cs->relax_domain_level) {
2956		cs->relax_domain_level = val;
2957		if (!cpumask_empty(cs->cpus_allowed) &&
2958		    is_sched_load_balance(cs))
2959			rebuild_sched_domains_locked();
2960	}
2961
2962	return 0;
2963}
2964
2965/**
2966 * update_tasks_flags - update the spread flags of tasks in the cpuset.
2967 * @cs: the cpuset in which each task's spread flags need to be changed
2968 *
2969 * Iterate through each task of @cs updating its spread flags.  As this
2970 * function is called with cpuset_mutex held, cpuset membership stays
2971 * stable.
2972 */
2973static void update_tasks_flags(struct cpuset *cs)
2974{
2975	struct css_task_iter it;
2976	struct task_struct *task;
2977
2978	css_task_iter_start(&cs->css, 0, &it);
2979	while ((task = css_task_iter_next(&it)))
2980		cpuset_update_task_spread_flags(cs, task);
2981	css_task_iter_end(&it);
2982}
2983
2984/*
2985 * update_flag - read a 0 or a 1 in a file and update associated flag
2986 * bit:		the bit to update (see cpuset_flagbits_t)
2987 * cs:		the cpuset to update
2988 * turning_on: 	whether the flag is being set or cleared
2989 *
2990 * Call with cpuset_mutex held.
2991 */
2992
2993static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
2994		       int turning_on)
2995{
2996	struct cpuset *trialcs;
2997	int balance_flag_changed;
2998	int spread_flag_changed;
2999	int err;
3000
3001	trialcs = alloc_trial_cpuset(cs);
3002	if (!trialcs)
3003		return -ENOMEM;
3004
3005	if (turning_on)
3006		set_bit(bit, &trialcs->flags);
3007	else
3008		clear_bit(bit, &trialcs->flags);
3009
3010	err = validate_change(cs, trialcs);
3011	if (err < 0)
3012		goto out;
3013
3014	balance_flag_changed = (is_sched_load_balance(cs) !=
3015				is_sched_load_balance(trialcs));
3016
3017	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
3018			|| (is_spread_page(cs) != is_spread_page(trialcs)));
3019
3020	spin_lock_irq(&callback_lock);
3021	cs->flags = trialcs->flags;
3022	spin_unlock_irq(&callback_lock);
3023
3024	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
3025		rebuild_sched_domains_locked();
3026
3027	if (spread_flag_changed)
3028		update_tasks_flags(cs);
3029out:
3030	free_cpuset(trialcs);
3031	return err;
3032}
3033
3034/**
3035 * update_prstate - update partition_root_state
3036 * @cs: the cpuset to update
3037 * @new_prs: new partition root state
3038 * Return: 0 if successful, != 0 if error
3039 *
3040 * Call with cpuset_mutex held.
3041 */
3042static int update_prstate(struct cpuset *cs, int new_prs)
3043{
3044	int err = PERR_NONE, old_prs = cs->partition_root_state;
3045	struct cpuset *parent = parent_cs(cs);
3046	struct tmpmasks tmpmask;
3047	bool new_xcpus_state = false;
3048
3049	if (old_prs == new_prs)
3050		return 0;
3051
3052	/*
3053	 * Treat a previously invalid partition root as if it is a "member".
3054	 */
3055	if (new_prs && is_prs_invalid(old_prs))
3056		old_prs = PRS_MEMBER;
3057
3058	if (alloc_cpumasks(NULL, &tmpmask))
3059		return -ENOMEM;
3060
3061	/*
3062	 * Set up effective_xcpus if not properly set yet; it will be cleared
3063	 * later if the partition becomes invalid.
3064	 */
3065	if ((new_prs > 0) && cpumask_empty(cs->exclusive_cpus)) {
3066		spin_lock_irq(&callback_lock);
3067		cpumask_and(cs->effective_xcpus,
3068			    cs->cpus_allowed, parent->effective_xcpus);
3069		spin_unlock_irq(&callback_lock);
3070	}
3071
3072	err = update_partition_exclusive(cs, new_prs);
3073	if (err)
3074		goto out;
3075
3076	if (!old_prs) {
3077		enum partition_cmd cmd = (new_prs == PRS_ROOT)
3078				       ? partcmd_enable : partcmd_enablei;
3079
3080		/*
3081		 * cpus_allowed cannot be empty.
3082		 */
3083		if (cpumask_empty(cs->cpus_allowed)) {
3084			err = PERR_CPUSEMPTY;
3085			goto out;
3086		}
3087
3088		err = update_parent_effective_cpumask(cs, cmd, NULL, &tmpmask);
3089		/*
3090		 * If an attempt to become local partition root fails,
3091		 * try to become a remote partition root instead.
3092		 */
3093		if (err && remote_partition_enable(cs, new_prs, &tmpmask))
3094			err = 0;
3095	} else if (old_prs && new_prs) {
3096		/*
3097		 * A change in load balance state only, no change in cpumasks.
3098		 */
3099		new_xcpus_state = true;
3100	} else {
3101		/*
3102		 * Switching back to member is always allowed even if it
3103		 * disables child partitions.
3104		 */
3105		if (is_remote_partition(cs))
3106			remote_partition_disable(cs, &tmpmask);
3107		else
3108			update_parent_effective_cpumask(cs, partcmd_disable,
3109							NULL, &tmpmask);
3110
3111		/*
3112		 * Invalidation of child partitions will be done in
3113		 * update_cpumasks_hier().
3114		 */
3115	}
3116out:
3117	/*
3118	 * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
3119	 * happens.
3120	 */
3121	if (err) {
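		/*
		 * Negating new_prs maps a valid PRS_ROOT/PRS_ISOLATED value
		 * to its corresponding invalid state (see is_prs_invalid()).
		 */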
3122		new_prs = -new_prs;
3123		update_partition_exclusive(cs, new_prs);
3124	}
3125
3126	spin_lock_irq(&callback_lock);
3127	cs->partition_root_state = new_prs;
3128	WRITE_ONCE(cs->prs_err, err);
3129	if (!is_partition_valid(cs))
3130		reset_partition_data(cs);
3131	else if (new_xcpus_state)
3132		partition_xcpus_newstate(old_prs, new_prs, cs->effective_xcpus);
3133	spin_unlock_irq(&callback_lock);
3134	update_unbound_workqueue_cpumask(new_xcpus_state);
3135
3136	/* Force update if switching back to member */
3137	update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
3138
3139	/* Update sched domains and load balance flag */
3140	update_partition_sd_lb(cs, old_prs);
3141
3142	notify_partition_change(cs, old_prs);
3143	free_cpumasks(NULL, &tmpmask);
3144	return 0;
3145}
3146
3147/*
3148 * Frequency meter - How fast is some event occurring?
3149 *
3150 * These routines manage a digitally filtered, constant time based,
3151 * event frequency meter.  There are four routines:
3152 *   fmeter_init() - initialize a frequency meter.
3153 *   fmeter_markevent() - called each time the event happens.
3154 *   fmeter_getrate() - returns the recent rate of such events.
3155 *   fmeter_update() - internal routine used to update fmeter.
3156 *
3157 * A common data structure is passed to each of these routines,
3158 * which is used to keep track of the state required to manage the
3159 * frequency meter and its digital filter.
3160 *
3161 * The filter works on the number of events marked per unit time.
3162 * The filter is single-pole low-pass recursive (IIR).  The time unit
3163 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
3164 * simulate 3 decimal digits of precision (multiplied by 1000).
3165 *
3166 * With an FM_COEF of 933, and a time base of 1 second, the filter
3167 * has a half-life of 10 seconds, meaning that if the events quit
3168 * happening, then the rate returned from the fmeter_getrate()
3169 * will be cut in half each 10 seconds, until it converges to zero.
3170 *
3171 * It is not worth doing a real infinitely recursive filter.  If more
3172 * than FM_MAXTICKS ticks have elapsed since the last filter event,
3173 * just compute FM_MAXTICKS ticks worth, by which point the level
3174 * will be stable.
3175 *
3176 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
3177 * arithmetic overflow in the fmeter_update() routine.
3178 *
3179 * Given the simple 32 bit integer arithmetic used, this meter works
3180 * best for reporting rates between one per millisecond (msec) and
3181 * one per 32 (approx) seconds.  At constant rates faster than one
3182 * per msec it maxes out at values just under 1,000,000.  At constant
3183 * rates between one per msec, and one per second it will stabilize
3184 * to a value N*1000, where N is the rate of events per second.
3185 * At constant rates between one per second and one per 32 seconds,
3186 * it will be choppy, moving up on the seconds that have an event,
3187 * and then decaying until the next event.  At rates slower than
3188 * about one in 32 seconds, it decays all the way back to zero between
3189 * each event.
3190 */
3191
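/*
 * For example, with FM_COEF = 933 the value decays to 0.933^10 ~= 0.50 of
 * itself over 10 idle seconds (the 10 second half-life noted above), and a
 * steady rate of one event per second settles at about 1 * 1000 = 1000.
 */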
3192#define FM_COEF 933		/* coefficient for half-life of 10 secs */
3193#define FM_MAXTICKS ((u32)99)   /* useless computing more ticks than this */
3194#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
3195#define FM_SCALE 1000		/* faux fixed point scale */
3196
3197/* Initialize a frequency meter */
3198static void fmeter_init(struct fmeter *fmp)
3199{
3200	fmp->cnt = 0;
3201	fmp->val = 0;
3202	fmp->time = 0;
3203	spin_lock_init(&fmp->lock);
3204}
3205
3206/* Internal meter update - process cnt events and update value */
3207static void fmeter_update(struct fmeter *fmp)
3208{
3209	time64_t now;
3210	u32 ticks;
3211
3212	now = ktime_get_seconds();
3213	ticks = now - fmp->time;
3214
3215	if (ticks == 0)
3216		return;
3217
3218	ticks = min(FM_MAXTICKS, ticks);
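	/* Decay the old value by FM_COEF/FM_SCALE once per elapsed second. */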
3219	while (ticks-- > 0)
3220		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
3221	fmp->time = now;
3222
3223	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
3224	fmp->cnt = 0;
3225}
3226
3227/* Process any previous ticks, then bump cnt by one (times scale). */
3228static void fmeter_markevent(struct fmeter *fmp)
3229{
3230	spin_lock(&fmp->lock);
3231	fmeter_update(fmp);
3232	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
3233	spin_unlock(&fmp->lock);
3234}
3235
3236/* Process any previous ticks, then return current value. */
3237static int fmeter_getrate(struct fmeter *fmp)
3238{
3239	int val;
3240
3241	spin_lock(&fmp->lock);
3242	fmeter_update(fmp);
3243	val = fmp->val;
3244	spin_unlock(&fmp->lock);
3245	return val;
3246}
3247
3248static struct cpuset *cpuset_attach_old_cs;
3249
3250/*
3251 * Check to see if a cpuset can accept a new task
3252 * For v1, cpus_allowed and mems_allowed can't be empty.
3253 * For v2, effective_cpus can't be empty.
3254 * Note that in v1, effective_cpus = cpus_allowed.
3255 */
3256static int cpuset_can_attach_check(struct cpuset *cs)
3257{
3258	if (cpumask_empty(cs->effective_cpus) ||
3259	   (!is_in_v2_mode() && nodes_empty(cs->mems_allowed)))
3260		return -ENOSPC;
3261	return 0;
3262}
3263
3264static void reset_migrate_dl_data(struct cpuset *cs)
3265{
3266	cs->nr_migrate_dl_tasks = 0;
3267	cs->sum_migrate_dl_bw = 0;
3268}
3269
3270/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
3271static int cpuset_can_attach(struct cgroup_taskset *tset)
3272{
3273	struct cgroup_subsys_state *css;
3274	struct cpuset *cs, *oldcs;
3275	struct task_struct *task;
3276	bool cpus_updated, mems_updated;
3277	int ret;
3278
3279	/* used later by cpuset_attach() */
3280	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
3281	oldcs = cpuset_attach_old_cs;
3282	cs = css_cs(css);
3283
3284	mutex_lock(&cpuset_mutex);
3285
3286	/* Check to see if task is allowed in the cpuset */
3287	ret = cpuset_can_attach_check(cs);
3288	if (ret)
3289		goto out_unlock;
3290
3291	cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
3292	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3293
3294	cgroup_taskset_for_each(task, css, tset) {
3295		ret = task_can_attach(task);
3296		if (ret)
3297			goto out_unlock;
3298
3299		/*
3300		 * Skip the per-task permission check in v2 when nothing changes;
3301		 * migration permission derives from hierarchy ownership in
3302		 * cgroup_procs_write_permission().
3303		 */
3304		if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
3305		    (cpus_updated || mems_updated)) {
3306			ret = security_task_setscheduler(task);
3307			if (ret)
3308				goto out_unlock;
3309		}
3310
3311		if (dl_task(task)) {
3312			cs->nr_migrate_dl_tasks++;
3313			cs->sum_migrate_dl_bw += task->dl.dl_bw;
3314		}
3315	}
3316
3317	if (!cs->nr_migrate_dl_tasks)
3318		goto out_success;
3319
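	/*
	 * If the deadline tasks are moving to a disjoint set of CPUs, reserve
	 * their bandwidth on the destination up front so that the attach
	 * fails instead of overcommitting deadline bandwidth.
	 */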
3320	if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) {
3321		int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus);
3322
3323		if (unlikely(cpu >= nr_cpu_ids)) {
3324			reset_migrate_dl_data(cs);
3325			ret = -EINVAL;
3326			goto out_unlock;
3327		}
3328
3329		ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw);
3330		if (ret) {
3331			reset_migrate_dl_data(cs);
3332			goto out_unlock;
3333		}
3334	}
3335
3336out_success:
3337	/*
3338	 * Mark attach is in progress.  This makes validate_change() fail
3339	 * changes which zero cpus/mems_allowed.
3340	 */
3341	cs->attach_in_progress++;
3342out_unlock:
3343	mutex_unlock(&cpuset_mutex);
3344	return ret;
3345}
3346
3347static void cpuset_cancel_attach(struct cgroup_taskset *tset)
3348{
3349	struct cgroup_subsys_state *css;
3350	struct cpuset *cs;
3351
3352	cgroup_taskset_first(tset, &css);
3353	cs = css_cs(css);
3354
3355	mutex_lock(&cpuset_mutex);
3356	cs->attach_in_progress--;
3357	if (!cs->attach_in_progress)
3358		wake_up(&cpuset_attach_wq);
3359
3360	if (cs->nr_migrate_dl_tasks) {
3361		int cpu = cpumask_any(cs->effective_cpus);
3362
3363		dl_bw_free(cpu, cs->sum_migrate_dl_bw);
3364		reset_migrate_dl_data(cs);
3365	}
3366
3367	mutex_unlock(&cpuset_mutex);
3368}
3369
3370/*
3371 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task()
3372 * but we can't allocate it dynamically there.  Define it globally and
3373 * allocate it in cpuset_init().
3374 */
3375static cpumask_var_t cpus_attach;
3376static nodemask_t cpuset_attach_nodemask_to;
3377
3378static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task)
3379{
3380	lockdep_assert_held(&cpuset_mutex);
3381
3382	if (cs != &top_cpuset)
3383		guarantee_online_cpus(task, cpus_attach);
3384	else
3385		cpumask_andnot(cpus_attach, task_cpu_possible_mask(task),
3386			       subpartitions_cpus);
3387	/*
3388	 * can_attach beforehand should guarantee that this doesn't
3389	 * fail.  TODO: have a better way to handle failure here
3390	 */
3391	WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
3392
3393	cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
3394	cpuset_update_task_spread_flags(cs, task);
3395}
3396
3397static void cpuset_attach(struct cgroup_taskset *tset)
3398{
3399	struct task_struct *task;
3400	struct task_struct *leader;
3401	struct cgroup_subsys_state *css;
3402	struct cpuset *cs;
3403	struct cpuset *oldcs = cpuset_attach_old_cs;
3404	bool cpus_updated, mems_updated;
3405
3406	cgroup_taskset_first(tset, &css);
3407	cs = css_cs(css);
3408
3409	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
3410	mutex_lock(&cpuset_mutex);
3411	cpus_updated = !cpumask_equal(cs->effective_cpus,
3412				      oldcs->effective_cpus);
3413	mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
3414
3415	/*
3416	 * In the default hierarchy, enabling cpuset in the child cgroups
3417	 * will trigger a number of cpuset_attach() calls with no change
3418	 * in effective cpus and mems. In that case, we can optimize out
3419	 * by skipping the task iteration and update.
3420	 */
3421	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
3422	    !cpus_updated && !mems_updated) {
3423		cpuset_attach_nodemask_to = cs->effective_mems;
3424		goto out;
3425	}
3426
3427	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
3428
3429	cgroup_taskset_for_each(task, css, tset)
3430		cpuset_attach_task(cs, task);
3431
3432	/*
3433	 * Change mm for all threadgroup leaders. This is expensive and may
3434	 * sleep, so it should be moved outside the migration path proper. Skip it
3435	 * if there is no change in effective_mems and CS_MEMORY_MIGRATE is
3436	 * not set.
3437	 */
3438	cpuset_attach_nodemask_to = cs->effective_mems;
3439	if (!is_memory_migrate(cs) && !mems_updated)
3440		goto out;
3441
3442	cgroup_taskset_for_each_leader(leader, css, tset) {
3443		struct mm_struct *mm = get_task_mm(leader);
3444
3445		if (mm) {
3446			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
3447
3448			/*
3449			 * old_mems_allowed is the same as mems_allowed
3450			 * here, except if this task is being moved
3451			 * automatically due to hotplug.  In that case
3452			 * @mems_allowed has been updated and is empty, so
3453			 * @old_mems_allowed is the right nodemask to
3454			 * migrate the mm from.
3455			 */
3456			if (is_memory_migrate(cs))
3457				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
3458						  &cpuset_attach_nodemask_to);
3459			else
3460				mmput(mm);
3461		}
3462	}
3463
3464out:
3465	cs->old_mems_allowed = cpuset_attach_nodemask_to;
3466
3467	if (cs->nr_migrate_dl_tasks) {
3468		cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks;
3469		oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks;
3470		reset_migrate_dl_data(cs);
3471	}
3472
3473	cs->attach_in_progress--;
3474	if (!cs->attach_in_progress)
3475		wake_up(&cpuset_attach_wq);
3476
3477	mutex_unlock(&cpuset_mutex);
3478}
3479
3480/* The various types of files and directories in a cpuset file system */
3481
3482typedef enum {
3483	FILE_MEMORY_MIGRATE,
3484	FILE_CPULIST,
3485	FILE_MEMLIST,
3486	FILE_EFFECTIVE_CPULIST,
3487	FILE_EFFECTIVE_MEMLIST,
3488	FILE_SUBPARTS_CPULIST,
3489	FILE_EXCLUSIVE_CPULIST,
3490	FILE_EFFECTIVE_XCPULIST,
3491	FILE_ISOLATED_CPULIST,
3492	FILE_CPU_EXCLUSIVE,
3493	FILE_MEM_EXCLUSIVE,
3494	FILE_MEM_HARDWALL,
3495	FILE_SCHED_LOAD_BALANCE,
3496	FILE_PARTITION_ROOT,
3497	FILE_SCHED_RELAX_DOMAIN_LEVEL,
3498	FILE_MEMORY_PRESSURE_ENABLED,
3499	FILE_MEMORY_PRESSURE,
3500	FILE_SPREAD_PAGE,
3501	FILE_SPREAD_SLAB,
3502} cpuset_filetype_t;
3503
3504static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
3505			    u64 val)
3506{
3507	struct cpuset *cs = css_cs(css);
3508	cpuset_filetype_t type = cft->private;
3509	int retval = 0;
3510
3511	cpus_read_lock();
3512	mutex_lock(&cpuset_mutex);
3513	if (!is_cpuset_online(cs)) {
3514		retval = -ENODEV;
3515		goto out_unlock;
3516	}
3517
3518	switch (type) {
3519	case FILE_CPU_EXCLUSIVE:
3520		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
3521		break;
3522	case FILE_MEM_EXCLUSIVE:
3523		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
3524		break;
3525	case FILE_MEM_HARDWALL:
3526		retval = update_flag(CS_MEM_HARDWALL, cs, val);
3527		break;
3528	case FILE_SCHED_LOAD_BALANCE:
3529		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
3530		break;
3531	case FILE_MEMORY_MIGRATE:
3532		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
3533		break;
3534	case FILE_MEMORY_PRESSURE_ENABLED:
3535		cpuset_memory_pressure_enabled = !!val;
3536		break;
3537	case FILE_SPREAD_PAGE:
3538		retval = update_flag(CS_SPREAD_PAGE, cs, val);
3539		break;
3540	case FILE_SPREAD_SLAB:
3541		retval = update_flag(CS_SPREAD_SLAB, cs, val);
3542		break;
3543	default:
3544		retval = -EINVAL;
3545		break;
3546	}
3547out_unlock:
3548	mutex_unlock(&cpuset_mutex);
3549	cpus_read_unlock();
3550	return retval;
3551}
3552
3553static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
3554			    s64 val)
3555{
3556	struct cpuset *cs = css_cs(css);
3557	cpuset_filetype_t type = cft->private;
3558	int retval = -ENODEV;
3559
3560	cpus_read_lock();
3561	mutex_lock(&cpuset_mutex);
3562	if (!is_cpuset_online(cs))
3563		goto out_unlock;
3564
3565	switch (type) {
3566	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
3567		retval = update_relax_domain_level(cs, val);
3568		break;
3569	default:
3570		retval = -EINVAL;
3571		break;
3572	}
3573out_unlock:
3574	mutex_unlock(&cpuset_mutex);
3575	cpus_read_unlock();
3576	return retval;
3577}
3578
3579/*
3580 * Common handling for a write to a "cpus" or "mems" file.
3581 */
3582static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
3583				    char *buf, size_t nbytes, loff_t off)
3584{
3585	struct cpuset *cs = css_cs(of_css(of));
3586	struct cpuset *trialcs;
3587	int retval = -ENODEV;
3588
3589	buf = strstrip(buf);
3590
3591	/*
3592	 * CPU or memory hotunplug may leave @cs w/o any execution
3593	 * resources, in which case the hotplug code asynchronously updates
3594	 * configuration and transfers all tasks to the nearest ancestor
3595	 * which can execute.
3596	 *
3597	 * As writes to "cpus" or "mems" may restore @cs's execution
3598	 * resources, wait for the previously scheduled operations before
3599	 * proceeding, so that we don't end up repeatedly removing tasks added
3600	 * after execution capability is restored.
3601	 *
3602	 * cpuset_hotplug_work calls back into cgroup core via
3603	 * cgroup_transfer_tasks(), and waiting for it from a cgroupfs
3604	 * operation like this one can lead to a deadlock through kernfs
3605	 * active_ref protection.  Let's break the protection.  Losing the
3606	 * protection is okay as we check whether @cs is online after
3607	 * grabbing cpuset_mutex anyway.  This only happens on the legacy
3608	 * hierarchies.
3609	 */
3610	css_get(&cs->css);
3611	kernfs_break_active_protection(of->kn);
3612	flush_work(&cpuset_hotplug_work);
3613
3614	cpus_read_lock();
3615	mutex_lock(&cpuset_mutex);
3616	if (!is_cpuset_online(cs))
3617		goto out_unlock;
3618
3619	trialcs = alloc_trial_cpuset(cs);
3620	if (!trialcs) {
3621		retval = -ENOMEM;
3622		goto out_unlock;
3623	}
3624
3625	switch (of_cft(of)->private) {
3626	case FILE_CPULIST:
3627		retval = update_cpumask(cs, trialcs, buf);
3628		break;
3629	case FILE_EXCLUSIVE_CPULIST:
3630		retval = update_exclusive_cpumask(cs, trialcs, buf);
3631		break;
3632	case FILE_MEMLIST:
3633		retval = update_nodemask(cs, trialcs, buf);
3634		break;
3635	default:
3636		retval = -EINVAL;
3637		break;
3638	}
3639
3640	free_cpuset(trialcs);
3641out_unlock:
3642	mutex_unlock(&cpuset_mutex);
3643	cpus_read_unlock();
3644	kernfs_unbreak_active_protection(of->kn);
3645	css_put(&cs->css);
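	/* Wait for any mm migrations queued by a "mems" update above. */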
3646	flush_workqueue(cpuset_migrate_mm_wq);
3647	return retval ?: nbytes;
3648}
3649
3650/*
3651 * These ascii lists should be read in a single call, by using a user
3652 * buffer large enough to hold the entire map.  If read in smaller
3653 * chunks, there is no guarantee of atomicity.  Since the display format
3654 * used (a list of ranges of sequential numbers) is variable length,
3655 * and since these maps can change dynamically, one could read
3656 * gibberish by doing partial reads while a list was changing.
3657 */
3658static int cpuset_common_seq_show(struct seq_file *sf, void *v)
3659{
3660	struct cpuset *cs = css_cs(seq_css(sf));
3661	cpuset_filetype_t type = seq_cft(sf)->private;
3662	int ret = 0;
3663
3664	spin_lock_irq(&callback_lock);
3665
3666	switch (type) {
3667	case FILE_CPULIST:
3668		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
3669		break;
3670	case FILE_MEMLIST:
3671		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
3672		break;
3673	case FILE_EFFECTIVE_CPULIST:
3674		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
3675		break;
3676	case FILE_EFFECTIVE_MEMLIST:
3677		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
3678		break;
3679	case FILE_EXCLUSIVE_CPULIST:
3680		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->exclusive_cpus));
3681		break;
3682	case FILE_EFFECTIVE_XCPULIST:
3683		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_xcpus));
3684		break;
3685	case FILE_SUBPARTS_CPULIST:
3686		seq_printf(sf, "%*pbl\n", cpumask_pr_args(subpartitions_cpus));
3687		break;
3688	case FILE_ISOLATED_CPULIST:
3689		seq_printf(sf, "%*pbl\n", cpumask_pr_args(isolated_cpus));
3690		break;
3691	default:
3692		ret = -EINVAL;
3693	}
3694
3695	spin_unlock_irq(&callback_lock);
3696	return ret;
3697}
3698
3699static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
3700{
3701	struct cpuset *cs = css_cs(css);
3702	cpuset_filetype_t type = cft->private;
3703	switch (type) {
3704	case FILE_CPU_EXCLUSIVE:
3705		return is_cpu_exclusive(cs);
3706	case FILE_MEM_EXCLUSIVE:
3707		return is_mem_exclusive(cs);
3708	case FILE_MEM_HARDWALL:
3709		return is_mem_hardwall(cs);
3710	case FILE_SCHED_LOAD_BALANCE:
3711		return is_sched_load_balance(cs);
3712	case FILE_MEMORY_MIGRATE:
3713		return is_memory_migrate(cs);
3714	case FILE_MEMORY_PRESSURE_ENABLED:
3715		return cpuset_memory_pressure_enabled;
3716	case FILE_MEMORY_PRESSURE:
3717		return fmeter_getrate(&cs->fmeter);
3718	case FILE_SPREAD_PAGE:
3719		return is_spread_page(cs);
3720	case FILE_SPREAD_SLAB:
3721		return is_spread_slab(cs);
3722	default:
3723		BUG();
3724	}
3725
3726	/* Unreachable but makes gcc happy */
3727	return 0;
3728}
3729
3730static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
3731{
3732	struct cpuset *cs = css_cs(css);
3733	cpuset_filetype_t type = cft->private;
3734	switch (type) {
3735	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
3736		return cs->relax_domain_level;
3737	default:
3738		BUG();
3739	}
3740
3741	/* Unreachable but makes gcc happy */
3742	return 0;
3743}
3744
3745static int sched_partition_show(struct seq_file *seq, void *v)
3746{
3747	struct cpuset *cs = css_cs(seq_css(seq));
3748	const char *err, *type = NULL;
3749
3750	switch (cs->partition_root_state) {
3751	case PRS_ROOT:
3752		seq_puts(seq, "root\n");
3753		break;
3754	case PRS_ISOLATED:
3755		seq_puts(seq, "isolated\n");
3756		break;
3757	case PRS_MEMBER:
3758		seq_puts(seq, "member\n");
3759		break;
3760	case PRS_INVALID_ROOT:
3761		type = "root";
3762		fallthrough;
3763	case PRS_INVALID_ISOLATED:
3764		if (!type)
3765			type = "isolated";
3766		err = perr_strings[READ_ONCE(cs->prs_err)];
3767		if (err)
3768			seq_printf(seq, "%s invalid (%s)\n", type, err);
3769		else
3770			seq_printf(seq, "%s invalid\n", type);
3771		break;
3772	}
3773	return 0;
3774}
3775
3776static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
3777				     size_t nbytes, loff_t off)
3778{
3779	struct cpuset *cs = css_cs(of_css(of));
3780	int val;
3781	int retval = -ENODEV;
3782
3783	buf = strstrip(buf);
3784
3785	/*
3786	 * Convert "root", "isolated" and "member" to the matching PRS_* value.
3787	 */
3788	if (!strcmp(buf, "root"))
3789		val = PRS_ROOT;
3790	else if (!strcmp(buf, "member"))
3791		val = PRS_MEMBER;
3792	else if (!strcmp(buf, "isolated"))
3793		val = PRS_ISOLATED;
3794	else
3795		return -EINVAL;
3796
3797	css_get(&cs->css);
3798	cpus_read_lock();
3799	mutex_lock(&cpuset_mutex);
3800	if (!is_cpuset_online(cs))
3801		goto out_unlock;
3802
3803	retval = update_prstate(cs, val);
3804out_unlock:
3805	mutex_unlock(&cpuset_mutex);
3806	cpus_read_unlock();
3807	css_put(&cs->css);
3808	return retval ?: nbytes;
3809}
3810
3811/*
3812 * for the common functions, 'private' gives the type of file
3813 */
3814
3815static struct cftype legacy_files[] = {
3816	{
3817		.name = "cpus",
3818		.seq_show = cpuset_common_seq_show,
3819		.write = cpuset_write_resmask,
3820		.max_write_len = (100U + 6 * NR_CPUS),
3821		.private = FILE_CPULIST,
3822	},
3823
3824	{
3825		.name = "mems",
3826		.seq_show = cpuset_common_seq_show,
3827		.write = cpuset_write_resmask,
3828		.max_write_len = (100U + 6 * MAX_NUMNODES),
3829		.private = FILE_MEMLIST,
3830	},
3831
3832	{
3833		.name = "effective_cpus",
3834		.seq_show = cpuset_common_seq_show,
3835		.private = FILE_EFFECTIVE_CPULIST,
3836	},
3837
3838	{
3839		.name = "effective_mems",
3840		.seq_show = cpuset_common_seq_show,
3841		.private = FILE_EFFECTIVE_MEMLIST,
3842	},
3843
3844	{
3845		.name = "cpu_exclusive",
3846		.read_u64 = cpuset_read_u64,
3847		.write_u64 = cpuset_write_u64,
3848		.private = FILE_CPU_EXCLUSIVE,
3849	},
3850
3851	{
3852		.name = "mem_exclusive",
3853		.read_u64 = cpuset_read_u64,
3854		.write_u64 = cpuset_write_u64,
3855		.private = FILE_MEM_EXCLUSIVE,
3856	},
3857
3858	{
3859		.name = "mem_hardwall",
3860		.read_u64 = cpuset_read_u64,
3861		.write_u64 = cpuset_write_u64,
3862		.private = FILE_MEM_HARDWALL,
3863	},
3864
3865	{
3866		.name = "sched_load_balance",
3867		.read_u64 = cpuset_read_u64,
3868		.write_u64 = cpuset_write_u64,
3869		.private = FILE_SCHED_LOAD_BALANCE,
3870	},
3871
3872	{
3873		.name = "sched_relax_domain_level",
3874		.read_s64 = cpuset_read_s64,
3875		.write_s64 = cpuset_write_s64,
3876		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
3877	},
3878
3879	{
3880		.name = "memory_migrate",
3881		.read_u64 = cpuset_read_u64,
3882		.write_u64 = cpuset_write_u64,
3883		.private = FILE_MEMORY_MIGRATE,
3884	},
3885
3886	{
3887		.name = "memory_pressure",
3888		.read_u64 = cpuset_read_u64,
3889		.private = FILE_MEMORY_PRESSURE,
3890	},
3891
3892	{
3893		.name = "memory_spread_page",
3894		.read_u64 = cpuset_read_u64,
3895		.write_u64 = cpuset_write_u64,
3896		.private = FILE_SPREAD_PAGE,
3897	},
3898
3899	{
3900		.name = "memory_spread_slab",
3901		.read_u64 = cpuset_read_u64,
3902		.write_u64 = cpuset_write_u64,
3903		.private = FILE_SPREAD_SLAB,
3904	},
3905
3906	{
3907		.name = "memory_pressure_enabled",
3908		.flags = CFTYPE_ONLY_ON_ROOT,
3909		.read_u64 = cpuset_read_u64,
3910		.write_u64 = cpuset_write_u64,
3911		.private = FILE_MEMORY_PRESSURE_ENABLED,
3912	},
3913
3914	{ }	/* terminate */
3915};
3916
3917/*
3918 * This is currently a minimal set for the default hierarchy. It can be
3919 * expanded later on by migrating more features and control files from v1.
3920 */
3921static struct cftype dfl_files[] = {
3922	{
3923		.name = "cpus",
3924		.seq_show = cpuset_common_seq_show,
3925		.write = cpuset_write_resmask,
3926		.max_write_len = (100U + 6 * NR_CPUS),
3927		.private = FILE_CPULIST,
3928		.flags = CFTYPE_NOT_ON_ROOT,
3929	},
3930
3931	{
3932		.name = "mems",
3933		.seq_show = cpuset_common_seq_show,
3934		.write = cpuset_write_resmask,
3935		.max_write_len = (100U + 6 * MAX_NUMNODES),
3936		.private = FILE_MEMLIST,
3937		.flags = CFTYPE_NOT_ON_ROOT,
3938	},
3939
3940	{
3941		.name = "cpus.effective",
3942		.seq_show = cpuset_common_seq_show,
3943		.private = FILE_EFFECTIVE_CPULIST,
3944	},
3945
3946	{
3947		.name = "mems.effective",
3948		.seq_show = cpuset_common_seq_show,
3949		.private = FILE_EFFECTIVE_MEMLIST,
3950	},
3951
3952	{
3953		.name = "cpus.partition",
3954		.seq_show = sched_partition_show,
3955		.write = sched_partition_write,
3956		.private = FILE_PARTITION_ROOT,
3957		.flags = CFTYPE_NOT_ON_ROOT,
3958		.file_offset = offsetof(struct cpuset, partition_file),
3959	},
3960
3961	{
3962		.name = "cpus.exclusive",
3963		.seq_show = cpuset_common_seq_show,
3964		.write = cpuset_write_resmask,
3965		.max_write_len = (100U + 6 * NR_CPUS),
3966		.private = FILE_EXCLUSIVE_CPULIST,
3967		.flags = CFTYPE_NOT_ON_ROOT,
3968	},
3969
3970	{
3971		.name = "cpus.exclusive.effective",
3972		.seq_show = cpuset_common_seq_show,
3973		.private = FILE_EFFECTIVE_XCPULIST,
3974		.flags = CFTYPE_NOT_ON_ROOT,
3975	},
3976
3977	{
3978		.name = "cpus.subpartitions",
3979		.seq_show = cpuset_common_seq_show,
3980		.private = FILE_SUBPARTS_CPULIST,
3981		.flags = CFTYPE_ONLY_ON_ROOT | CFTYPE_DEBUG,
3982	},
3983
3984	{
3985		.name = "cpus.isolated",
3986		.seq_show = cpuset_common_seq_show,
3987		.private = FILE_ISOLATED_CPULIST,
3988		.flags = CFTYPE_ONLY_ON_ROOT,
3989	},
3990
3991	{ }	/* terminate */
3992};
3993
3994
3995/**
3996 * cpuset_css_alloc - Allocate a cpuset css
3997 * @parent_css: Parent css of the control group that the new cpuset will be
3998 *              part of
3999 * Return: cpuset css on success, -ENOMEM on failure.
4000 *
4001 * Allocate and initialize a new cpuset css, for non-NULL @parent_css, return
4002 * top cpuset css otherwise.
4003 */
4004static struct cgroup_subsys_state *
4005cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
4006{
4007	struct cpuset *cs;
4008
4009	if (!parent_css)
4010		return &top_cpuset.css;
4011
4012	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
4013	if (!cs)
4014		return ERR_PTR(-ENOMEM);
4015
4016	if (alloc_cpumasks(cs, NULL)) {
4017		kfree(cs);
4018		return ERR_PTR(-ENOMEM);
4019	}
4020
4021	__set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
4022	nodes_clear(cs->mems_allowed);
4023	nodes_clear(cs->effective_mems);
4024	fmeter_init(&cs->fmeter);
4025	cs->relax_domain_level = -1;
4026	INIT_LIST_HEAD(&cs->remote_sibling);
4027
4028	/* Set CS_MEMORY_MIGRATE for default hierarchy */
4029	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
4030		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
4031
4032	return &cs->css;
4033}
4034
4035static int cpuset_css_online(struct cgroup_subsys_state *css)
4036{
4037	struct cpuset *cs = css_cs(css);
4038	struct cpuset *parent = parent_cs(cs);
4039	struct cpuset *tmp_cs;
4040	struct cgroup_subsys_state *pos_css;
4041
4042	if (!parent)
4043		return 0;
4044
4045	cpus_read_lock();
4046	mutex_lock(&cpuset_mutex);
4047
4048	set_bit(CS_ONLINE, &cs->flags);
4049	if (is_spread_page(parent))
4050		set_bit(CS_SPREAD_PAGE, &cs->flags);
4051	if (is_spread_slab(parent))
4052		set_bit(CS_SPREAD_SLAB, &cs->flags);
4053
4054	cpuset_inc();
4055
4056	spin_lock_irq(&callback_lock);
4057	if (is_in_v2_mode()) {
4058		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
4059		cs->effective_mems = parent->effective_mems;
4060		cs->use_parent_ecpus = true;
4061		parent->child_ecpus_count++;
4062		/*
4063		 * Clear CS_SCHED_LOAD_BALANCE if parent is isolated
4064		 */
4065		if (!is_sched_load_balance(parent))
4066			clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
4067	}
4068
4069	/*
4070	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
4071	 */
4072	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
4073	    !is_sched_load_balance(parent))
4074		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
4075
4076	spin_unlock_irq(&callback_lock);
4077
4078	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
4079		goto out_unlock;
4080
4081	/*
4082	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
4083	 * set.  This flag handling is implemented in cgroup core for
4084	 * historical reasons - the flag may be specified during mount.
4085	 *
4086	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
4087	 * refuse to clone the configuration - thereby refusing to let the
4088	 * task enter the new cgroup, and as a result failing the sys_unshare()
4089	 * or clone() which initiated it.  If this becomes a problem for some
4090	 * users who wish to allow that scenario, then this could be
4091	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
4092	 * (and likewise for mems) to the new cgroup.
4093	 */
4094	rcu_read_lock();
4095	cpuset_for_each_child(tmp_cs, pos_css, parent) {
4096		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
4097			rcu_read_unlock();
4098			goto out_unlock;
4099		}
4100	}
4101	rcu_read_unlock();
4102
4103	spin_lock_irq(&callback_lock);
4104	cs->mems_allowed = parent->mems_allowed;
4105	cs->effective_mems = parent->mems_allowed;
4106	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
4107	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
4108	spin_unlock_irq(&callback_lock);
4109out_unlock:
4110	mutex_unlock(&cpuset_mutex);
4111	cpus_read_unlock();
4112	return 0;
4113}
4114
4115/*
4116 * If the cpuset being removed has its flag 'sched_load_balance'
4117 * enabled, then simulate turning sched_load_balance off, which
4118 * will call rebuild_sched_domains_locked(). That is not needed
4119 * in the default hierarchy where only changes in partition
4120 * will cause repartitioning.
4121 *
4122 * If the cpuset has the 'sched.partition' flag enabled, simulate
4123 * turning 'sched.partition' off.
4124 */
4125
4126static void cpuset_css_offline(struct cgroup_subsys_state *css)
4127{
4128	struct cpuset *cs = css_cs(css);
4129
4130	cpus_read_lock();
4131	mutex_lock(&cpuset_mutex);
4132
4133	if (is_partition_valid(cs))
4134		update_prstate(cs, 0);
4135
4136	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
4137	    is_sched_load_balance(cs))
4138		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
4139
4140	if (cs->use_parent_ecpus) {
4141		struct cpuset *parent = parent_cs(cs);
4142
4143		cs->use_parent_ecpus = false;
4144		parent->child_ecpus_count--;
4145	}
4146
4147	cpuset_dec();
4148	clear_bit(CS_ONLINE, &cs->flags);
4149
4150	mutex_unlock(&cpuset_mutex);
4151	cpus_read_unlock();
4152}
4153
4154static void cpuset_css_free(struct cgroup_subsys_state *css)
4155{
4156	struct cpuset *cs = css_cs(css);
4157
4158	free_cpuset(cs);
4159}
4160
4161static void cpuset_bind(struct cgroup_subsys_state *root_css)
4162{
4163	mutex_lock(&cpuset_mutex);
4164	spin_lock_irq(&callback_lock);
4165
4166	if (is_in_v2_mode()) {
4167		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
4168		cpumask_copy(top_cpuset.effective_xcpus, cpu_possible_mask);
4169		top_cpuset.mems_allowed = node_possible_map;
4170	} else {
4171		cpumask_copy(top_cpuset.cpus_allowed,
4172			     top_cpuset.effective_cpus);
4173		top_cpuset.mems_allowed = top_cpuset.effective_mems;
4174	}
4175
4176	spin_unlock_irq(&callback_lock);
4177	mutex_unlock(&cpuset_mutex);
4178}
4179
4180/*
4181 * In case the child is cloned into a cpuset different from its parent,
4182 * additional checks are done to see if the move is allowed.
4183 */
4184static int cpuset_can_fork(struct task_struct *task, struct css_set *cset)
4185{
4186	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
4187	bool same_cs;
4188	int ret;
4189
4190	rcu_read_lock();
4191	same_cs = (cs == task_cs(current));
4192	rcu_read_unlock();
4193
4194	if (same_cs)
4195		return 0;
4196
4197	lockdep_assert_held(&cgroup_mutex);
4198	mutex_lock(&cpuset_mutex);
4199
4200	/* Check to see if task is allowed in the cpuset */
4201	ret = cpuset_can_attach_check(cs);
4202	if (ret)
4203		goto out_unlock;
4204
4205	ret = task_can_attach(task);
4206	if (ret)
4207		goto out_unlock;
4208
4209	ret = security_task_setscheduler(task);
4210	if (ret)
4211		goto out_unlock;
4212
4213	/*
4214	 * Mark attach is in progress.  This makes validate_change() fail
4215	 * changes which zero cpus/mems_allowed.
4216	 */
4217	cs->attach_in_progress++;
4218out_unlock:
4219	mutex_unlock(&cpuset_mutex);
4220	return ret;
4221}
4222
4223static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset)
4224{
4225	struct cpuset *cs = css_cs(cset->subsys[cpuset_cgrp_id]);
4226	bool same_cs;
4227
4228	rcu_read_lock();
4229	same_cs = (cs == task_cs(current));
4230	rcu_read_unlock();
4231
4232	if (same_cs)
4233		return;
4234
4235	mutex_lock(&cpuset_mutex);
4236	cs->attach_in_progress--;
4237	if (!cs->attach_in_progress)
4238		wake_up(&cpuset_attach_wq);
4239	mutex_unlock(&cpuset_mutex);
4240}
4241
4242/*
4243 * Make sure the new task conforms to the current state of its parent,
4244 * which could have been changed by cpuset just after it inherits the
4245 * state from the parent and before it sits on the cgroup's task list.
4246 */
4247static void cpuset_fork(struct task_struct *task)
4248{
4249	struct cpuset *cs;
4250	bool same_cs;
4251
4252	rcu_read_lock();
4253	cs = task_cs(task);
4254	same_cs = (cs == task_cs(current));
4255	rcu_read_unlock();
4256
4257	if (same_cs) {
4258		if (cs == &top_cpuset)
4259			return;
4260
4261		set_cpus_allowed_ptr(task, current->cpus_ptr);
4262		task->mems_allowed = current->mems_allowed;
4263		return;
4264	}
4265
4266	/* CLONE_INTO_CGROUP */
4267	mutex_lock(&cpuset_mutex);
4268	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
4269	cpuset_attach_task(cs, task);
4270
4271	cs->attach_in_progress--;
4272	if (!cs->attach_in_progress)
4273		wake_up(&cpuset_attach_wq);
4274
4275	mutex_unlock(&cpuset_mutex);
4276}
4277
4278struct cgroup_subsys cpuset_cgrp_subsys = {
4279	.css_alloc	= cpuset_css_alloc,
4280	.css_online	= cpuset_css_online,
4281	.css_offline	= cpuset_css_offline,
4282	.css_free	= cpuset_css_free,
4283	.can_attach	= cpuset_can_attach,
4284	.cancel_attach	= cpuset_cancel_attach,
4285	.attach		= cpuset_attach,
4286	.post_attach	= cpuset_post_attach,
4287	.bind		= cpuset_bind,
4288	.can_fork	= cpuset_can_fork,
4289	.cancel_fork	= cpuset_cancel_fork,
4290	.fork		= cpuset_fork,
4291	.legacy_cftypes	= legacy_files,
4292	.dfl_cftypes	= dfl_files,
4293	.early_init	= true,
4294	.threaded	= true,
4295};
4296
4297/**
4298 * cpuset_init - initialize cpusets at system boot
4299 *
4300 * Description: Initialize top_cpuset
4301 **/
4302
4303int __init cpuset_init(void)
4304{
4305	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
4306	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
4307	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_xcpus, GFP_KERNEL));
4308	BUG_ON(!alloc_cpumask_var(&top_cpuset.exclusive_cpus, GFP_KERNEL));
4309	BUG_ON(!zalloc_cpumask_var(&subpartitions_cpus, GFP_KERNEL));
4310	BUG_ON(!zalloc_cpumask_var(&isolated_cpus, GFP_KERNEL));
4311
4312	cpumask_setall(top_cpuset.cpus_allowed);
4313	nodes_setall(top_cpuset.mems_allowed);
4314	cpumask_setall(top_cpuset.effective_cpus);
4315	cpumask_setall(top_cpuset.effective_xcpus);
4316	cpumask_setall(top_cpuset.exclusive_cpus);
4317	nodes_setall(top_cpuset.effective_mems);
4318
4319	fmeter_init(&top_cpuset.fmeter);
4320	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
4321	top_cpuset.relax_domain_level = -1;
4322	INIT_LIST_HEAD(&remote_children);
4323
4324	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
4325
4326	return 0;
4327}
4328
4329/*
4330 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
4331 * or memory nodes, we need to walk over the cpuset hierarchy,
4332 * removing that CPU or node from all cpusets.  If this removes the
4333 * last CPU or node from a cpuset, then move the tasks in the empty
4334 * cpuset to its next-highest non-empty parent.
4335 */
4336static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
4337{
4338	struct cpuset *parent;
4339
4340	/*
4341	 * Find its next-highest non-empty parent (the top cpuset
4342	 * has online cpus, so it can't be empty).
4343	 */
4344	parent = parent_cs(cs);
4345	while (cpumask_empty(parent->cpus_allowed) ||
4346			nodes_empty(parent->mems_allowed))
4347		parent = parent_cs(parent);
4348
4349	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
4350		pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
4351		pr_cont_cgroup_name(cs->css.cgroup);
4352		pr_cont("\n");
4353	}
4354}
4355
4356static void
4357hotplug_update_tasks_legacy(struct cpuset *cs,
4358			    struct cpumask *new_cpus, nodemask_t *new_mems,
4359			    bool cpus_updated, bool mems_updated)
4360{
4361	bool is_empty;
4362
4363	spin_lock_irq(&callback_lock);
4364	cpumask_copy(cs->cpus_allowed, new_cpus);
4365	cpumask_copy(cs->effective_cpus, new_cpus);
4366	cs->mems_allowed = *new_mems;
4367	cs->effective_mems = *new_mems;
4368	spin_unlock_irq(&callback_lock);
4369
4370	/*
4371	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
4372	 * as the tasks will be migrated to an ancestor.
4373	 */
4374	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
4375		update_tasks_cpumask(cs, new_cpus);
4376	if (mems_updated && !nodes_empty(cs->mems_allowed))
4377		update_tasks_nodemask(cs);
4378
4379	is_empty = cpumask_empty(cs->cpus_allowed) ||
4380		   nodes_empty(cs->mems_allowed);
4381
4382	/*
4383	 * Move tasks to the nearest ancestor with execution resources.
4384	 * This is a full cgroup operation which will also call back into
4385	 * cpuset. It should be done outside any lock.
4386	 */
4387	if (is_empty) {
4388		mutex_unlock(&cpuset_mutex);
4389		remove_tasks_in_empty_cpuset(cs);
4390		mutex_lock(&cpuset_mutex);
4391	}
4392}
4393
4394static void
4395hotplug_update_tasks(struct cpuset *cs,
4396		     struct cpumask *new_cpus, nodemask_t *new_mems,
4397		     bool cpus_updated, bool mems_updated)
4398{
4399	/* A partition root is allowed to have empty effective cpus */
4400	if (cpumask_empty(new_cpus) && !is_partition_valid(cs))
4401		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
4402	if (nodes_empty(*new_mems))
4403		*new_mems = parent_cs(cs)->effective_mems;
4404
4405	spin_lock_irq(&callback_lock);
4406	cpumask_copy(cs->effective_cpus, new_cpus);
4407	cs->effective_mems = *new_mems;
4408	spin_unlock_irq(&callback_lock);
4409
4410	if (cpus_updated)
4411		update_tasks_cpumask(cs, new_cpus);
4412	if (mems_updated)
4413		update_tasks_nodemask(cs);
4414}
4415
4416static bool force_rebuild;
4417
4418void cpuset_force_rebuild(void)
4419{
4420	force_rebuild = true;
4421}
4422
4423/*
4424 * Attempt to acquire a cpus_read_lock while a hotplug operation may be in
4425 * progress.
4426 * Return: true if successful, false otherwise
4427 *
4428 * To avoid circular lock dependency between cpuset_mutex and cpus_read_lock,
4429 * cpus_read_trylock() is used here to acquire the lock.
4430 */
4431static bool cpuset_hotplug_cpus_read_trylock(void)
4432{
4433	int retries = 0;
4434
4435	while (!cpus_read_trylock()) {
4436		/*
4437		 * CPU hotplug still in progress. Retry 5 times
4438		 * with a 10ms wait before bailing out.
4439		 */
4440		if (++retries > 5)
4441			return false;
4442		msleep(10);
4443	}
4444	return true;
4445}
4446
4447/**
4448 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
4449 * @cs: cpuset in interest
4450 * @tmp: the tmpmasks structure pointer
4451 *
4452 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
4453 * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
4454 * all its tasks are moved to the nearest ancestor with both resources.
4455 */
4456static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
4457{
4458	static cpumask_t new_cpus;
4459	static nodemask_t new_mems;
4460	bool cpus_updated;
4461	bool mems_updated;
4462	bool remote;
4463	int partcmd = -1;
4464	struct cpuset *parent;
4465retry:
4466	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
4467
4468	mutex_lock(&cpuset_mutex);
4469
4470	/*
4471	 * We have raced with task attaching. We wait until attaching
4472	 * is finished, so we won't attach a task to an empty cpuset.
4473	 */
4474	if (cs->attach_in_progress) {
4475		mutex_unlock(&cpuset_mutex);
4476		goto retry;
4477	}
4478
4479	parent = parent_cs(cs);
4480	compute_effective_cpumask(&new_cpus, cs, parent);
4481	nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
4482
4483	if (!tmp || !cs->partition_root_state)
4484		goto update_tasks;
4485
4486	/*
4487	 * Compute effective_cpus for valid partition root, may invalidate
4488	 * child partition roots if necessary.
4489	 */
4490	remote = is_remote_partition(cs);
4491	if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
4492		compute_partition_effective_cpumask(cs, &new_cpus);
4493
4494	if (remote && cpumask_empty(&new_cpus) &&
4495	    partition_is_populated(cs, NULL) &&
4496	    cpuset_hotplug_cpus_read_trylock()) {
4497		remote_partition_disable(cs, tmp);
4498		compute_effective_cpumask(&new_cpus, cs, parent);
4499		remote = false;
4500		cpuset_force_rebuild();
4501		cpus_read_unlock();
4502	}
4503
4504	/*
4505	 * Force the partition to become invalid if either one of
4506	 * the following conditions hold:
4507	 * 1) empty effective cpus but not valid empty partition.
4508	 * 2) parent is invalid or doesn't grant any cpus to child
4509	 *    partitions.
4510	 */
4511	if (is_local_partition(cs) && (!is_partition_valid(parent) ||
4512				tasks_nocpu_error(parent, cs, &new_cpus)))
4513		partcmd = partcmd_invalidate;
4514	/*
4515	 * On the other hand, an invalid partition root may be transitioned
4516	 * back to a regular one.
4517	 */
4518	else if (is_partition_valid(parent) && is_partition_invalid(cs))
4519		partcmd = partcmd_update;
4520
4521	/*
4522	 * cpus_read_lock needs to be held before calling
4523	 * update_parent_effective_cpumask(). To avoid circular lock
4524	 * dependency between cpuset_mutex and cpus_read_lock,
4525	 * cpus_read_trylock() is used here to acquire the lock.
4526	 */
4527	if (partcmd >= 0) {
4528		if (!cpuset_hotplug_cpus_read_trylock())
4529			goto update_tasks;
4530
4531		update_parent_effective_cpumask(cs, partcmd, NULL, tmp);
4532		cpus_read_unlock();
4533		if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) {
4534			compute_partition_effective_cpumask(cs, &new_cpus);
4535			cpuset_force_rebuild();
4536		}
4537	}
4538
4539update_tasks:
4540	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
4541	mems_updated = !nodes_equal(new_mems, cs->effective_mems);
4542	if (!cpus_updated && !mems_updated)
4543		goto unlock;	/* Hotplug doesn't affect this cpuset */
4544
4545	if (mems_updated)
4546		check_insane_mems_config(&new_mems);
4547
4548	if (is_in_v2_mode())
4549		hotplug_update_tasks(cs, &new_cpus, &new_mems,
4550				     cpus_updated, mems_updated);
4551	else
4552		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
4553					    cpus_updated, mems_updated);
4554
4555unlock:
4556	mutex_unlock(&cpuset_mutex);
4557}
4558
4559/**
4560 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
4561 * @work: unused
4562 *
4563 * This function is called after either CPU or memory configuration has
4564 * changed and updates cpuset accordingly.  The top_cpuset is always
4565 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
4566 * order to make cpusets transparent (of no effect) on systems that are
4567 * actively using CPU hotplug but making no active use of cpusets.
4568 *
4569 * Non-root cpusets are only affected by offlining.  If any CPUs or memory
4570 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
4571 * all descendants.
4572 *
4573 * Note that CPU offlining during suspend is ignored.  We don't modify
4574 * cpusets across suspend/resume cycles at all.
4575 */
4576static void cpuset_hotplug_workfn(struct work_struct *work)
4577{
4578	static cpumask_t new_cpus;
4579	static nodemask_t new_mems;
4580	bool cpus_updated, mems_updated;
4581	bool on_dfl = is_in_v2_mode();
4582	struct tmpmasks tmp, *ptmp = NULL;
4583
4584	if (on_dfl && !alloc_cpumasks(NULL, &tmp))
4585		ptmp = &tmp;
4586
4587	mutex_lock(&cpuset_mutex);
4588
4589	/* fetch the available cpus/mems and find out which changed how */
4590	cpumask_copy(&new_cpus, cpu_active_mask);
4591	new_mems = node_states[N_MEMORY];
4592
4593	/*
4594	 * If subpartitions_cpus is populated, it is likely that the check
4595	 * below will produce a false positive on cpus_updated when the cpu
4596	 * list isn't changed. It is extra work, but it is better to be safe.
4597	 */
4598	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus) ||
4599		       !cpumask_empty(subpartitions_cpus);
4600	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
4601
4602	/*
4603	 * In the rare case that hotplug removes all the cpus in
4604	 * subpartitions_cpus, we assume that cpus are updated.
4605	 */
4606	if (!cpus_updated && top_cpuset.nr_subparts)
4607		cpus_updated = true;
4608
4609	/* For v1, synchronize cpus_allowed to cpu_active_mask */
4610	if (cpus_updated) {
4611		spin_lock_irq(&callback_lock);
4612		if (!on_dfl)
4613			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
4614		/*
4615		 * Make sure that CPUs allocated to child partitions
4616		 * do not show up in effective_cpus. If no CPU is left,
4617		 * we clear the subpartitions_cpus & let the child partitions
4618		 * fight for the CPUs again.
4619		 */
4620		if (!cpumask_empty(subpartitions_cpus)) {
4621			if (cpumask_subset(&new_cpus, subpartitions_cpus)) {
4622				top_cpuset.nr_subparts = 0;
4623				cpumask_clear(subpartitions_cpus);
4624			} else {
4625				cpumask_andnot(&new_cpus, &new_cpus,
4626					       subpartitions_cpus);
4627			}
4628		}
4629		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
4630		spin_unlock_irq(&callback_lock);
4631		/* we don't mess with cpumasks of tasks in top_cpuset */
4632	}
4633
4634	/* synchronize mems_allowed to N_MEMORY */
4635	if (mems_updated) {
4636		spin_lock_irq(&callback_lock);
4637		if (!on_dfl)
4638			top_cpuset.mems_allowed = new_mems;
4639		top_cpuset.effective_mems = new_mems;
4640		spin_unlock_irq(&callback_lock);
4641		update_tasks_nodemask(&top_cpuset);
4642	}
4643
4644	mutex_unlock(&cpuset_mutex);
4645
4646	/* if cpus or mems changed, we need to propagate to descendants */
4647	if (cpus_updated || mems_updated) {
4648		struct cpuset *cs;
4649		struct cgroup_subsys_state *pos_css;
4650
4651		rcu_read_lock();
4652		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
4653			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
4654				continue;
4655			rcu_read_unlock();
4656
4657			cpuset_hotplug_update_tasks(cs, ptmp);
4658
4659			rcu_read_lock();
4660			css_put(&cs->css);
4661		}
4662		rcu_read_unlock();
4663	}
4664
4665	/* rebuild sched domains if cpus_allowed has changed */
4666	if (cpus_updated || force_rebuild) {
4667		force_rebuild = false;
4668		rebuild_sched_domains();
4669	}
4670
4671	free_cpumasks(NULL, ptmp);
4672}
4673
4674void cpuset_update_active_cpus(void)
4675{
4676	/*
4677	 * We're inside cpu hotplug critical region which usually nests
4678	 * inside cgroup synchronization.  Bounce actual hotplug processing
4679	 * to a work item to avoid reverse locking order.
4680	 */
4681	schedule_work(&cpuset_hotplug_work);
4682}
4683
4684void cpuset_wait_for_hotplug(void)
4685{
4686	flush_work(&cpuset_hotplug_work);
4687}
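/*
 * Illustrative sketch (not part of the original file): the same
 * defer-and-flush pattern used by cpuset_update_active_cpus() and
 * cpuset_wait_for_hotplug() above, applied to a hypothetical subsystem.
 * The hotplug path only schedules a work item; a synchronous caller
 * flushes it.  All mysubsys_* names are made up for illustration.
 */
static void mysubsys_hotplug_workfn(struct work_struct *work)
{
	/* heavy rebuilding happens here, outside the hotplug critical region */
}
static DECLARE_WORK(mysubsys_hotplug_work, mysubsys_hotplug_workfn);

static void mysubsys_update_active_cpus(void)
{
	/* called from the hotplug path: just bounce to process context */
	schedule_work(&mysubsys_hotplug_work);
}

static void mysubsys_wait_for_hotplug(void)
{
	/* wait for any pending update to finish */
	flush_work(&mysubsys_hotplug_work);
}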
4688
4689/*
4690 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
4691 * Call this routine anytime after node_states[N_MEMORY] changes.
4692 * See cpuset_update_active_cpus() for CPU hotplug handling.
4693 */
4694static int cpuset_track_online_nodes(struct notifier_block *self,
4695				unsigned long action, void *arg)
4696{
4697	schedule_work(&cpuset_hotplug_work);
4698	return NOTIFY_OK;
4699}
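/*
 * Illustrative sketch (not part of the original file), building on the
 * mysubsys_* example earlier: registering a memory hotplug notifier the
 * same way cpuset_init_smp() below wires up cpuset_track_online_nodes().
 * Names are hypothetical; priority 0 is arbitrary (cpuset itself uses
 * CPUSET_CALLBACK_PRI).
 */
static int mysubsys_track_online_nodes(struct notifier_block *self,
				       unsigned long action, void *arg)
{
	/* node_states[N_MEMORY] may have changed; defer the real work */
	schedule_work(&mysubsys_hotplug_work);
	return NOTIFY_OK;
}

static int __init mysubsys_init(void)
{
	hotplug_memory_notifier(mysubsys_track_online_nodes, 0);
	return 0;
}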
4700
4701/**
4702 * cpuset_init_smp - initialize cpus_allowed
4703 *
4704 * Description: Finish top cpuset setup after the cpu and node maps are initialized
4705 */
4706void __init cpuset_init_smp(void)
4707{
4708	/*
4709	 * cpus_allowed/mems_allowed set to v2 values in the initial
4710	 * cpuset_bind() call will be reset to v1 values in another
4711	 * cpuset_bind() call when v1 cpuset is mounted.
4712	 */
4713	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
4714
4715	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
4716	top_cpuset.effective_mems = node_states[N_MEMORY];
4717
4718	hotplug_memory_notifier(cpuset_track_online_nodes, CPUSET_CALLBACK_PRI);
4719
4720	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
4721	BUG_ON(!cpuset_migrate_mm_wq);
4722}
4723
4724/**
4725 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
4726 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
4727 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
4728 *
4729 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
4730 * attached to the specified @tsk.  Guaranteed to return some non-empty
4731 * subset of cpu_online_mask, even if this means going outside the
4732 * task's cpuset, except when the task is in the top cpuset.
4733 **/
4734
4735void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
4736{
4737	unsigned long flags;
4738	struct cpuset *cs;
4739
4740	spin_lock_irqsave(&callback_lock, flags);
4741	rcu_read_lock();
4742
4743	cs = task_cs(tsk);
4744	if (cs != &top_cpuset)
4745		guarantee_online_cpus(tsk, pmask);
4746	/*
4747	 * Tasks in the top cpuset won't get updates to their cpumasks
4748	 * when a hotplug online/offline event happens. So we include all
4749	 * offline cpus in the allowed cpu list.
4750	 */
4751	if ((cs == &top_cpuset) || cpumask_empty(pmask)) {
4752		const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4753
4754		/*
4755		 * We first exclude cpus allocated to partitions. If there is no
4756		 * allowable online cpu left, we fall back to all possible cpus.
4757		 */
4758		cpumask_andnot(pmask, possible_mask, subpartitions_cpus);
4759		if (!cpumask_intersects(pmask, cpu_online_mask))
4760			cpumask_copy(pmask, possible_mask);
4761	}
4762
4763	rcu_read_unlock();
4764	spin_unlock_irqrestore(&callback_lock, flags);
4765}
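/*
 * Illustrative sketch (not part of the original file): how a
 * setaffinity-style caller can use cpuset_cpus_allowed() to clamp a
 * user-requested mask to the task's cpuset, loosely modeled on what the
 * scheduler's affinity path does.  mysubsys_clamp_affinity() is a
 * hypothetical helper.
 */
static int mysubsys_clamp_affinity(struct task_struct *p,
				   const struct cpumask *user_mask,
				   struct cpumask *new_mask)
{
	cpumask_var_t cs_allowed;

	if (!alloc_cpumask_var(&cs_allowed, GFP_KERNEL))
		return -ENOMEM;

	/* never let the requested mask escape the task's cpuset */
	cpuset_cpus_allowed(p, cs_allowed);
	cpumask_and(new_mask, user_mask, cs_allowed);

	free_cpumask_var(cs_allowed);
	return cpumask_empty(new_mask) ? -EINVAL : 0;
}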
4766
4767/**
4768 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
4769 * @tsk: pointer to task_struct with which the scheduler is struggling
4770 *
4771 * Description: In the case that the scheduler cannot find an allowed cpu in
4772 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
4773 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
4774 * which will not contain a sane cpumask during cases such as cpu hotplugging.
4775 * This is the absolute last resort for the scheduler and it is only used if
4776 * _every_ other avenue has been traveled.
4777 *
4778 * Returns true if the affinity of @tsk was changed, false otherwise.
4779 **/
4780
4781bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
4782{
4783	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
4784	const struct cpumask *cs_mask;
4785	bool changed = false;
4786
4787	rcu_read_lock();
4788	cs_mask = task_cs(tsk)->cpus_allowed;
4789	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
4790		do_set_cpus_allowed(tsk, cs_mask);
4791		changed = true;
4792	}
4793	rcu_read_unlock();
4794
4795	/*
4796	 * We own tsk->cpus_allowed, nobody can change it under us.
4797	 *
4798	 * But we use cs and cs->cpus_allowed locklessly and thus can
4799	 * race with cgroup_attach_task() or update_cpumask() and get
4800	 * the wrong tsk->cpus_allowed. However, both cases imply the
4801	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
4802	 * which takes task_rq_lock().
4803	 *
4804	 * If we are called after it dropped the lock we must see all
4805	 * changes in task_cs()->cpus_allowed. Otherwise we can temporarily
4806	 * set any mask even if it is not right from task_cs() pov,
4807	 * the pending set_cpus_allowed_ptr() will fix things.
4808	 *
4809	 * select_fallback_rq() will fix things up and set cpu_possible_mask
4810	 * if required.
4811	 */
4812	return changed;
4813}
4814
4815void __init cpuset_init_current_mems_allowed(void)
4816{
4817	nodes_setall(current->mems_allowed);
4818}
4819
4820/**
4821 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
4822 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
4823 *
4824 * Description: Returns the nodemask_t mems_allowed of the cpuset
4825 * attached to the specified @tsk.  Guaranteed to return some non-empty
4826 * subset of node_states[N_MEMORY], even if this means going outside the
4827 * task's cpuset.
4828 **/
4829
4830nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
4831{
4832	nodemask_t mask;
4833	unsigned long flags;
4834
4835	spin_lock_irqsave(&callback_lock, flags);
4836	rcu_read_lock();
4837	guarantee_online_mems(task_cs(tsk), &mask);
4838	rcu_read_unlock();
4839	spin_unlock_irqrestore(&callback_lock, flags);
4840
4841	return mask;
4842}
4843
4844/**
4845 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
4846 * @nodemask: the nodemask to be checked
4847 *
4848 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
4849 */
4850int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
4851{
4852	return nodes_intersects(*nodemask, current->mems_allowed);
4853}
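/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * helper that honours a caller-supplied nodemask only when the current
 * task's cpuset leaves it usable, falling back to cpuset_mems_allowed()
 * otherwise.  mysubsys_effective_nodes() is a made-up name.
 */
static nodemask_t mysubsys_effective_nodes(nodemask_t *requested)
{
	if (requested && cpuset_nodemask_valid_mems_allowed(requested))
		return *requested;

	/* nothing usable in @requested: fall back to the cpuset's nodes */
	return cpuset_mems_allowed(current);
}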
4854
4855/*
4856 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
4857 * mem_hardwall ancestor to the specified cpuset.  Call holding
4858 * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
4859 * (an unusual configuration), then returns the root cpuset.
4860 */
4861static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
4862{
4863	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
4864		cs = parent_cs(cs);
4865	return cs;
4866}
4867
4868/*
4869 * cpuset_node_allowed - Can we allocate on a memory node?
4870 * @node: is this an allowed node?
4871 * @gfp_mask: memory allocation flags
4872 *
4873 * If we're in interrupt, yes, we can always allocate.  If @node is set in
4874 * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
4875 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
4876 * yes.  If current has access to memory reserves as an oom victim, yes.
4877 * Otherwise, no.
4878 *
4879 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
4880 * and do not allow allocations outside the current task's cpuset
4881 * unless the task has been OOM killed.
4882 * GFP_KERNEL allocations are not so marked, so can escape to the
4883 * nearest enclosing hardwalled ancestor cpuset.
4884 *
4885 * Scanning up parent cpusets requires callback_lock.  The
4886 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
4887 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
4888 * current task's mems_allowed came up empty on the first pass over
4889 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
4890 * cpuset are short of memory, might require taking the callback_lock.
4891 *
4892 * The first call here from mm/page_alloc:get_page_from_freelist()
4893 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
4894 * so no allocation on a node outside the cpuset is allowed (unless
4895 * in interrupt, of course).
4896 *
4897 * The second pass through get_page_from_freelist() doesn't even call
4898 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
4899 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
4900 * in alloc_flags.  That logic and the checks below have the combined
4901 * effect that:
4902 *	in_interrupt - any node ok (current task context irrelevant)
4903 *	GFP_ATOMIC   - any node ok
4904 *	tsk_is_oom_victim   - any node ok
4905 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
4906 *	GFP_USER     - only nodes in current task's mems_allowed ok.
4907 */
4908bool cpuset_node_allowed(int node, gfp_t gfp_mask)
4909{
4910	struct cpuset *cs;		/* current cpuset ancestors */
4911	bool allowed;			/* is allocation on @node allowed? */
4912	unsigned long flags;
4913
4914	if (in_interrupt())
4915		return true;
4916	if (node_isset(node, current->mems_allowed))
4917		return true;
4918	/*
4919	 * Allow tasks that have access to memory reserves because they have
4920	 * been OOM killed to get memory anywhere.
4921	 */
4922	if (unlikely(tsk_is_oom_victim(current)))
4923		return true;
4924	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
4925		return false;
4926
4927	if (current->flags & PF_EXITING) /* Let dying task have memory */
4928		return true;
4929
4930	/* Not hardwall and node outside mems_allowed: scan up cpusets */
4931	spin_lock_irqsave(&callback_lock, flags);
4932
4933	rcu_read_lock();
4934	cs = nearest_hardwall_ancestor(task_cs(current));
4935	allowed = node_isset(node, cs->mems_allowed);
4936	rcu_read_unlock();
4937
4938	spin_unlock_irqrestore(&callback_lock, flags);
4939	return allowed;
4940}
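/*
 * Illustrative sketch (not part of the original file): walking the nodes
 * that currently have memory and letting cpuset_node_allowed() apply the
 * GFP_USER / GFP_KERNEL / hardwall rules summarised in the comment above.
 * mysubsys_pick_node() is a hypothetical helper.
 */
static int mysubsys_pick_node(gfp_t gfp_mask)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		if (cpuset_node_allowed(nid, gfp_mask))
			return nid;
	}

	/* no node passes the cpuset check for this gfp_mask */
	return NUMA_NO_NODE;
}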
4941
4942/**
4943 * cpuset_spread_node() - On which node to begin search for a page
4944 * @rotor: round robin rotor
4945 *
4946 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
4947 * tasks in a cpuset with is_spread_page or is_spread_slab set),
4948 * and if the memory allocation used cpuset_mem_spread_node()
4949 * to determine on which node to start looking, as it will for
4950	 * certain page cache or slab cache pages such as those used for file
4951	 * system buffers and inode caches, then instead of starting on the
4952	 * local node to look for a free page, rather spread the starting
4953	 * node around the task's mems_allowed nodes.
4954 *
4955 * We don't have to worry about the returned node being offline
4956 * because "it can't happen", and even if it did, it would be ok.
4957 *
4958 * The routines calling guarantee_online_mems() are careful to
4959 * only set nodes in task->mems_allowed that are online.  So it
4960 * should not be possible for the following code to return an
4961 * offline node.  But if it did, that would be ok, as this routine
4962 * is not returning the node where the allocation must be, only
4963 * the node where the search should start.  The zonelist passed to
4964 * __alloc_pages() will include all nodes.  If the slab allocator
4965 * is passed an offline node, it will fall back to the local node.
4966 * See kmem_cache_alloc_node().
4967 */
4968static int cpuset_spread_node(int *rotor)
4969{
4970	return *rotor = next_node_in(*rotor, current->mems_allowed);
4971}
4972
4973/**
4974 * cpuset_mem_spread_node() - On which node to begin search for a file page
4975 */
4976int cpuset_mem_spread_node(void)
4977{
4978	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
4979		current->cpuset_mem_spread_rotor =
4980			node_random(&current->mems_allowed);
4981
4982	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
4983}
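/*
 * Illustrative sketch (not part of the original file): the allocation
 * pattern the comment above describes, spreading page cache pages over
 * the task's mems_allowed when the cpuset requests it.
 * mysubsys_alloc_cache_page() is a hypothetical helper; the real page
 * cache has its own allocation paths.
 */
static struct page *mysubsys_alloc_cache_page(gfp_t gfp)
{
	if (cpuset_do_page_mem_spread()) {
		/* round-robin over mems_allowed instead of the local node */
		int nid = cpuset_mem_spread_node();

		return alloc_pages_node(nid, gfp, 0);
	}

	return alloc_pages(gfp, 0);
}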
4984
4985/**
4986 * cpuset_slab_spread_node() - On which node to begin search for a slab page
4987 */
4988int cpuset_slab_spread_node(void)
4989{
4990	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
4991		current->cpuset_slab_spread_rotor =
4992			node_random(&current->mems_allowed);
4993
4994	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
4995}
4996EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
4997
4998/**
4999 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
5000 * @tsk1: pointer to task_struct of some task.
5001 * @tsk2: pointer to task_struct of some other task.
5002 *
5003 * Description: Return true if @tsk1's mems_allowed intersects the
5004 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
5005 * one of the task's memory usage might impact the memory available
5006 * to the other.
5007 **/
5008
5009int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
5010				   const struct task_struct *tsk2)
5011{
5012	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
5013}
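/*
 * Illustrative sketch (not part of the original file): the kind of
 * eligibility test an OOM policy can build on this helper - killing a
 * task only helps if its memory can come from nodes the allocating task
 * is allowed to use.  mysubsys_oom_useful() is a hypothetical name.
 */
static bool mysubsys_oom_useful(struct task_struct *victim)
{
	return cpuset_mems_allowed_intersects(current, victim);
}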
5014
5015/**
5016 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
5017 *
5018 * Description: Prints current's name, cpuset name, and a cached copy of its
5019 * mems_allowed to the kernel log.
5020 */
5021void cpuset_print_current_mems_allowed(void)
5022{
5023	struct cgroup *cgrp;
5024
5025	rcu_read_lock();
5026
5027	cgrp = task_cs(current)->css.cgroup;
5028	pr_cont(",cpuset=");
5029	pr_cont_cgroup_name(cgrp);
5030	pr_cont(",mems_allowed=%*pbl",
5031		nodemask_pr_args(&current->mems_allowed));
5032
5033	rcu_read_unlock();
5034}
5035
5036/*
5037 * Collection of memory_pressure is suppressed unless
5038 * this flag is enabled by writing "1" to the special
5039 * cpuset file 'memory_pressure_enabled' in the root cpuset.
5040 */
5041
5042int cpuset_memory_pressure_enabled __read_mostly;
5043
5044/*
5045 * __cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
5046 *
5047 * Keep a running average of the rate of synchronous (direct)
5048 * page reclaim efforts initiated by tasks in each cpuset.
5049 *
5050 * This represents the rate at which some task in the cpuset
5051 * ran low on memory on all nodes it was allowed to use, and
5052	 * had to enter the kernel's page reclaim code in an effort to
5053 * create more free memory by tossing clean pages or swapping
5054 * or writing dirty pages.
5055 *
5056 * Display to user space in the per-cpuset read-only file
5057 * "memory_pressure".  Value displayed is an integer
5058 * representing the recent rate of entry into the synchronous
5059 * (direct) page reclaim by any task attached to the cpuset.
5060 */
5061
5062void __cpuset_memory_pressure_bump(void)
5063{
5064	rcu_read_lock();
5065	fmeter_markevent(&task_cs(current)->fmeter);
5066	rcu_read_unlock();
5067}
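/*
 * Illustrative sketch (not part of the original file): how a direct
 * reclaim entry point can bump the meter only when the root cpuset has
 * enabled collection, mirroring the cpuset_memory_pressure_bump()
 * wrapper in <linux/cpuset.h>.  mysubsys_enter_direct_reclaim() is a
 * hypothetical name.
 */
static void mysubsys_enter_direct_reclaim(void)
{
	if (cpuset_memory_pressure_enabled)
		__cpuset_memory_pressure_bump();

	/* ... proceed with synchronous page reclaim ... */
}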
5068
5069#ifdef CONFIG_PROC_PID_CPUSET
5070/*
5071 * proc_cpuset_show()
5072	 *  - Print the task's cpuset path into seq_file.
5073 *  - Used for /proc/<pid>/cpuset.
5074 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
5075 *    doesn't really matter if tsk->cpuset changes after we read it,
5076 *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
5077 *    anyway.
5078 */
5079int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
5080		     struct pid *pid, struct task_struct *tsk)
5081{
5082	char *buf;
5083	struct cgroup_subsys_state *css;
5084	int retval;
5085
5086	retval = -ENOMEM;
5087	buf = kmalloc(PATH_MAX, GFP_KERNEL);
5088	if (!buf)
5089		goto out;
5090
5091	css = task_get_css(tsk, cpuset_cgrp_id);
5092	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
5093				current->nsproxy->cgroup_ns);
5094	css_put(css);
5095	if (retval == -E2BIG)
5096		retval = -ENAMETOOLONG;
5097	if (retval < 0)
5098		goto out_free;
5099	seq_puts(m, buf);
5100	seq_putc(m, '\n');
5101	retval = 0;
5102out_free:
5103	kfree(buf);
5104out:
5105	return retval;
5106}
5107#endif /* CONFIG_PROC_PID_CPUSET */
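/*
 * Illustrative sketch (not part of the original file): a standalone
 * userspace program reading the path that proc_cpuset_show() emits.
 * Build it separately with a normal C compiler; it is not kernel code,
 * so it is fenced off from the kernel build below.
 */
#if 0	/* userspace example only */
#include <stdio.h>

int main(void)
{
	char path[4096];
	FILE *f = fopen("/proc/self/cpuset", "r");

	if (f && fgets(path, sizeof(path), f))
		printf("cpuset path: %s", path);	/* e.g. "/" for the root cpuset */
	if (f)
		fclose(f);
	return 0;
}
#endif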
5108
5109/* Display task mems_allowed in /proc/<pid>/status file. */
5110void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
5111{
5112	seq_printf(m, "Mems_allowed:\t%*pb\n",
5113		   nodemask_pr_args(&task->mems_allowed));
5114	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
5115		   nodemask_pr_args(&task->mems_allowed));
5116}