/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <linux/oom.h>
#include <linux/sched/isolation.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset.  They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */
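
	/*
	 * A worked example of the rule above (CPU numbers are
	 * illustrative only): with a parent whose effective_cpus is
	 * 0-3, a child configured with cpus_allowed 2-5 gets
	 * effective_cpus 2-3.  Had it been configured with 6-7, the
	 * intersection would be empty and the child would inherit the
	 * parent's 0-3 instead.
	 */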

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * CPUs allocated to child sub-partitions (default hierarchy only)
	 * - CPUs granted by the parent = effective_cpus U subparts_cpus
	 * - effective_cpus and subparts_cpus are mutually exclusive.
	 *
	 * effective_cpus contains only onlined CPUs, but subparts_cpus
	 * may have offlined ones.
	 */
	cpumask_var_t subparts_cpus;

	/*
	 * The old Memory Nodes that tasks in this cpuset took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemasks updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;

	/* number of CPUs in subparts_cpus */
	int nr_subparts_cpus;

	/* partition root state */
	int partition_root_state;

	/*
	 * Default hierarchy only:
	 * use_parent_ecpus - set if using parent's effective_cpus
	 * child_ecpus_count - # of children with use_parent_ecpus set
	 */
	int use_parent_ecpus;
	int child_ecpus_count;
};

/*
 * Partition root states:
 *
 *   0 - not a partition root
 *
 *   1 - partition root
 *
 *  -1 - invalid partition root
 *       None of the cpus in cpus_allowed can be put into the parent's
 *       subparts_cpus.  In this case, the cpuset is not a real partition
 *       root anymore.  However, the CPU_EXCLUSIVE bit will still be set
 *       and the cpuset can be restored back to a partition root if the
 *       parent cpuset can give more CPUs back to this child cpuset.
 */
#define PRS_DISABLED		0
#define PRS_ENABLED		1
#define PRS_ERROR		-1
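
/*
 * Illustrative transitions (see update_prstate() and
 * update_parent_subparts_cpumask() below): a cpuset is switched
 * between PRS_DISABLED and PRS_ENABLED by an explicit user request,
 * while PRS_ENABLED degrades to PRS_ERROR - and recovers from it -
 * automatically as the parent loses or regains grantable CPUs.
 */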

/*
 * Temporary cpumasks for working with partitions that are passed among
 * functions to avoid memory allocation in inner functions.
 */
struct tmpmasks {
	cpumask_var_t addmask, delmask;	/* For partition root */
	cpumask_var_t new_cpus;		/* For update_cpumasks_hier() */
};

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static inline int is_partition_root(const struct cpuset *cs)
{
	return cs->partition_root_state > 0;
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
	.partition_root_state = PRS_ENABLED,
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree.  @root_cs is included in
 * the iteration and is the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)	\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
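
/*
 * A minimal usage sketch for the iterators above (illustrative only;
 * wants_subtree_skipped() is a hypothetical predicate):
 *
 *	struct cpuset *cp;
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cp, pos, &top_cpuset) {
 *		if (wants_subtree_skipped(cp))
 *			pos = css_rightmost_descendant(pos);
 *	}
 *	rcu_read_unlock();
 */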

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock.  We also require taking task_lock() when dereferencing a
 * task's cpuset pointer.  See "The task_lock() exception", at the end of this
 * comment.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator can not be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by another task; we use alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */
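
/*
 * A condensed sketch of the write-side protocol described above
 * (illustrative; the real update paths below interleave validation
 * and allocation between the two lock acquisitions):
 *
 *	percpu_down_write(&cpuset_rwsem);
 *	... validate and allocate; readers may still take callback_lock ...
 *	spin_lock_irq(&callback_lock);
 *	... publish the new masks ...
 *	spin_unlock_irq(&callback_lock);
 *	percpu_up_write(&cpuset_rwsem);
 */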

DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);

void cpuset_read_lock(void)
{
	percpu_down_read(&cpuset_rwsem);
}

void cpuset_read_unlock(void)
{
	percpu_up_read(&cpuset_rwsem);
}

static DEFINE_SPINLOCK(callback_lock);

static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * Cgroup v2 behavior is used when on default hierarchy or the
 * cgroup_v2_mode flag is set.
 */
static inline bool is_in_v2_mode(void)
{
	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
}

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
		cs = parent_cs(cs);
		if (unlikely(!cs)) {
			/*
			 * The top cpuset doesn't have any online cpu as a
			 * consequence of a race between cpuset_hotplug_work
			 * and cpu hotplug notifier.  But we know the top
			 * cpuset's effective_cpus is on its way to be
			 * identical to cpu_online_mask.
			 */
			cpumask_copy(pmask, cpu_online_mask);
			return;
		}
	}
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_cpumasks - allocate three cpumasks for cpuset
 * @cs:  the cpuset that has cpumasks to be allocated.
 * @tmp: the tmpmasks structure pointer
 * Return: 0 if successful, -ENOMEM otherwise.
 *
 * Only one of the two input arguments should be non-NULL.
 */
static inline int alloc_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	cpumask_var_t *pmask1, *pmask2, *pmask3;

	if (cs) {
		pmask1 = &cs->cpus_allowed;
		pmask2 = &cs->effective_cpus;
		pmask3 = &cs->subparts_cpus;
	} else {
		pmask1 = &tmp->new_cpus;
		pmask2 = &tmp->addmask;
		pmask3 = &tmp->delmask;
	}

	if (!zalloc_cpumask_var(pmask1, GFP_KERNEL))
		return -ENOMEM;

	if (!zalloc_cpumask_var(pmask2, GFP_KERNEL))
		goto free_one;

	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
		goto free_two;

	return 0;

free_two:
	free_cpumask_var(*pmask2);
free_one:
	free_cpumask_var(*pmask1);
	return -ENOMEM;
}

/**
 * free_cpumasks - free cpumasks in a tmpmasks structure
 * @cs:  the cpuset whose cpumasks are to be freed.
 * @tmp: the tmpmasks structure pointer
 */
static inline void free_cpumasks(struct cpuset *cs, struct tmpmasks *tmp)
{
	if (cs) {
		free_cpumask_var(cs->cpus_allowed);
		free_cpumask_var(cs->effective_cpus);
		free_cpumask_var(cs->subparts_cpus);
	}
	if (tmp) {
		free_cpumask_var(tmp->new_cpus);
		free_cpumask_var(tmp->addmask);
		free_cpumask_var(tmp->delmask);
	}
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (alloc_cpumasks(trial, NULL)) {
		kfree(trial);
		return NULL;
	}

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;
}

/**
 * free_cpuset - free the cpuset
 * @cs: the cpuset to be freed
 */
static inline void free_cpuset(struct cpuset *cs)
{
	free_cpumasks(cs, NULL);
	kfree(cs);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

#ifdef CONFIG_SMP
/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
	rcu_read_unlock();
}

/* Must be called with cpuset_mutex held. */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to the
 * kernel/sched/core.c routine partition_sched_domains(), which will
 * rebuild the scheduler's load balancing domains (sched domains) as
 * specified by that partial partition.
 *
 * See "What is sched_load_balance" in Documentation/admin-guide/cgroup-v1/cpusets.rst
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
 * Must be called with cpuset_mutex held.
 *
 * The three key local variables below are:
 *	cp - cpuset pointer, used (together with pos_css) to perform a
 *	     top-down scan of all cpusets.  For our purposes, rebuilding
 *	     the scheduler's sched domains, we can ignore !is_sched_load_
 *	     balance cpusets.
 *	csa - (for CpuSet Array) Array of pointers to all the cpusets
 *	      that need to be load balanced, for convenient iterative
 *	      access by the subsequent code that finds the best partition,
 *	      i.e. the set of domains (subsets) of CPUs such that the
 *	      cpus_allowed of every cpuset marked is_sched_load_balance
 *	      is a subset of one of these domains, while there are as
 *	      many such domains as possible, each as small as possible.
 *	doms - Conversion of 'csa' to an array of cpumasks, for passing to
 *	       the kernel/sched/core.c routine partition_sched_domains() in a
 *	       convenient format, that can be easily compared to the prior
 *	       value to determine what partition elements (sched domains)
 *	       were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed but don't share the same 'pn' partition number,
 *	and merges them into the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
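/*
 * A tiny worked example (CPU numbers illustrative): csa[] holding
 * A (cpus 0-1), B (cpus 1-2) and C (cpus 4-5) starts as three
 * candidate partitions.  A and B overlap on CPU 1, so B is merged
 * into A's 'pn' and ndoms drops to 2, yielding the two sched
 * domains {0-2} and {4-5}.
 */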
static int generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes)
{
	struct cpuset *cp;	/* top-down scan of cpusets */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
	int ndoms = 0;		/* number of sched domains in result */
	int nslot;		/* next empty doms[] struct cpumask slot */
	struct cgroup_subsys_state *pos_css;
	bool root_load_balance = is_sched_load_balance(&top_cpuset);

	doms = NULL;
	dattr = NULL;
	csa = NULL;

	/* Special case for the 99% of systems with one, full, sched domain */
	if (root_load_balance && !top_cpuset.nr_subparts_cpus) {
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
		if (!doms)
			goto done;

		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
			update_domain_attr_tree(dattr, &top_cpuset);
		}
		cpumask_and(doms[0], top_cpuset.effective_cpus,
			    housekeeping_cpumask(HK_FLAG_DOMAIN));

		goto done;
	}

	csa = kmalloc_array(nr_cpusets(), sizeof(cp), GFP_KERNEL);
	if (!csa)
		goto done;
	csn = 0;

	rcu_read_lock();
	if (root_load_balance)
		csa[csn++] = &top_cpuset;
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;
		/*
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
		 *
		 * If root is load-balancing, we can skip @cp if it
		 * is a subset of the root's effective_cpus.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    !(is_sched_load_balance(cp) &&
		      cpumask_intersects(cp->cpus_allowed,
					 housekeeping_cpumask(HK_FLAG_DOMAIN))))
			continue;

		if (root_load_balance &&
		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
			continue;

		if (is_sched_load_balance(cp) &&
		    !cpumask_empty(cp->effective_cpus))
			csa[csn++] = cp;

		/* skip @cp's subtree if not a partition root */
		if (!is_partition_root(cp))
			pos_css = css_rightmost_descendant(pos_css);
	}
	rcu_read_unlock();

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
	doms = alloc_sched_domains(ndoms);
	if (!doms)
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case.  No need to abort if alloc fails.
	 */
	dattr = kmalloc_array(ndoms, sizeof(struct sched_domain_attr),
			      GFP_KERNEL);

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		struct cpumask *dp;
		int apn = a->pn;

		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

		dp = doms[nslot];

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
					nslot, ndoms, csn, i, apn);
				warnings--;
			}
			continue;
		}

		cpumask_clear(dp);
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
				cpumask_or(dp, dp, b->effective_cpus);
				cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN));
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
			}
		}
		nslot++;
	}
	BUG_ON(nslot != ndoms);

done:
	kfree(csa);

	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}

static void update_tasks_root_domain(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);

	while ((task = css_task_iter_next(&it)))
		dl_add_task_root_domain(task);

	css_task_iter_end(&it);
}

static void rebuild_root_domains(void)
{
	struct cpuset *cs = NULL;
	struct cgroup_subsys_state *pos_css;

	percpu_rwsem_assert_held(&cpuset_rwsem);
	lockdep_assert_cpus_held();
	lockdep_assert_held(&sched_domains_mutex);

	cgroup_enable_task_cg_lists();

	rcu_read_lock();

	/*
	 * Clear default root domain DL accounting, it will be computed again
	 * if a task belongs to it.
	 */
	dl_clear_root_domain(&def_root_domain);

	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {

		if (cpumask_empty(cs->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		css_get(&cs->css);

		rcu_read_unlock();

		update_tasks_root_domain(cs);

		rcu_read_lock();
		css_put(&cs->css);
	}
	rcu_read_unlock();
}

static void
partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new)
{
	mutex_lock(&sched_domains_mutex);
	partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
	rebuild_root_domains();
	mutex_unlock(&sched_domains_mutex);
}

/*
 * Rebuild scheduler domains.
 *
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
 *
 * Call with cpuset_mutex held and get_online_cpus() already taken.
 */
static void rebuild_sched_domains_locked(void)
{
	struct sched_domain_attr *attr;
	cpumask_var_t *doms;
	int ndoms;

	lockdep_assert_cpus_held();
	percpu_rwsem_assert_held(&cpuset_rwsem);

	/*
	 * We have raced with CPU hotplug.  Don't do anything to avoid
	 * passing doms with offlined cpu to partition_sched_domains().
	 * Anyway, the hotplug work item will rebuild sched domains.
	 */
	if (!top_cpuset.nr_subparts_cpus &&
	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
		return;

	if (top_cpuset.nr_subparts_cpus &&
	    !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask))
		return;

	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
#else /* !CONFIG_SMP */
static void rebuild_sched_domains_locked(void)
{
}
#endif /* CONFIG_SMP */

void rebuild_sched_domains(void)
{
	get_online_cpus();
	percpu_down_write(&cpuset_rwsem);
	rebuild_sched_domains_locked();
	percpu_up_write(&cpuset_rwsem);
	put_online_cpus();
}

/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its cpus_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_cpumask(struct cpuset *cs)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		set_cpus_allowed_ptr(task, cs->effective_cpus);
	css_task_iter_end(&it);
}

/**
 * compute_effective_cpumask - Compute the effective cpumask of the cpuset
 * @new_cpus: the temp variable for the new effective_cpus mask
 * @cs: the cpuset that needs to recompute the new effective_cpus mask
 * @parent: the parent cpuset
 *
 * If the parent has subpartition CPUs, include them in the list of
 * allowable CPUs in computing the new effective_cpus mask.  Since offlined
 * CPUs are not removed from subparts_cpus, we have to use cpu_active_mask
 * to mask those out.
 */
static void compute_effective_cpumask(struct cpumask *new_cpus,
				      struct cpuset *cs, struct cpuset *parent)
{
	if (parent->nr_subparts_cpus) {
		cpumask_or(new_cpus, parent->effective_cpus,
			   parent->subparts_cpus);
		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
	} else {
		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
	}
}
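
/*
 * A worked example for the helper above (illustrative numbers): with
 * parent->effective_cpus = 0-1, parent->subparts_cpus = 2-3 (CPU 3
 * having gone offline, i.e. cpu_active_mask = 0-2) and
 * cs->cpus_allowed = 1-3, new_cpus ends up as 1-2: the offline CPU 3
 * is masked out even though it is still present in subparts_cpus.
 */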

/*
 * Commands for update_parent_subparts_cpumask
 */
enum subparts_cmd {
	partcmd_enable,		/* Enable partition root */
	partcmd_disable,	/* Disable partition root */
	partcmd_update,		/* Update parent's subparts_cpus */
};

/**
 * update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
 * @cpuset:  The cpuset that requests change in partition root state
 * @cmd:     Partition root state change command
 * @newmask: Optional new cpumask for partcmd_update
 * @tmp:     Temporary addmask and delmask
 * Return:   0, 1 or an error code
 *
 * For partcmd_enable, the cpuset is being transformed from a non-partition
 * root to a partition root.  The cpus_allowed mask of the given cpuset will
 * be put into parent's subparts_cpus and taken away from parent's
 * effective_cpus.  The function will return 0 if all the CPUs listed in
 * cpus_allowed can be granted or an error code will be returned.
 *
 * For partcmd_disable, the cpuset is being transformed from a partition
 * root back to a non-partition root.  Any CPUs in cpus_allowed that are in
 * parent's subparts_cpus will be taken away from that cpumask and put back
 * into parent's effective_cpus.  0 should always be returned.
 *
 * For partcmd_update, if the optional newmask is specified, the cpu
 * list is to be changed from cpus_allowed to newmask.  Otherwise,
 * cpus_allowed is assumed to remain the same.  The cpuset should either
 * be a partition root or an invalid partition root.  The partition root
 * state may change if newmask is NULL and none of the requested CPUs can
 * be granted by the parent.  The function will return 1 if changes to
 * parent's subparts_cpus and effective_cpus happen or 0 otherwise.
 * Error code should only be returned when newmask is non-NULL.
 *
 * The partcmd_enable and partcmd_disable commands are used by
 * update_prstate().  The partcmd_update command is used by
 * update_cpumasks_hier() with newmask NULL and update_cpumask() with
 * newmask set.
 *
 * The checking is more strict when enabling partition root than the
 * other two commands.
 *
 * Because of the implicit cpu exclusive nature of a partition root,
 * cpumask changes that violate the cpu exclusivity rule will not be
 * permitted when checked by validate_change().  The validate_change()
 * function will also prevent any changes to the cpu list if it is not
 * a superset of children's cpu lists.
 */
static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
					  struct cpumask *newmask,
					  struct tmpmasks *tmp)
{
	struct cpuset *parent = parent_cs(cpuset);
	int adding;	/* Moving cpus from effective_cpus to subparts_cpus */
	int deleting;	/* Moving cpus from subparts_cpus to effective_cpus */
	bool part_error = false;	/* Partition error? */

	percpu_rwsem_assert_held(&cpuset_rwsem);

	/*
	 * The parent must be a partition root.
	 * The new cpumask, if present, or the current cpus_allowed must
	 * not be empty.
	 */
	if (!is_partition_root(parent) ||
	   (newmask && cpumask_empty(newmask)) ||
	   (!newmask && cpumask_empty(cpuset->cpus_allowed)))
		return -EINVAL;

	/*
	 * Enabling/disabling partition root is not allowed if there are
	 * online children.
	 */
	if ((cmd != partcmd_update) && css_has_online_children(&cpuset->css))
		return -EBUSY;

	/*
	 * Enabling partition root is not allowed if not all the CPUs
	 * can be granted from parent's effective_cpus or at least one
	 * CPU will be left after that.
	 */
	if ((cmd == partcmd_enable) &&
	   (!cpumask_subset(cpuset->cpus_allowed, parent->effective_cpus) ||
	     cpumask_equal(cpuset->cpus_allowed, parent->effective_cpus)))
		return -EINVAL;

	/*
	 * A cpumask update cannot make parent's effective_cpus become empty.
	 */
	adding = deleting = false;
	if (cmd == partcmd_enable) {
		cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
		adding = true;
	} else if (cmd == partcmd_disable) {
		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
				       parent->subparts_cpus);
	} else if (newmask) {
		/*
		 * partcmd_update with newmask:
		 *
		 * delmask = cpus_allowed & ~newmask & parent->subparts_cpus
		 * addmask = newmask & parent->effective_cpus
		 *		     & ~parent->subparts_cpus
		 */
		cpumask_andnot(tmp->delmask, cpuset->cpus_allowed, newmask);
		deleting = cpumask_and(tmp->delmask, tmp->delmask,
				       parent->subparts_cpus);

		cpumask_and(tmp->addmask, newmask, parent->effective_cpus);
		adding = cpumask_andnot(tmp->addmask, tmp->addmask,
					parent->subparts_cpus);
		/*
		 * Return error if the new effective_cpus could become empty.
		 */
		if (adding &&
		    cpumask_equal(parent->effective_cpus, tmp->addmask)) {
			if (!deleting)
				return -EINVAL;
			/*
			 * As some of the CPUs in subparts_cpus might have
			 * been offlined, we need to compute the real delmask
			 * to confirm that.
			 */
			if (!cpumask_and(tmp->addmask, tmp->delmask,
					 cpu_active_mask))
				return -EINVAL;
			cpumask_copy(tmp->addmask, parent->effective_cpus);
		}
	} else {
		/*
		 * partcmd_update w/o newmask:
		 *
		 * addmask = cpus_allowed & parent->effective_cpus
		 *
		 * Note that parent's subparts_cpus may have been
		 * pre-shrunk in case there is a change in the cpu list.
		 * So no deletion is needed.
		 */
		adding = cpumask_and(tmp->addmask, cpuset->cpus_allowed,
				     parent->effective_cpus);
		part_error = cpumask_equal(tmp->addmask,
					   parent->effective_cpus);
	}

	if (cmd == partcmd_update) {
		int prev_prs = cpuset->partition_root_state;

		/*
		 * Check for possible transition between PRS_ENABLED
		 * and PRS_ERROR.
		 */
		switch (cpuset->partition_root_state) {
		case PRS_ENABLED:
			if (part_error)
				cpuset->partition_root_state = PRS_ERROR;
			break;
		case PRS_ERROR:
			if (!part_error)
				cpuset->partition_root_state = PRS_ENABLED;
			break;
		}
		/*
		 * Set part_error if previously in invalid state.
		 */
		part_error = (prev_prs == PRS_ERROR);
	}

	if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
		return 0;	/* Nothing needs to be done */

	if (cpuset->partition_root_state == PRS_ERROR) {
		/*
		 * Remove all its cpus from parent's subparts_cpus.
		 */
		adding = false;
		deleting = cpumask_and(tmp->delmask, cpuset->cpus_allowed,
				       parent->subparts_cpus);
	}

	if (!adding && !deleting)
		return 0;

	/*
	 * Change the parent's subparts_cpus.
	 * Newly added CPUs will be removed from effective_cpus and
	 * newly deleted ones will be added back to effective_cpus.
	 */
	spin_lock_irq(&callback_lock);
	if (adding) {
		cpumask_or(parent->subparts_cpus,
			   parent->subparts_cpus, tmp->addmask);
		cpumask_andnot(parent->effective_cpus,
			       parent->effective_cpus, tmp->addmask);
	}
	if (deleting) {
		cpumask_andnot(parent->subparts_cpus,
			       parent->subparts_cpus, tmp->delmask);
		/*
		 * Some of the CPUs in subparts_cpus might have been offlined.
		 */
		cpumask_and(tmp->delmask, tmp->delmask, cpu_active_mask);
		cpumask_or(parent->effective_cpus,
			   parent->effective_cpus, tmp->delmask);
	}

	parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
	spin_unlock_irq(&callback_lock);

	return cmd == partcmd_update;
}
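
/*
 * A worked partcmd_enable example (illustrative): with
 * parent->effective_cpus = 0-3, enabling a child with cpus_allowed =
 * 2-3 succeeds, moving 2-3 into parent->subparts_cpus and leaving
 * the parent with effective_cpus 0-1.  A child asking for 0-3 fails
 * with -EINVAL because the parent would be left without any
 * effective CPU.
 */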

/*
 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
 * @cs:  the cpuset to consider
 * @tmp: temp variables for calculating effective_cpus & partition setup
 *
 * When the configured cpumask is changed, the effective cpumasks of this
 * cpuset and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;
	bool need_rebuild_sched_domains = false;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		compute_effective_cpumask(tmp->new_cpus, cp, parent);

		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some CPUs.
		 */
		if (is_in_v2_mode() && cpumask_empty(tmp->new_cpus)) {
			cpumask_copy(tmp->new_cpus, parent->effective_cpus);
			if (!cp->use_parent_ecpus) {
				cp->use_parent_ecpus = true;
				parent->child_ecpus_count++;
			}
		} else if (cp->use_parent_ecpus) {
			cp->use_parent_ecpus = false;
			WARN_ON_ONCE(!parent->child_ecpus_count);
			parent->child_ecpus_count--;
		}

		/*
		 * Skip the whole subtree if the cpumask remains the same
		 * and has no partition root state.
		 */
		if (!cp->partition_root_state &&
		    cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		/*
		 * update_parent_subparts_cpumask() should have been called
		 * for cs already in update_cpumask().  We should also call
		 * update_tasks_cpumask() again for tasks in the parent
		 * cpuset if the parent's subparts_cpus changes.
		 */
		if ((cp != cs) && cp->partition_root_state) {
			switch (parent->partition_root_state) {
			case PRS_DISABLED:
				/*
				 * If parent is not a partition root or an
				 * invalid partition root, clear the state
				 * and the CS_CPU_EXCLUSIVE flag.
				 */
				WARN_ON_ONCE(cp->partition_root_state
					     != PRS_ERROR);
				cp->partition_root_state = 0;

				/*
				 * clear_bit() is an atomic operation and
				 * readers aren't interested in the state
				 * of CS_CPU_EXCLUSIVE anyway.  So we can
				 * just update the flag without holding
				 * the callback_lock.
				 */
				clear_bit(CS_CPU_EXCLUSIVE, &cp->flags);
				break;

			case PRS_ENABLED:
				if (update_parent_subparts_cpumask(cp, partcmd_update, NULL, tmp))
					update_tasks_cpumask(parent);
				break;

			case PRS_ERROR:
				/*
				 * When the parent is invalid, this child
				 * has to be invalid too.
				 */
				cp->partition_root_state = PRS_ERROR;
				if (cp->nr_subparts_cpus) {
					cp->nr_subparts_cpus = 0;
					cpumask_clear(cp->subparts_cpus);
				}
				break;
			}
		}

		if (!css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		spin_lock_irq(&callback_lock);

		cpumask_copy(cp->effective_cpus, tmp->new_cpus);
		if (cp->nr_subparts_cpus &&
		   (cp->partition_root_state != PRS_ENABLED)) {
			cp->nr_subparts_cpus = 0;
			cpumask_clear(cp->subparts_cpus);
		} else if (cp->nr_subparts_cpus) {
			/*
			 * Make sure that effective_cpus & subparts_cpus
			 * are mutually exclusive.
			 *
			 * In the unlikely event that effective_cpus
			 * becomes empty, we clear cp->nr_subparts_cpus and
			 * let its child partition roots compete for
			 * CPUs again.
			 */
			cpumask_andnot(cp->effective_cpus, cp->effective_cpus,
				       cp->subparts_cpus);
			if (cpumask_empty(cp->effective_cpus)) {
				cpumask_copy(cp->effective_cpus, tmp->new_cpus);
				cpumask_clear(cp->subparts_cpus);
				cp->nr_subparts_cpus = 0;
			} else if (!cpumask_subset(cp->subparts_cpus,
						   tmp->new_cpus)) {
				cpumask_andnot(cp->subparts_cpus,
					cp->subparts_cpus, tmp->new_cpus);
				cp->nr_subparts_cpus
					= cpumask_weight(cp->subparts_cpus);
			}
		}
		spin_unlock_irq(&callback_lock);

		WARN_ON(!is_in_v2_mode() &&
			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));

		update_tasks_cpumask(cp);

		/*
		 * On legacy hierarchy, if the effective cpumask of any non-
		 * empty cpuset is changed, we need to rebuild sched domains.
		 * On default hierarchy, the cpuset needs to be a partition
		 * root as well.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    is_sched_load_balance(cp) &&
		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
		    is_partition_root(cp)))
			need_rebuild_sched_domains = true;

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();

	if (need_rebuild_sched_domains)
		rebuild_sched_domains_locked();
}

/**
 * update_sibling_cpumasks - Update siblings' cpumasks
 * @parent:  Parent cpuset
 * @cs:      Current cpuset
 * @tmp:     Temp variables
 */
static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
				    struct tmpmasks *tmp)
{
	struct cpuset *sibling;
	struct cgroup_subsys_state *pos_css;

	/*
	 * Check all its siblings and call update_cpumasks_hier()
	 * if their use_parent_ecpus flag is set in order for them
	 * to use the right effective_cpus value.
	 */
	rcu_read_lock();
	cpuset_for_each_child(sibling, pos_css, parent) {
		if (sibling == cs)
			continue;
		if (!sibling->use_parent_ecpus)
			continue;

		update_cpumasks_hier(sibling, tmp);
	}
	rcu_read_unlock();
}

/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
 * @trialcs: trial cpuset
 * @buf: buffer of cpu numbers written to this cpuset
 */
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
{
	int retval;
	struct tmpmasks tmp;

	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
	if (cs == &top_cpuset)
		return -EACCES;

	/*
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
	 */
	if (!*buf) {
		cpumask_clear(trialcs->cpus_allowed);
	} else {
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
		if (retval < 0)
			return retval;

		if (!cpumask_subset(trialcs->cpus_allowed,
				    top_cpuset.cpus_allowed))
			return -EINVAL;
	}

	/* Nothing to do if the cpus didn't change */
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
		return 0;

	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

#ifdef CONFIG_CPUMASK_OFFSTACK
	/*
	 * Use the cpumasks in trialcs for tmpmasks when they are pointers
	 * to allocated cpumasks.
	 */
	tmp.addmask  = trialcs->subparts_cpus;
	tmp.delmask  = trialcs->effective_cpus;
	tmp.new_cpus = trialcs->cpus_allowed;
#endif

	if (cs->partition_root_state) {
		/* Cpumask of a partition root cannot be empty */
		if (cpumask_empty(trialcs->cpus_allowed))
			return -EINVAL;
		if (update_parent_subparts_cpumask(cs, partcmd_update,
					trialcs->cpus_allowed, &tmp) < 0)
			return -EINVAL;
	}

	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);

	/*
	 * Make sure that subparts_cpus is a subset of cpus_allowed.
	 */
	if (cs->nr_subparts_cpus) {
		cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus,
			       cs->cpus_allowed);
		cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
	}
	spin_unlock_irq(&callback_lock);

	update_cpumasks_hier(cs, &tmp);

	if (cs->partition_root_state) {
		struct cpuset *parent = parent_cs(cs);

		/*
		 * For partition root, update the cpumasks of sibling
		 * cpusets if they use parent's effective_cpus.
		 */
		if (parent->child_ecpus_count)
			update_sibling_cpumasks(parent, cs, &tmp);
	}
	return 0;
}

/*
 * Migrate memory region from one set of nodes to another.  This is
 * performed asynchronously as it can be called from process migration path
 * holding locks involved in process management.  All mm migrations are
 * performed in the queued order and can be waited for by flushing
 * cpuset_migrate_mm_wq.
 */

struct cpuset_migrate_mm_work {
	struct work_struct	work;
	struct mm_struct	*mm;
	nodemask_t		from;
	nodemask_t		to;
};

static void cpuset_migrate_mm_workfn(struct work_struct *work)
{
	struct cpuset_migrate_mm_work *mwork =
		container_of(work, struct cpuset_migrate_mm_work, work);

	/* on a wq worker, no need to worry about %current's mems_allowed */
	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
	mmput(mwork->mm);
	kfree(mwork);
}

static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
	struct cpuset_migrate_mm_work *mwork;

	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
	if (mwork) {
		mwork->mm = mm;
		mwork->from = *from;
		mwork->to = *to;
		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
		queue_work(cpuset_migrate_mm_wq, &mwork->work);
	} else {
		mmput(mm);
	}
}

static void cpuset_post_attach(void)
{
	flush_workqueue(cpuset_migrate_mm_wq);
}

/*
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be set
 *
 * We use the mems_allowed_seq seqlock to safely update both
 * tsk->mems_allowed and rebind the task's mempolicy, if any.  If the
 * task is allocating in parallel, it might temporarily see an empty
 * intersection, which results in a seqlock check and retry before
 * OOM or allocation failure.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
	task_lock(tsk);

	local_irq_disable();
	write_seqcount_begin(&tsk->mems_allowed_seq);

	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems);
	tsk->mems_allowed = *newmems;

	write_seqcount_end(&tsk->mems_allowed_seq);
	local_irq_enable();

	task_unlock(tsk);
}
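
/*
 * Reader-side sketch (illustrative): page allocators sample the
 * nodemask under the same seqcount, so an update above only costs a
 * concurrent allocator a retry:
 *
 *	unsigned int seq;
 *	nodemask_t nodes;
 *
 *	do {
 *		seq = read_mems_allowed_begin();
 *		nodes = current->mems_allowed;
 *		... attempt allocation from 'nodes' ...
 *	} while (read_mems_allowed_retry(seq));
 */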

static void *cpuset_being_rebound;

/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 *
 * Iterate through each task of @cs updating its mems_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
 */
static void update_tasks_nodemask(struct cpuset *cs)
{
	static nodemask_t newmems;	/* protected by cpuset_mutex */
	struct css_task_iter it;
	struct task_struct *task;

	cpuset_being_rebound = cs;	/* causes mpol_dup() rebind */

	guarantee_online_mems(cs, &newmems);

	/*
	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
	 * the global cpuset_mutex, we know that no other rebind effort
	 * will be contending for the global variable cpuset_being_rebound.
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
	 * is idempotent.  Also migrate pages in each mm to new nodes.
	 */
	css_task_iter_start(&cs->css, 0, &it);
	while ((task = css_task_iter_next(&it))) {
		struct mm_struct *mm;
		bool migrate;

		cpuset_change_task_nodemask(task, &newmems);

		mm = get_task_mm(task);
		if (!mm)
			continue;

		migrate = is_memory_migrate(cs);

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate)
			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
		else
			mmput(mm);
	}
	css_task_iter_end(&it);

	/*
	 * All the tasks' nodemasks have been updated, update
	 * cs->old_mems_allowed.
	 */
	cs->old_mems_allowed = newmems;

	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
	cpuset_being_rebound = NULL;
}

/*
 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
 * @cs: the cpuset to consider
 * @new_mems: a temp variable for calculating new effective_mems
 *
 * When the configured nodemask is changed, the effective nodemasks of this
 * cpuset and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);

		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some MEMs.
		 */
		if (is_in_v2_mode() && nodes_empty(*new_mems))
			*new_mems = parent->effective_mems;

		/* Skip the whole subtree if the nodemask remains the same. */
		if (nodes_equal(*new_mems, cp->effective_mems)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		if (!css_tryget_online(&cp->css))
			continue;
		rcu_read_unlock();

		spin_lock_irq(&callback_lock);
		cp->effective_mems = *new_mems;
		spin_unlock_irq(&callback_lock);

		WARN_ON(!is_in_v2_mode() &&
			!nodes_equal(cp->mems_allowed, cp->effective_mems));

		update_tasks_nodemask(cp);

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();
}
1750
1751/*
1752 * Handle user request to change the 'mems' memory placement
1753 * of a cpuset. Needs to validate the request, update the
1754 * cpusets mems_allowed, and for each task in the cpuset,
1755 * update mems_allowed and rebind task's mempolicy and any vma
1756 * mempolicies and if the cpuset is marked 'memory_migrate',
1757 * migrate the tasks pages to the new memory.
1758 *
1759 * Call with cpuset_mutex held. May take callback_lock during call.
1760 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1761 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
1762 * their mempolicies to the cpusets new mems_allowed.
1763 */
1764static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1765 const char *buf)
1766{
1767 int retval;
1768
1769 /*
1770 * top_cpuset.mems_allowed tracks node_stats[N_MEMORY];
1771 * it's read-only
1772 */
1773 if (cs == &top_cpuset) {
1774 retval = -EACCES;
1775 goto done;
1776 }
1777
1778 /*
1779 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1780 * Since nodelist_parse() fails on an empty mask, we special case
1781 * that parsing. The validate_change() call ensures that cpusets
1782 * with tasks have memory.
1783 */
1784 if (!*buf) {
1785 nodes_clear(trialcs->mems_allowed);
1786 } else {
1787 retval = nodelist_parse(buf, trialcs->mems_allowed);
1788 if (retval < 0)
1789 goto done;
1790
1791 if (!nodes_subset(trialcs->mems_allowed,
1792 top_cpuset.mems_allowed)) {
1793 retval = -EINVAL;
1794 goto done;
1795 }
1796 }
1797
1798 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1799 retval = 0; /* Too easy - nothing to do */
1800 goto done;
1801 }
1802 retval = validate_change(cs, trialcs);
1803 if (retval < 0)
1804 goto done;
1805
1806 spin_lock_irq(&callback_lock);
1807 cs->mems_allowed = trialcs->mems_allowed;
1808 spin_unlock_irq(&callback_lock);
1809
1810 /* use trialcs->mems_allowed as a temp variable */
1811 update_nodemasks_hier(cs, &trialcs->mems_allowed);
1812done:
1813 return retval;
1814}
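
/*
 * Illustrative userspace sketch (an assumption, not part of this
 * file): exercising the 'mems' write path above through a v1 cpuset
 * mount.  The mount point and group name are hypothetical; headers
 * and error handling are elided.
 *
 *	int fd = open("/sys/fs/cgroup/cpuset/grp/cpuset.mems", O_WRONLY);
 *	if (fd >= 0) {
 *		// "0-1" must be a subset of top_cpuset.mems_allowed or
 *		// update_nodemask() fails with -EINVAL; writing to the
 *		// root cpuset's file fails with -EACCES.
 *		if (write(fd, "0-1", 3) < 0)
 *			perror("cpuset.mems");
 *		close(fd);
 *	}
 */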
1815
1816bool current_cpuset_is_being_rebound(void)
1817{
1818 bool ret;
1819
1820 rcu_read_lock();
1821 ret = task_cs(current) == cpuset_being_rebound;
1822 rcu_read_unlock();
1823
1824 return ret;
1825}
1826
1827static int update_relax_domain_level(struct cpuset *cs, s64 val)
1828{
1829#ifdef CONFIG_SMP
1830 if (val < -1 || val >= sched_domain_level_max)
1831 return -EINVAL;
1832#endif
1833
1834 if (val != cs->relax_domain_level) {
1835 cs->relax_domain_level = val;
1836 if (!cpumask_empty(cs->cpus_allowed) &&
1837 is_sched_load_balance(cs))
1838 rebuild_sched_domains_locked();
1839 }
1840
1841 return 0;
1842}
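
/*
 * For reference (summarized, hedged, from the cpuset documentation
 * rather than this file): -1 requests the system default, 0 disables
 * the idle-CPU search entirely, and increasing values widen the
 * search, e.g. 1 covers SMT siblings and 2 covers cores in a package,
 * up to sched_domain_level_max.
 */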
1843
1844/**
1845 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1846 * @cs: the cpuset in which each task's spread flags needs to be changed
1847 *
1848 * Iterate through each task of @cs updating its spread flags. As this
1849 * function is called with cpuset_mutex held, cpuset membership stays
1850 * stable.
1851 */
1852static void update_tasks_flags(struct cpuset *cs)
1853{
1854 struct css_task_iter it;
1855 struct task_struct *task;
1856
1857 css_task_iter_start(&cs->css, 0, &it);
1858 while ((task = css_task_iter_next(&it)))
1859 cpuset_update_task_spread_flag(cs, task);
1860 css_task_iter_end(&it);
1861}
1862
1863/*
1864 * update_flag - read a 0 or a 1 in a file and update associated flag
1865 * bit: the bit to update (see cpuset_flagbits_t)
1866 * cs: the cpuset to update
1867 * turning_on: whether the flag is being set or cleared
1868 *
1869 * Call with cpuset_mutex held.
1870 */
1871
1872static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1873 int turning_on)
1874{
1875 struct cpuset *trialcs;
1876 int balance_flag_changed;
1877 int spread_flag_changed;
1878 int err;
1879
1880 trialcs = alloc_trial_cpuset(cs);
1881 if (!trialcs)
1882 return -ENOMEM;
1883
1884 if (turning_on)
1885 set_bit(bit, &trialcs->flags);
1886 else
1887 clear_bit(bit, &trialcs->flags);
1888
1889 err = validate_change(cs, trialcs);
1890 if (err < 0)
1891 goto out;
1892
1893 balance_flag_changed = (is_sched_load_balance(cs) !=
1894 is_sched_load_balance(trialcs));
1895
1896 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1897 || (is_spread_page(cs) != is_spread_page(trialcs)));
1898
1899 spin_lock_irq(&callback_lock);
1900 cs->flags = trialcs->flags;
1901 spin_unlock_irq(&callback_lock);
1902
1903 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1904 rebuild_sched_domains_locked();
1905
1906 if (spread_flag_changed)
1907 update_tasks_flags(cs);
1908out:
1909 free_cpuset(trialcs);
1910 return err;
1911}
1912
1913/*
1914 * update_prstate - update partition_root_state
1915 * cs: the cpuset to update
1916 * val: 0 - disabled, 1 - enabled
1917 *
1918 * Call with cpuset_mutex held.
1919 */
1920static int update_prstate(struct cpuset *cs, int val)
1921{
1922 int err;
1923 struct cpuset *parent = parent_cs(cs);
1924 struct tmpmasks tmp;
1925
1926 if ((val != 0) && (val != 1))
1927 return -EINVAL;
1928 if (val == cs->partition_root_state)
1929 return 0;
1930
1931 /*
1932 * Cannot force a partial or invalid partition root to a full
1933 * partition root.
1934 */
1935 if (val && cs->partition_root_state)
1936 return -EINVAL;
1937
1938 if (alloc_cpumasks(NULL, &tmp))
1939 return -ENOMEM;
1940
1941 err = -EINVAL;
1942 if (!cs->partition_root_state) {
1943 /*
1944 * Turning on partition root requires setting the
1945 * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
1946 * cannot be empty.
1947 */
1948 if (cpumask_empty(cs->cpus_allowed))
1949 goto out;
1950
1951 err = update_flag(CS_CPU_EXCLUSIVE, cs, 1);
1952 if (err)
1953 goto out;
1954
1955 err = update_parent_subparts_cpumask(cs, partcmd_enable,
1956 NULL, &tmp);
1957 if (err) {
1958 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1959 goto out;
1960 }
1961 cs->partition_root_state = PRS_ENABLED;
1962 } else {
1963 /*
1964 * Turning off partition root will clear the
1965 * CS_CPU_EXCLUSIVE bit.
1966 */
1967 if (cs->partition_root_state == PRS_ERROR) {
1968 cs->partition_root_state = 0;
1969 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1970 err = 0;
1971 goto out;
1972 }
1973
1974 err = update_parent_subparts_cpumask(cs, partcmd_disable,
1975 NULL, &tmp);
1976 if (err)
1977 goto out;
1978
1979 cs->partition_root_state = 0;
1980
1981 /* Turning off CS_CPU_EXCLUSIVE will not return error */
1982 update_flag(CS_CPU_EXCLUSIVE, cs, 0);
1983 }
1984
1985 /*
1986 * Update cpumask of parent's tasks except when it is the top
1987 * cpuset as some system daemons cannot be mapped to other CPUs.
1988 */
1989 if (parent != &top_cpuset)
1990 update_tasks_cpumask(parent);
1991
1992 if (parent->child_ecpus_count)
1993 update_sibling_cpumasks(parent, cs, &tmp);
1994
1995 rebuild_sched_domains_locked();
1996out:
1997 free_cpumasks(NULL, &tmp);
1998 return err;
1999}
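
/*
 * Summary of the transitions handled above (illustrative): a member
 * (state 0) with a non-empty cpus_allowed can become PRS_ENABLED,
 * which also sets CS_CPU_EXCLUSIVE; an enabled or erroneous
 * (PRS_ERROR) root can only go back to member.  Forcing an invalid
 * root directly to a full root is rejected with -EINVAL by the check
 * near the top.
 */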
2000
2001/*
2002 * Frequency meter - How fast is some event occurring?
2003 *
2004 * These routines manage a digitally filtered, constant time based,
2005 * event frequency meter. There are four routines:
2006 * fmeter_init() - initialize a frequency meter.
2007 * fmeter_markevent() - called each time the event happens.
2008 * fmeter_getrate() - returns the recent rate of such events.
2009 * fmeter_update() - internal routine used to update fmeter.
2010 *
2011 * A common data structure is passed to each of these routines,
2012 * which is used to keep track of the state required to manage the
2013 * frequency meter and its digital filter.
2014 *
2015 * The filter works on the number of events marked per unit time.
2016 * The filter is single-pole low-pass recursive (IIR). The time unit
2017 * is 1 second. Arithmetic is done using 32-bit integers scaled to
2018 * simulate 3 decimal digits of precision (multiplied by 1000).
2019 *
2020 * With an FM_COEF of 933, and a time base of 1 second, the filter
2021 * has a half-life of 10 seconds, meaning that if the events quit
2022 * happening, then the rate returned from the fmeter_getrate()
2023 * will be cut in half each 10 seconds, until it converges to zero.
2024 *
2025 * It is not worth doing a real infinitely recursive filter. If more
2026 * than FM_MAXTICKS ticks have elapsed since the last filter event,
2027 * just compute FM_MAXTICKS ticks worth, by which point the level
2028 * will be stable.
2029 *
2030 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
2031 * arithmetic overflow in the fmeter_update() routine.
2032 *
2033 * Given the simple 32 bit integer arithmetic used, this meter works
2034 * best for reporting rates between one per millisecond (msec) and
2035 * one per 32 (approx) seconds. At constant rates faster than one
2036 * per msec it maxes out at values just under 1,000,000. At constant
2037 * rates between one per msec, and one per second it will stabilize
2038 * to a value N*1000, where N is the rate of events per second.
2039 * At constant rates between one per second and one per 32 seconds,
2040 * it will be choppy, moving up on the seconds that have an event,
2041 * and then decaying until the next event. At rates slower than
2042 * about one in 32 seconds, it decays all the way back to zero between
2043 * each event.
2044 */
2045
2046#define FM_COEF 933 /* coefficient for half-life of 10 secs */
2047#define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
2048#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
2049#define FM_SCALE 1000 /* faux fixed point scale */
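
/*
 * Quick sanity check of these constants (illustrative arithmetic, not
 * from the original source): each one-second tick scales val by
 * FM_COEF/FM_SCALE = 0.933, and 0.933^10 ~= 0.50, giving the 10 second
 * half-life claimed above.  At a steady rate of R events per second
 * the filter converges to val = R * FM_SCALE, e.g. 5 events/sec
 * stabilizes near 5000.
 */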
2050
2051/* Initialize a frequency meter */
2052static void fmeter_init(struct fmeter *fmp)
2053{
2054 fmp->cnt = 0;
2055 fmp->val = 0;
2056 fmp->time = 0;
2057 spin_lock_init(&fmp->lock);
2058}
2059
2060/* Internal meter update - process cnt events and update value */
2061static void fmeter_update(struct fmeter *fmp)
2062{
2063 time64_t now;
2064 u32 ticks;
2065
2066 now = ktime_get_seconds();
2067 ticks = now - fmp->time;
2068
2069 if (ticks == 0)
2070 return;
2071
2072 ticks = min(FM_MAXTICKS, ticks);
2073 while (ticks-- > 0)
2074 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
2075 fmp->time = now;
2076
2077 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
2078 fmp->cnt = 0;
2079}
2080
2081/* Process any previous ticks, then bump cnt by one (times scale). */
2082static void fmeter_markevent(struct fmeter *fmp)
2083{
2084 spin_lock(&fmp->lock);
2085 fmeter_update(fmp);
2086 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
2087 spin_unlock(&fmp->lock);
2088}
2089
2090/* Process any previous ticks, then return current value. */
2091static int fmeter_getrate(struct fmeter *fmp)
2092{
2093 int val;
2094
2095 spin_lock(&fmp->lock);
2096 fmeter_update(fmp);
2097 val = fmp->val;
2098 spin_unlock(&fmp->lock);
2099 return val;
2100}
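
/*
 * Minimal usage sketch of the fmeter API above (illustrative; the
 * variable names are hypothetical):
 *
 *	struct fmeter fm;
 *	int rate;
 *
 *	fmeter_init(&fm);
 *	fmeter_markevent(&fm);		// once per event of interest
 *	rate = fmeter_getrate(&fm);	// recent events/sec, scaled by 1000
 *
 * This is how the per-cpuset "memory_pressure" value is produced; see
 * __cpuset_memory_pressure_bump() and cpuset_read_u64() elsewhere in
 * this file.
 */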
2101
2102static struct cpuset *cpuset_attach_old_cs;
2103
2104/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
2105static int cpuset_can_attach(struct cgroup_taskset *tset)
2106{
2107 struct cgroup_subsys_state *css;
2108 struct cpuset *cs;
2109 struct task_struct *task;
2110 int ret;
2111
2112 /* used later by cpuset_attach() */
2113 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
2114 cs = css_cs(css);
2115
2116 percpu_down_write(&cpuset_rwsem);
2117
2118 /* allow moving tasks into an empty cpuset if on default hierarchy */
2119 ret = -ENOSPC;
2120 if (!is_in_v2_mode() &&
2121 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
2122 goto out_unlock;
2123
2124 cgroup_taskset_for_each(task, css, tset) {
2125 ret = task_can_attach(task, cs->cpus_allowed);
2126 if (ret)
2127 goto out_unlock;
2128 ret = security_task_setscheduler(task);
2129 if (ret)
2130 goto out_unlock;
2131 }
2132
2133 /*
2134 * Mark attach is in progress. This makes validate_change() fail
2135 * changes which zero cpus/mems_allowed.
2136 */
2137 cs->attach_in_progress++;
2138 ret = 0;
2139out_unlock:
2140 percpu_up_write(&cpuset_rwsem);
2141 return ret;
2142}
2143
2144static void cpuset_cancel_attach(struct cgroup_taskset *tset)
2145{
2146 struct cgroup_subsys_state *css;
2147
2148 cgroup_taskset_first(tset, &css);
2149
2150 percpu_down_write(&cpuset_rwsem);
2151 css_cs(css)->attach_in_progress--;
2152 percpu_up_write(&cpuset_rwsem);
2153}
2154
2155/*
2156 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
2157 * but we can't allocate it dynamically there. Define it global and
2158 * allocate from cpuset_init().
2159 */
2160static cpumask_var_t cpus_attach;
2161
2162static void cpuset_attach(struct cgroup_taskset *tset)
2163{
2164 /* static buf protected by cpuset_mutex */
2165 static nodemask_t cpuset_attach_nodemask_to;
2166 struct task_struct *task;
2167 struct task_struct *leader;
2168 struct cgroup_subsys_state *css;
2169 struct cpuset *cs;
2170 struct cpuset *oldcs = cpuset_attach_old_cs;
2171
2172 cgroup_taskset_first(tset, &css);
2173 cs = css_cs(css);
2174
2175 percpu_down_write(&cpuset_rwsem);
2176
2177 /* prepare for attach */
2178 if (cs == &top_cpuset)
2179 cpumask_copy(cpus_attach, cpu_possible_mask);
2180 else
2181 guarantee_online_cpus(cs, cpus_attach);
2182
2183 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
2184
2185 cgroup_taskset_for_each(task, css, tset) {
2186 /*
2187 * can_attach beforehand should guarantee that this doesn't
2188 * fail. TODO: have a better way to handle failure here
2189 */
2190 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
2191
2192 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
2193 cpuset_update_task_spread_flag(cs, task);
2194 }
2195
2196 /*
2197 * Change mm for all threadgroup leaders. This is expensive and may
2198 * sleep and should be moved outside migration path proper.
2199 */
2200 cpuset_attach_nodemask_to = cs->effective_mems;
2201 cgroup_taskset_for_each_leader(leader, css, tset) {
2202 struct mm_struct *mm = get_task_mm(leader);
2203
2204 if (mm) {
2205 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
2206
2207 /*
2208 * old_mems_allowed is the same as mems_allowed
2209 * here, except if this task is being moved
2210 * automatically due to hotplug. In that case
2211 * @mems_allowed has been updated and is empty, so
2212 * @old_mems_allowed is the right nodemask that we
2213 * should migrate the mm from.
2214 */
2215 if (is_memory_migrate(cs))
2216 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
2217 &cpuset_attach_nodemask_to);
2218 else
2219 mmput(mm);
2220 }
2221 }
2222
2223 cs->old_mems_allowed = cpuset_attach_nodemask_to;
2224
2225 cs->attach_in_progress--;
2226 if (!cs->attach_in_progress)
2227 wake_up(&cpuset_attach_wq);
2228
2229 percpu_up_write(&cpuset_rwsem);
2230}
2231
2232/* The various types of files and directories in a cpuset file system */
2233
2234typedef enum {
2235 FILE_MEMORY_MIGRATE,
2236 FILE_CPULIST,
2237 FILE_MEMLIST,
2238 FILE_EFFECTIVE_CPULIST,
2239 FILE_EFFECTIVE_MEMLIST,
2240 FILE_SUBPARTS_CPULIST,
2241 FILE_CPU_EXCLUSIVE,
2242 FILE_MEM_EXCLUSIVE,
2243 FILE_MEM_HARDWALL,
2244 FILE_SCHED_LOAD_BALANCE,
2245 FILE_PARTITION_ROOT,
2246 FILE_SCHED_RELAX_DOMAIN_LEVEL,
2247 FILE_MEMORY_PRESSURE_ENABLED,
2248 FILE_MEMORY_PRESSURE,
2249 FILE_SPREAD_PAGE,
2250 FILE_SPREAD_SLAB,
2251} cpuset_filetype_t;
2252
2253static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
2254 u64 val)
2255{
2256 struct cpuset *cs = css_cs(css);
2257 cpuset_filetype_t type = cft->private;
2258 int retval = 0;
2259
2260 get_online_cpus();
2261 percpu_down_write(&cpuset_rwsem);
2262 if (!is_cpuset_online(cs)) {
2263 retval = -ENODEV;
2264 goto out_unlock;
2265 }
2266
2267 switch (type) {
2268 case FILE_CPU_EXCLUSIVE:
2269 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
2270 break;
2271 case FILE_MEM_EXCLUSIVE:
2272 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
2273 break;
2274 case FILE_MEM_HARDWALL:
2275 retval = update_flag(CS_MEM_HARDWALL, cs, val);
2276 break;
2277 case FILE_SCHED_LOAD_BALANCE:
2278 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
2279 break;
2280 case FILE_MEMORY_MIGRATE:
2281 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
2282 break;
2283 case FILE_MEMORY_PRESSURE_ENABLED:
2284 cpuset_memory_pressure_enabled = !!val;
2285 break;
2286 case FILE_SPREAD_PAGE:
2287 retval = update_flag(CS_SPREAD_PAGE, cs, val);
2288 break;
2289 case FILE_SPREAD_SLAB:
2290 retval = update_flag(CS_SPREAD_SLAB, cs, val);
2291 break;
2292 default:
2293 retval = -EINVAL;
2294 break;
2295 }
2296out_unlock:
2297 percpu_up_write(&cpuset_rwsem);
2298 put_online_cpus();
2299 return retval;
2300}
2301
2302static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
2303 s64 val)
2304{
2305 struct cpuset *cs = css_cs(css);
2306 cpuset_filetype_t type = cft->private;
2307 int retval = -ENODEV;
2308
2309 get_online_cpus();
2310 percpu_down_write(&cpuset_rwsem);
2311 if (!is_cpuset_online(cs))
2312 goto out_unlock;
2313
2314 switch (type) {
2315 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2316 retval = update_relax_domain_level(cs, val);
2317 break;
2318 default:
2319 retval = -EINVAL;
2320 break;
2321 }
2322out_unlock:
2323 percpu_up_write(&cpuset_rwsem);
2324 put_online_cpus();
2325 return retval;
2326}
2327
2328/*
2329 * Common handling for a write to a "cpus" or "mems" file.
2330 */
2331static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
2332 char *buf, size_t nbytes, loff_t off)
2333{
2334 struct cpuset *cs = css_cs(of_css(of));
2335 struct cpuset *trialcs;
2336 int retval = -ENODEV;
2337
2338 buf = strstrip(buf);
2339
2340 /*
2341 * CPU or memory hotunplug may leave @cs w/o any execution
2342 * resources, in which case the hotplug code asynchronously updates
2343 * configuration and transfers all tasks to the nearest ancestor
2344 * which can execute.
2345 *
2346 * As writes to "cpus" or "mems" may restore @cs's execution
2347 * resources, wait for the previously scheduled operations before
2348 * proceeding, so that we don't end up keep removing tasks added
2349 * after execution capability is restored.
2350 *
2351 * cpuset_hotplug_work calls back into cgroup core via
2352 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
2353 * operation like this one can lead to a deadlock through kernfs
2354 * active_ref protection. Let's break the protection. Losing the
2355 * protection is okay as we check whether @cs is online after
2356 * grabbing cpuset_mutex anyway. This only happens on the legacy
2357 * hierarchies.
2358 */
2359 css_get(&cs->css);
2360 kernfs_break_active_protection(of->kn);
2361 flush_work(&cpuset_hotplug_work);
2362
2363 get_online_cpus();
2364 percpu_down_write(&cpuset_rwsem);
2365 if (!is_cpuset_online(cs))
2366 goto out_unlock;
2367
2368 trialcs = alloc_trial_cpuset(cs);
2369 if (!trialcs) {
2370 retval = -ENOMEM;
2371 goto out_unlock;
2372 }
2373
2374 switch (of_cft(of)->private) {
2375 case FILE_CPULIST:
2376 retval = update_cpumask(cs, trialcs, buf);
2377 break;
2378 case FILE_MEMLIST:
2379 retval = update_nodemask(cs, trialcs, buf);
2380 break;
2381 default:
2382 retval = -EINVAL;
2383 break;
2384 }
2385
2386 free_cpuset(trialcs);
2387out_unlock:
2388 percpu_up_write(&cpuset_rwsem);
2389 put_online_cpus();
2390 kernfs_unbreak_active_protection(of->kn);
2391 css_put(&cs->css);
2392 flush_workqueue(cpuset_migrate_mm_wq);
2393 return retval ?: nbytes;
2394}
2395
2396/*
2397 * These ascii lists should be read in a single call, by using a user
2398 * buffer large enough to hold the entire map. If read in smaller
2399 * chunks, there is no guarantee of atomicity. Since the display format
2400 * used (a list of ranges of sequential numbers) is variable length,
2401 * and since these maps can change value dynamically, one could read
2402 * gibberish by doing partial reads while a list was changing.
2403 */
2404static int cpuset_common_seq_show(struct seq_file *sf, void *v)
2405{
2406 struct cpuset *cs = css_cs(seq_css(sf));
2407 cpuset_filetype_t type = seq_cft(sf)->private;
2408 int ret = 0;
2409
2410 spin_lock_irq(&callback_lock);
2411
2412 switch (type) {
2413 case FILE_CPULIST:
2414 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
2415 break;
2416 case FILE_MEMLIST:
2417 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
2418 break;
2419 case FILE_EFFECTIVE_CPULIST:
2420 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
2421 break;
2422 case FILE_EFFECTIVE_MEMLIST:
2423 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
2424 break;
2425 case FILE_SUBPARTS_CPULIST:
2426 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->subparts_cpus));
2427 break;
2428 default:
2429 ret = -EINVAL;
2430 }
2431
2432 spin_unlock_irq(&callback_lock);
2433 return ret;
2434}
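
/*
 * Illustrative userspace read (an assumption, not from this file)
 * following the single-call advice in the comment above
 * cpuset_common_seq_show(): use one buffer large enough for the whole
 * list so the snapshot is consistent.
 *
 *	char buf[4096];
 *	int fd = open("/sys/fs/cgroup/cpuset/grp/cpuset.cpus", O_RDONLY);
 *	ssize_t n = (fd >= 0) ? read(fd, buf, sizeof(buf) - 1) : -1;
 *	if (n > 0)
 *		buf[n] = '\0';	// e.g. "0-3,8\n"
 */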
2435
2436static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
2437{
2438 struct cpuset *cs = css_cs(css);
2439 cpuset_filetype_t type = cft->private;
2440 switch (type) {
2441 case FILE_CPU_EXCLUSIVE:
2442 return is_cpu_exclusive(cs);
2443 case FILE_MEM_EXCLUSIVE:
2444 return is_mem_exclusive(cs);
2445 case FILE_MEM_HARDWALL:
2446 return is_mem_hardwall(cs);
2447 case FILE_SCHED_LOAD_BALANCE:
2448 return is_sched_load_balance(cs);
2449 case FILE_MEMORY_MIGRATE:
2450 return is_memory_migrate(cs);
2451 case FILE_MEMORY_PRESSURE_ENABLED:
2452 return cpuset_memory_pressure_enabled;
2453 case FILE_MEMORY_PRESSURE:
2454 return fmeter_getrate(&cs->fmeter);
2455 case FILE_SPREAD_PAGE:
2456 return is_spread_page(cs);
2457 case FILE_SPREAD_SLAB:
2458 return is_spread_slab(cs);
2459 default:
2460 BUG();
2461 }
2462
2463 /* Unreachable but makes gcc happy */
2464 return 0;
2465}
2466
2467static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
2468{
2469 struct cpuset *cs = css_cs(css);
2470 cpuset_filetype_t type = cft->private;
2471 switch (type) {
2472 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
2473 return cs->relax_domain_level;
2474 default:
2475 BUG();
2476 }
2477
2478 /* Unrechable but makes gcc happy */
2479 return 0;
2480}
2481
2482static int sched_partition_show(struct seq_file *seq, void *v)
2483{
2484 struct cpuset *cs = css_cs(seq_css(seq));
2485
2486 switch (cs->partition_root_state) {
2487 case PRS_ENABLED:
2488 seq_puts(seq, "root\n");
2489 break;
2490 case PRS_DISABLED:
2491 seq_puts(seq, "member\n");
2492 break;
2493 case PRS_ERROR:
2494 seq_puts(seq, "root invalid\n");
2495 break;
2496 }
2497 return 0;
2498}
2499
2500static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf,
2501 size_t nbytes, loff_t off)
2502{
2503 struct cpuset *cs = css_cs(of_css(of));
2504 int val;
2505 int retval = -ENODEV;
2506
2507 buf = strstrip(buf);
2508
2509 /*
2510 * Convert "root" to ENABLED, and convert "member" to DISABLED.
2511 */
2512 if (!strcmp(buf, "root"))
2513 val = PRS_ENABLED;
2514 else if (!strcmp(buf, "member"))
2515 val = PRS_DISABLED;
2516 else
2517 return -EINVAL;
2518
2519 css_get(&cs->css);
2520 get_online_cpus();
2521 percpu_down_write(&cpuset_rwsem);
2522 if (!is_cpuset_online(cs))
2523 goto out_unlock;
2524
2525 retval = update_prstate(cs, val);
2526out_unlock:
2527 percpu_up_write(&cpuset_rwsem);
2528 put_online_cpus();
2529 css_put(&cs->css);
2530 return retval ?: nbytes;
2531}
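
/*
 * Illustrative use of the interface above (the cgroup path is
 * hypothetical): on the default hierarchy, writing "root" to
 * .../child/cpuset.cpus.partition makes the child a partition root
 * (PRS_ENABLED) and writing "member" reverts it (PRS_DISABLED); any
 * other string gets -EINVAL.  Reading the file back reports "root",
 * "member" or "root invalid".
 */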
2532
2533/*
2534 * for the common functions, 'private' gives the type of file
2535 */
2536
2537static struct cftype legacy_files[] = {
2538 {
2539 .name = "cpus",
2540 .seq_show = cpuset_common_seq_show,
2541 .write = cpuset_write_resmask,
2542 .max_write_len = (100U + 6 * NR_CPUS),
2543 .private = FILE_CPULIST,
2544 },
2545
2546 {
2547 .name = "mems",
2548 .seq_show = cpuset_common_seq_show,
2549 .write = cpuset_write_resmask,
2550 .max_write_len = (100U + 6 * MAX_NUMNODES),
2551 .private = FILE_MEMLIST,
2552 },
2553
2554 {
2555 .name = "effective_cpus",
2556 .seq_show = cpuset_common_seq_show,
2557 .private = FILE_EFFECTIVE_CPULIST,
2558 },
2559
2560 {
2561 .name = "effective_mems",
2562 .seq_show = cpuset_common_seq_show,
2563 .private = FILE_EFFECTIVE_MEMLIST,
2564 },
2565
2566 {
2567 .name = "cpu_exclusive",
2568 .read_u64 = cpuset_read_u64,
2569 .write_u64 = cpuset_write_u64,
2570 .private = FILE_CPU_EXCLUSIVE,
2571 },
2572
2573 {
2574 .name = "mem_exclusive",
2575 .read_u64 = cpuset_read_u64,
2576 .write_u64 = cpuset_write_u64,
2577 .private = FILE_MEM_EXCLUSIVE,
2578 },
2579
2580 {
2581 .name = "mem_hardwall",
2582 .read_u64 = cpuset_read_u64,
2583 .write_u64 = cpuset_write_u64,
2584 .private = FILE_MEM_HARDWALL,
2585 },
2586
2587 {
2588 .name = "sched_load_balance",
2589 .read_u64 = cpuset_read_u64,
2590 .write_u64 = cpuset_write_u64,
2591 .private = FILE_SCHED_LOAD_BALANCE,
2592 },
2593
2594 {
2595 .name = "sched_relax_domain_level",
2596 .read_s64 = cpuset_read_s64,
2597 .write_s64 = cpuset_write_s64,
2598 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
2599 },
2600
2601 {
2602 .name = "memory_migrate",
2603 .read_u64 = cpuset_read_u64,
2604 .write_u64 = cpuset_write_u64,
2605 .private = FILE_MEMORY_MIGRATE,
2606 },
2607
2608 {
2609 .name = "memory_pressure",
2610 .read_u64 = cpuset_read_u64,
2611 .private = FILE_MEMORY_PRESSURE,
2612 },
2613
2614 {
2615 .name = "memory_spread_page",
2616 .read_u64 = cpuset_read_u64,
2617 .write_u64 = cpuset_write_u64,
2618 .private = FILE_SPREAD_PAGE,
2619 },
2620
2621 {
2622 .name = "memory_spread_slab",
2623 .read_u64 = cpuset_read_u64,
2624 .write_u64 = cpuset_write_u64,
2625 .private = FILE_SPREAD_SLAB,
2626 },
2627
2628 {
2629 .name = "memory_pressure_enabled",
2630 .flags = CFTYPE_ONLY_ON_ROOT,
2631 .read_u64 = cpuset_read_u64,
2632 .write_u64 = cpuset_write_u64,
2633 .private = FILE_MEMORY_PRESSURE_ENABLED,
2634 },
2635
2636 { } /* terminate */
2637};
2638
2639/*
2640 * This is currently a minimal set for the default hierarchy. It can be
2641 * expanded later on by migrating more features and control files from v1.
2642 */
2643static struct cftype dfl_files[] = {
2644 {
2645 .name = "cpus",
2646 .seq_show = cpuset_common_seq_show,
2647 .write = cpuset_write_resmask,
2648 .max_write_len = (100U + 6 * NR_CPUS),
2649 .private = FILE_CPULIST,
2650 .flags = CFTYPE_NOT_ON_ROOT,
2651 },
2652
2653 {
2654 .name = "mems",
2655 .seq_show = cpuset_common_seq_show,
2656 .write = cpuset_write_resmask,
2657 .max_write_len = (100U + 6 * MAX_NUMNODES),
2658 .private = FILE_MEMLIST,
2659 .flags = CFTYPE_NOT_ON_ROOT,
2660 },
2661
2662 {
2663 .name = "cpus.effective",
2664 .seq_show = cpuset_common_seq_show,
2665 .private = FILE_EFFECTIVE_CPULIST,
2666 },
2667
2668 {
2669 .name = "mems.effective",
2670 .seq_show = cpuset_common_seq_show,
2671 .private = FILE_EFFECTIVE_MEMLIST,
2672 },
2673
2674 {
2675 .name = "cpus.partition",
2676 .seq_show = sched_partition_show,
2677 .write = sched_partition_write,
2678 .private = FILE_PARTITION_ROOT,
2679 .flags = CFTYPE_NOT_ON_ROOT,
2680 },
2681
2682 {
2683 .name = "cpus.subpartitions",
2684 .seq_show = cpuset_common_seq_show,
2685 .private = FILE_SUBPARTS_CPULIST,
2686 .flags = CFTYPE_DEBUG,
2687 },
2688
2689 { } /* terminate */
2690};
2691
2692
2693/*
2694 * cpuset_css_alloc - allocate a cpuset css
2695 * parent_css: css of the parent cgroup that the new cpuset will be part of
2696 */
2697
2698static struct cgroup_subsys_state *
2699cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
2700{
2701 struct cpuset *cs;
2702
2703 if (!parent_css)
2704 return &top_cpuset.css;
2705
2706 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
2707 if (!cs)
2708 return ERR_PTR(-ENOMEM);
2709
2710 if (alloc_cpumasks(cs, NULL)) {
2711 kfree(cs);
2712 return ERR_PTR(-ENOMEM);
2713 }
2714
2715 set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
2716 nodes_clear(cs->mems_allowed);
2717 nodes_clear(cs->effective_mems);
2718 fmeter_init(&cs->fmeter);
2719 cs->relax_domain_level = -1;
2720
2721 return &cs->css;
2722}
2723
2724static int cpuset_css_online(struct cgroup_subsys_state *css)
2725{
2726 struct cpuset *cs = css_cs(css);
2727 struct cpuset *parent = parent_cs(cs);
2728 struct cpuset *tmp_cs;
2729 struct cgroup_subsys_state *pos_css;
2730
2731 if (!parent)
2732 return 0;
2733
2734 get_online_cpus();
2735 percpu_down_write(&cpuset_rwsem);
2736
2737 set_bit(CS_ONLINE, &cs->flags);
2738 if (is_spread_page(parent))
2739 set_bit(CS_SPREAD_PAGE, &cs->flags);
2740 if (is_spread_slab(parent))
2741 set_bit(CS_SPREAD_SLAB, &cs->flags);
2742
2743 cpuset_inc();
2744
2745 spin_lock_irq(&callback_lock);
2746 if (is_in_v2_mode()) {
2747 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
2748 cs->effective_mems = parent->effective_mems;
2749 cs->use_parent_ecpus = true;
2750 parent->child_ecpus_count++;
2751 }
2752 spin_unlock_irq(&callback_lock);
2753
2754 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
2755 goto out_unlock;
2756
2757 /*
2758 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
2759 * set. This flag handling is implemented in cgroup core for
2760 * histrical reasons - the flag may be specified during mount.
2761 *
2762 * Currently, if any sibling cpusets have exclusive cpus or mem, we
2763 * refuse to clone the configuration - thereby refusing to let the task
2764 * enter, and as a result failing the sys_unshare() or
2765 * clone() which initiated it. If this becomes a problem for some
2766 * users who wish to allow that scenario, then this could be
2767 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
2768 * (and likewise for mems) to the new cgroup.
2769 */
2770 rcu_read_lock();
2771 cpuset_for_each_child(tmp_cs, pos_css, parent) {
2772 if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
2773 rcu_read_unlock();
2774 goto out_unlock;
2775 }
2776 }
2777 rcu_read_unlock();
2778
2779 spin_lock_irq(&callback_lock);
2780 cs->mems_allowed = parent->mems_allowed;
2781 cs->effective_mems = parent->mems_allowed;
2782 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
2783 cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
2784 spin_unlock_irq(&callback_lock);
2785out_unlock:
2786 percpu_up_write(&cpuset_rwsem);
2787 put_online_cpus();
2788 return 0;
2789}
2790
2791/*
2792 * If the cpuset being removed has its flag 'sched_load_balance'
2793 * enabled, then simulate turning sched_load_balance off, which
2794 * will call rebuild_sched_domains_locked(). That is not needed
2795 * in the default hierarchy where only changes in partition
2796 * will cause repartitioning.
2797 *
2798 * If the cpuset has the 'sched.partition' flag enabled, simulate
2799 * turning 'sched.partition' off.
2800 */
2801
2802static void cpuset_css_offline(struct cgroup_subsys_state *css)
2803{
2804 struct cpuset *cs = css_cs(css);
2805
2806 get_online_cpus();
2807 percpu_down_write(&cpuset_rwsem);
2808
2809 if (is_partition_root(cs))
2810 update_prstate(cs, 0);
2811
2812 if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
2813 is_sched_load_balance(cs))
2814 update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
2815
2816 if (cs->use_parent_ecpus) {
2817 struct cpuset *parent = parent_cs(cs);
2818
2819 cs->use_parent_ecpus = false;
2820 parent->child_ecpus_count--;
2821 }
2822
2823 cpuset_dec();
2824 clear_bit(CS_ONLINE, &cs->flags);
2825
2826 percpu_up_write(&cpuset_rwsem);
2827 put_online_cpus();
2828}
2829
2830static void cpuset_css_free(struct cgroup_subsys_state *css)
2831{
2832 struct cpuset *cs = css_cs(css);
2833
2834 free_cpuset(cs);
2835}
2836
2837static void cpuset_bind(struct cgroup_subsys_state *root_css)
2838{
2839 percpu_down_write(&cpuset_rwsem);
2840 spin_lock_irq(&callback_lock);
2841
2842 if (is_in_v2_mode()) {
2843 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
2844 top_cpuset.mems_allowed = node_possible_map;
2845 } else {
2846 cpumask_copy(top_cpuset.cpus_allowed,
2847 top_cpuset.effective_cpus);
2848 top_cpuset.mems_allowed = top_cpuset.effective_mems;
2849 }
2850
2851 spin_unlock_irq(&callback_lock);
2852 percpu_up_write(&cpuset_rwsem);
2853}
2854
2855/*
2856 * Make sure the new task conforms to the current state of its parent,
2857 * which could have been changed by cpuset just after it inherits the
2858 * state from the parent and before it sits on the cgroup's task list.
2859 */
2860static void cpuset_fork(struct task_struct *task)
2861{
2862 if (task_css_is_root(task, cpuset_cgrp_id))
2863 return;
2864
2865 set_cpus_allowed_ptr(task, current->cpus_ptr);
2866 task->mems_allowed = current->mems_allowed;
2867}
2868
2869struct cgroup_subsys cpuset_cgrp_subsys = {
2870 .css_alloc = cpuset_css_alloc,
2871 .css_online = cpuset_css_online,
2872 .css_offline = cpuset_css_offline,
2873 .css_free = cpuset_css_free,
2874 .can_attach = cpuset_can_attach,
2875 .cancel_attach = cpuset_cancel_attach,
2876 .attach = cpuset_attach,
2877 .post_attach = cpuset_post_attach,
2878 .bind = cpuset_bind,
2879 .fork = cpuset_fork,
2880 .legacy_cftypes = legacy_files,
2881 .dfl_cftypes = dfl_files,
2882 .early_init = true,
2883 .threaded = true,
2884};
2885
2886/**
2887 * cpuset_init - initialize cpusets at system boot
2888 *
2889 * Description: Initialize top_cpuset
2890 **/
2891
2892int __init cpuset_init(void)
2893{
2894 BUG_ON(percpu_init_rwsem(&cpuset_rwsem));
2895
2896 BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
2897 BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
2898 BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
2899
2900 cpumask_setall(top_cpuset.cpus_allowed);
2901 nodes_setall(top_cpuset.mems_allowed);
2902 cpumask_setall(top_cpuset.effective_cpus);
2903 nodes_setall(top_cpuset.effective_mems);
2904
2905 fmeter_init(&top_cpuset.fmeter);
2906 set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
2907 top_cpuset.relax_domain_level = -1;
2908
2909 BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));
2910
2911 return 0;
2912}
2913
2914/*
2915 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
2916 * or memory nodes, we need to walk over the cpuset hierarchy,
2917 * removing that CPU or node from all cpusets. If this removes the
2918 * last CPU or node from a cpuset, then move the tasks in the empty
2919 * cpuset to its next-highest non-empty parent.
2920 */
2921static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2922{
2923 struct cpuset *parent;
2924
2925 /*
2926 * Find its next-highest non-empty parent (the top cpuset
2927 * has online cpus, so it can't be empty).
2928 */
2929 parent = parent_cs(cs);
2930 while (cpumask_empty(parent->cpus_allowed) ||
2931 nodes_empty(parent->mems_allowed))
2932 parent = parent_cs(parent);
2933
2934 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2935 pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
2936 pr_cont_cgroup_name(cs->css.cgroup);
2937 pr_cont("\n");
2938 }
2939}
2940
2941static void
2942hotplug_update_tasks_legacy(struct cpuset *cs,
2943 struct cpumask *new_cpus, nodemask_t *new_mems,
2944 bool cpus_updated, bool mems_updated)
2945{
2946 bool is_empty;
2947
2948 spin_lock_irq(&callback_lock);
2949 cpumask_copy(cs->cpus_allowed, new_cpus);
2950 cpumask_copy(cs->effective_cpus, new_cpus);
2951 cs->mems_allowed = *new_mems;
2952 cs->effective_mems = *new_mems;
2953 spin_unlock_irq(&callback_lock);
2954
2955 /*
2956 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
2957 * as the tasks will be migrated to an ancestor.
2958 */
2959 if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
2960 update_tasks_cpumask(cs);
2961 if (mems_updated && !nodes_empty(cs->mems_allowed))
2962 update_tasks_nodemask(cs);
2963
2964 is_empty = cpumask_empty(cs->cpus_allowed) ||
2965 nodes_empty(cs->mems_allowed);
2966
2967 percpu_up_write(&cpuset_rwsem);
2968
2969 /*
2970 * Move tasks to the nearest ancestor with execution resources.
2971 * This is a full cgroup operation which will also call back into
2972 * cpuset. Should be done outside any lock.
2973 */
2974 if (is_empty)
2975 remove_tasks_in_empty_cpuset(cs);
2976
2977 percpu_down_write(&cpuset_rwsem);
2978}
2979
2980static void
2981hotplug_update_tasks(struct cpuset *cs,
2982 struct cpumask *new_cpus, nodemask_t *new_mems,
2983 bool cpus_updated, bool mems_updated)
2984{
2985 if (cpumask_empty(new_cpus))
2986 cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
2987 if (nodes_empty(*new_mems))
2988 *new_mems = parent_cs(cs)->effective_mems;
2989
2990 spin_lock_irq(&callback_lock);
2991 cpumask_copy(cs->effective_cpus, new_cpus);
2992 cs->effective_mems = *new_mems;
2993 spin_unlock_irq(&callback_lock);
2994
2995 if (cpus_updated)
2996 update_tasks_cpumask(cs);
2997 if (mems_updated)
2998 update_tasks_nodemask(cs);
2999}
3000
3001static bool force_rebuild;
3002
3003void cpuset_force_rebuild(void)
3004{
3005 force_rebuild = true;
3006}
3007
3008/**
3009 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
3010 * @cs: cpuset in interest
3011 * @tmp: the tmpmasks structure pointer
3012 *
3013 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
3014 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
3015 * all its tasks are moved to the nearest ancestor with both resources.
3016 */
3017static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
3018{
3019 static cpumask_t new_cpus;
3020 static nodemask_t new_mems;
3021 bool cpus_updated;
3022 bool mems_updated;
3023 struct cpuset *parent;
3024retry:
3025 wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
3026
3027 percpu_down_write(&cpuset_rwsem);
3028
3029 /*
3030 * We have raced with task attaching. We wait until attaching
3031 * is finished, so we won't attach a task to an empty cpuset.
3032 */
3033 if (cs->attach_in_progress) {
3034 percpu_up_write(&cpuset_rwsem);
3035 goto retry;
3036 }
3037
3038 parent = parent_cs(cs);
3039 compute_effective_cpumask(&new_cpus, cs, parent);
3040 nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
3041
3042 if (cs->nr_subparts_cpus)
3043 /*
3044 * Make sure that CPUs allocated to child partitions
3045 * do not show up in effective_cpus.
3046 */
3047 cpumask_andnot(&new_cpus, &new_cpus, cs->subparts_cpus);
3048
3049 if (!tmp || !cs->partition_root_state)
3050 goto update_tasks;
3051
3052 /*
3053 * In the unlikely event that a partition root has empty
3054 * effective_cpus or its parent becomes erroneous, we have to
3055 * transition it to the erroneous state.
3056 */
3057 if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
3058 (parent->partition_root_state == PRS_ERROR))) {
3059 if (cs->nr_subparts_cpus) {
3060 cs->nr_subparts_cpus = 0;
3061 cpumask_clear(cs->subparts_cpus);
3062 compute_effective_cpumask(&new_cpus, cs, parent);
3063 }
3064
3065 /*
3066 * If the effective_cpus is empty because the child
3067 * partitions take away all the CPUs, we can keep
3068 * the current partition and let the child partitions
3069 * fight for available CPUs.
3070 */
3071 if ((parent->partition_root_state == PRS_ERROR) ||
3072 cpumask_empty(&new_cpus)) {
3073 update_parent_subparts_cpumask(cs, partcmd_disable,
3074 NULL, tmp);
3075 cs->partition_root_state = PRS_ERROR;
3076 }
3077 cpuset_force_rebuild();
3078 }
3079
3080 /*
3081 * On the other hand, an erroneous partition root may be transitioned
3082 * back to a regular one or a partition root with no CPU allocated
3083 * from the parent may change to erroneous.
3084 */
3085 if (is_partition_root(parent) &&
3086 ((cs->partition_root_state == PRS_ERROR) ||
3087 !cpumask_intersects(&new_cpus, parent->subparts_cpus)) &&
3088 update_parent_subparts_cpumask(cs, partcmd_update, NULL, tmp))
3089 cpuset_force_rebuild();
3090
3091update_tasks:
3092 cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
3093 mems_updated = !nodes_equal(new_mems, cs->effective_mems);
3094
3095 if (is_in_v2_mode())
3096 hotplug_update_tasks(cs, &new_cpus, &new_mems,
3097 cpus_updated, mems_updated);
3098 else
3099 hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
3100 cpus_updated, mems_updated);
3101
3102 percpu_up_write(&cpuset_rwsem);
3103}
3104
3105/**
3106 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
3107 *
3108 * This function is called after either CPU or memory configuration has
3109 * changed and updates cpuset accordingly. The top_cpuset is always
3110 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
3111 * order to make cpusets transparent (of no effect) on systems that are
3112 * actively using CPU hotplug but making no active use of cpusets.
3113 *
3114 * Non-root cpusets are only affected by offlining. If any CPUs or memory
3115 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
3116 * all descendants.
3117 *
3118 * Note that CPU offlining during suspend is ignored. We don't modify
3119 * cpusets across suspend/resume cycles at all.
3120 */
3121static void cpuset_hotplug_workfn(struct work_struct *work)
3122{
3123 static cpumask_t new_cpus;
3124 static nodemask_t new_mems;
3125 bool cpus_updated, mems_updated;
3126 bool on_dfl = is_in_v2_mode();
3127 struct tmpmasks tmp, *ptmp = NULL;
3128
3129 if (on_dfl && !alloc_cpumasks(NULL, &tmp))
3130 ptmp = &tmp;
3131
3132 percpu_down_write(&cpuset_rwsem);
3133
3134 /* fetch the available cpus/mems and find out which changed how */
3135 cpumask_copy(&new_cpus, cpu_active_mask);
3136 new_mems = node_states[N_MEMORY];
3137
3138 /*
3139 * If subparts_cpus is populated, it is likely that the check below
3140 * will produce a false positive on cpus_updated when the cpu list
3141 * isn't changed. It is extra work, but it is better to be safe.
3142 */
3143 cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
3144 mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
3145
3146 /* synchronize cpus_allowed to cpu_active_mask */
3147 if (cpus_updated) {
3148 spin_lock_irq(&callback_lock);
3149 if (!on_dfl)
3150 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
3151 /*
3152 * Make sure that CPUs allocated to child partitions
3153 * do not show up in effective_cpus. If no CPU is left,
3154 * we clear the subparts_cpus & let the child partitions
3155 * fight for the CPUs again.
3156 */
3157 if (top_cpuset.nr_subparts_cpus) {
3158 if (cpumask_subset(&new_cpus,
3159 top_cpuset.subparts_cpus)) {
3160 top_cpuset.nr_subparts_cpus = 0;
3161 cpumask_clear(top_cpuset.subparts_cpus);
3162 } else {
3163 cpumask_andnot(&new_cpus, &new_cpus,
3164 top_cpuset.subparts_cpus);
3165 }
3166 }
3167 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
3168 spin_unlock_irq(&callback_lock);
3169 /* we don't mess with cpumasks of tasks in top_cpuset */
3170 }
3171
3172 /* synchronize mems_allowed to N_MEMORY */
3173 if (mems_updated) {
3174 spin_lock_irq(&callback_lock);
3175 if (!on_dfl)
3176 top_cpuset.mems_allowed = new_mems;
3177 top_cpuset.effective_mems = new_mems;
3178 spin_unlock_irq(&callback_lock);
3179 update_tasks_nodemask(&top_cpuset);
3180 }
3181
3182 percpu_up_write(&cpuset_rwsem);
3183
3184 /* if cpus or mems changed, we need to propagate to descendants */
3185 if (cpus_updated || mems_updated) {
3186 struct cpuset *cs;
3187 struct cgroup_subsys_state *pos_css;
3188
3189 rcu_read_lock();
3190 cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
3191 if (cs == &top_cpuset || !css_tryget_online(&cs->css))
3192 continue;
3193 rcu_read_unlock();
3194
3195 cpuset_hotplug_update_tasks(cs, ptmp);
3196
3197 rcu_read_lock();
3198 css_put(&cs->css);
3199 }
3200 rcu_read_unlock();
3201 }
3202
3203 /* rebuild sched domains if cpus_allowed has changed */
3204 if (cpus_updated || force_rebuild) {
3205 force_rebuild = false;
3206 rebuild_sched_domains();
3207 }
3208
3209 free_cpumasks(NULL, ptmp);
3210}
3211
3212void cpuset_update_active_cpus(void)
3213{
3214 /*
3215 * We're inside cpu hotplug critical region which usually nests
3216 * inside cgroup synchronization. Bounce actual hotplug processing
3217 * to a work item to avoid reverse locking order.
3218 */
3219 schedule_work(&cpuset_hotplug_work);
3220}
3221
3222void cpuset_wait_for_hotplug(void)
3223{
3224 flush_work(&cpuset_hotplug_work);
3225}
3226
3227/*
3228 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
3229 * Call this routine anytime after node_states[N_MEMORY] changes.
3230 * See cpuset_update_active_cpus() for CPU hotplug handling.
3231 */
3232static int cpuset_track_online_nodes(struct notifier_block *self,
3233 unsigned long action, void *arg)
3234{
3235 schedule_work(&cpuset_hotplug_work);
3236 return NOTIFY_OK;
3237}
3238
3239static struct notifier_block cpuset_track_online_nodes_nb = {
3240 .notifier_call = cpuset_track_online_nodes,
3241 .priority = 10, /* ??! */
3242};
3243
3244/**
3245 * cpuset_init_smp - initialize cpus_allowed
3246 *
3247 * Description: Finish top cpuset after cpu, node maps are initialized
3248 */
3249void __init cpuset_init_smp(void)
3250{
3251 cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
3252 top_cpuset.mems_allowed = node_states[N_MEMORY];
3253 top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
3254
3255 cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
3256 top_cpuset.effective_mems = node_states[N_MEMORY];
3257
3258 register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
3259
3260 cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
3261 BUG_ON(!cpuset_migrate_mm_wq);
3262}
3263
3264/**
3265 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
3266 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
3267 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
3268 *
3269 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
3270 * attached to the specified @tsk. Guaranteed to return some non-empty
3271 * subset of cpu_online_mask, even if this means going outside the
3272 * task's cpuset.
3273 **/
3274
3275void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
3276{
3277 unsigned long flags;
3278
3279 spin_lock_irqsave(&callback_lock, flags);
3280 rcu_read_lock();
3281 guarantee_online_cpus(task_cs(tsk), pmask);
3282 rcu_read_unlock();
3283 spin_unlock_irqrestore(&callback_lock, flags);
3284}
3285
3286/**
3287 * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
3288 * @tsk: pointer to task_struct with which the scheduler is struggling
3289 *
3290 * Description: In the case that the scheduler cannot find an allowed cpu in
3291 * tsk->cpus_allowed, we fall back to task_cs(tsk)->cpus_allowed. In legacy
3292 * mode however, this value is the same as task_cs(tsk)->effective_cpus,
3293 * which will not contain a sane cpumask during cases such as cpu hotplugging.
3294 * This is the absolute last resort for the scheduler and it is only used if
3295 * _every_ other avenue has been traveled.
3296 **/
3297
3298void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
3299{
3300 rcu_read_lock();
3301 do_set_cpus_allowed(tsk, is_in_v2_mode() ?
3302 task_cs(tsk)->cpus_allowed : cpu_possible_mask);
3303 rcu_read_unlock();
3304
3305 /*
3306 * We own tsk->cpus_allowed, nobody can change it under us.
3307 *
3308 * But we used cs && cs->cpus_allowed lockless and thus can
3309 * race with cgroup_attach_task() or update_cpumask() and get
3310 * the wrong tsk->cpus_allowed. However, both cases imply the
3311 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
3312 * which takes task_rq_lock().
3313 *
3314 * If we are called after it dropped the lock we must see all
3315 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
3316 * set any mask even if it is not right from task_cs() pov,
3317 * the pending set_cpus_allowed_ptr() will fix things.
3318 *
3319 * select_fallback_rq() will fix things up and set cpu_possible_mask
3320 * if required.
3321 */
3322}
3323
3324void __init cpuset_init_current_mems_allowed(void)
3325{
3326 nodes_setall(current->mems_allowed);
3327}
3328
3329/**
3330 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
3331 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
3332 *
3333 * Description: Returns the nodemask_t mems_allowed of the cpuset
3334 * attached to the specified @tsk. Guaranteed to return some non-empty
3335 * subset of node_states[N_MEMORY], even if this means going outside the
3336 * task's cpuset.
3337 **/
3338
3339nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
3340{
3341 nodemask_t mask;
3342 unsigned long flags;
3343
3344 spin_lock_irqsave(&callback_lock, flags);
3345 rcu_read_lock();
3346 guarantee_online_mems(task_cs(tsk), &mask);
3347 rcu_read_unlock();
3348 spin_unlock_irqrestore(&callback_lock, flags);
3349
3350 return mask;
3351}
3352
3353/**
3354 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
3355 * @nodemask: the nodemask to be checked
3356 *
3357 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
3358 */
3359int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
3360{
3361 return nodes_intersects(*nodemask, current->mems_allowed);
3362}
3363
3364/*
3365 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
3366 * mem_hardwall ancestor to the specified cpuset. Call holding
3367 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
3368 * (an unusual configuration), then returns the root cpuset.
3369 */
3370static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
3371{
3372 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
3373 cs = parent_cs(cs);
3374 return cs;
3375}
3376
3377/**
3378 * cpuset_node_allowed - Can we allocate on a memory node?
3379 * @node: is this an allowed node?
3380 * @gfp_mask: memory allocation flags
3381 *
3382 * If we're in interrupt, yes, we can always allocate. If @node is set in
3383 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
3384 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
3385 * yes. If current has access to memory reserves as an oom victim, yes.
3386 * Otherwise, no.
3387 *
3388 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
3389 * and do not allow allocations outside the current task's cpuset
3390 * unless the task has been OOM killed.
3391 * GFP_KERNEL allocations are not so marked, so can escape to the
3392 * nearest enclosing hardwalled ancestor cpuset.
3393 *
3394 * Scanning up parent cpusets requires callback_lock. The
3395 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
3396 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
3397 * current task's mems_allowed came up empty on the first pass over
3398 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
3399 * cpuset are short of memory, might require taking the callback_lock.
3400 *
3401 * The first call here from mm/page_alloc:get_page_from_freelist()
3402 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
3403 * so no allocation on a node outside the cpuset is allowed (unless
3404 * in interrupt, of course).
3405 *
3406 * The second pass through get_page_from_freelist() doesn't even call
3407 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
3408 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
3409 * in alloc_flags. That logic and the checks below have the combined
3410 * effect that:
3411 * in_interrupt - any node ok (current task context irrelevant)
3412 * GFP_ATOMIC - any node ok
3413 * tsk_is_oom_victim - any node ok
3414 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
3415 * GFP_USER - only nodes in the current task's mems_allowed ok.
3416 */
3417bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
3418{
3419 struct cpuset *cs; /* current cpuset ancestors */
3420 int allowed; /* is allocation in zone z allowed? */
3421 unsigned long flags;
3422
3423 if (in_interrupt())
3424 return true;
3425 if (node_isset(node, current->mems_allowed))
3426 return true;
3427 /*
3428 * Allow tasks that have access to memory reserves because they have
3429 * been OOM killed to get memory anywhere.
3430 */
3431 if (unlikely(tsk_is_oom_victim(current)))
3432 return true;
3433 if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */
3434 return false;
3435
3436 if (current->flags & PF_EXITING) /* Let dying task have memory */
3437 return true;
3438
3439 /* Not hardwall and node outside mems_allowed: scan up cpusets */
3440 spin_lock_irqsave(&callback_lock, flags);
3441
3442 rcu_read_lock();
3443 cs = nearest_hardwall_ancestor(task_cs(current));
3444 allowed = node_isset(node, cs->mems_allowed);
3445 rcu_read_unlock();
3446
3447 spin_unlock_irqrestore(&callback_lock, flags);
3448 return allowed;
3449}
3450
3451/**
3452 * cpuset_mem_spread_node() - On which node to begin search for a file page
3453 * cpuset_slab_spread_node() - On which node to begin search for a slab page
3454 *
3455 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
3456 * tasks in a cpuset with is_spread_page or is_spread_slab set),
3457 * and if the memory allocation used cpuset_mem_spread_node()
3458 * to determine on which node to start looking, as it will for
3459 * certain page cache or slab cache pages such as used for file
3460 * system buffers and inode caches, then instead of starting the search
3461 * for a free page on the local node, the starting node is spread
3462 * around the task's mems_allowed nodes.
3463 *
3464 * We don't have to worry about the returned node being offline
3465 * because "it can't happen", and even if it did, it would be ok.
3466 *
3467 * The routines calling guarantee_online_mems() are careful to
3468 * only set nodes in task->mems_allowed that are online. So it
3469 * should not be possible for the following code to return an
3470 * offline node. But if it did, that would be ok, as this routine
3471 * is not returning the node where the allocation must be, only
3472 * the node where the search should start. The zonelist passed to
3473 * __alloc_pages() will include all nodes. If the slab allocator
3474 * is passed an offline node, it will fall back to the local node.
3475 * See kmem_cache_alloc_node().
3476 */
3477
3478static int cpuset_spread_node(int *rotor)
3479{
3480 return *rotor = next_node_in(*rotor, current->mems_allowed);
3481}
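
/*
 * Illustrative example: with current->mems_allowed = { 0, 2, 3 } and a
 * rotor currently at 0, successive calls return 2, 3, 0, 2, ... since
 * next_node_in() wraps around within the allowed mask.
 */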
3482
3483int cpuset_mem_spread_node(void)
3484{
3485 if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
3486 current->cpuset_mem_spread_rotor =
3487 node_random(&current->mems_allowed);
3488
3489 return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
3490}
3491
3492int cpuset_slab_spread_node(void)
3493{
3494 if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
3495 current->cpuset_slab_spread_rotor =
3496 node_random(&current->mems_allowed);
3497
3498 return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
3499}
3500
3501EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
3502
3503/**
3504 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
3505 * @tsk1: pointer to task_struct of some task.
3506 * @tsk2: pointer to task_struct of some other task.
3507 *
3508 * Description: Return true if @tsk1's mems_allowed intersects the
3509 * mems_allowed of @tsk2. Used by the OOM killer to determine if
3510 * either task's memory usage might impact the memory available
3511 * to the other.
3512 **/
3513
3514int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
3515 const struct task_struct *tsk2)
3516{
3517 return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
3518}
3519
3520/**
3521 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
3522 *
3523 * Description: Prints current's name, cpuset name, and a cached copy of its
3524 * mems_allowed to the kernel log.
3525 */
3526void cpuset_print_current_mems_allowed(void)
3527{
3528 struct cgroup *cgrp;
3529
3530 rcu_read_lock();
3531
3532 cgrp = task_cs(current)->css.cgroup;
3533 pr_cont(",cpuset=");
3534 pr_cont_cgroup_name(cgrp);
3535 pr_cont(",mems_allowed=%*pbl",
3536 nodemask_pr_args(&current->mems_allowed));
3537
3538 rcu_read_unlock();
3539}
3540
3541/*
3542 * Collection of memory_pressure is suppressed unless
3543 * this flag is enabled by writing "1" to the special
3544 * cpuset file 'memory_pressure_enabled' in the root cpuset.
3545 */
3546
3547int cpuset_memory_pressure_enabled __read_mostly;
3548
3549/**
3550 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
3551 *
3552 * Keep a running average of the rate of synchronous (direct)
3553 * page reclaim efforts initiated by tasks in each cpuset.
3554 *
3555 * This represents the rate at which some task in the cpuset
3556 * ran low on memory on all nodes it was allowed to use, and
3557 * had to enter the kernels page reclaim code in an effort to
3558 * create more free memory by tossing clean pages or swapping
3559 * or writing dirty pages.
3560 *
3561 * Display to user space in the per-cpuset read-only file
3562 * "memory_pressure". Value displayed is an integer
3563 * representing the recent rate of entry into the synchronous
3564 * (direct) page reclaim by any task attached to the cpuset.
3565 **/
3566
3567void __cpuset_memory_pressure_bump(void)
3568{
3569 rcu_read_lock();
3570 fmeter_markevent(&task_cs(current)->fmeter);
3571 rcu_read_unlock();
3572}
3573
3574#ifdef CONFIG_PROC_PID_CPUSET
3575/*
3576 * proc_cpuset_show()
3577 * - Print task's cpuset path into seq_file.
3578 * - Used for /proc/<pid>/cpuset.
3579 * - No need to task_lock(tsk) on this tsk->cpuset reference, as it
3580 * doesn't really matter if tsk->cpuset changes after we read it,
3581 * and we take cpuset_mutex, keeping cpuset_attach() from changing it
3582 * anyway.
3583 */
3584int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
3585 struct pid *pid, struct task_struct *tsk)
3586{
3587 char *buf;
3588 struct cgroup_subsys_state *css;
3589 int retval;
3590
3591 retval = -ENOMEM;
3592 buf = kmalloc(PATH_MAX, GFP_KERNEL);
3593 if (!buf)
3594 goto out;
3595
3596 css = task_get_css(tsk, cpuset_cgrp_id);
3597 retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
3598 current->nsproxy->cgroup_ns);
3599 css_put(css);
3600 if (retval >= PATH_MAX)
3601 retval = -ENAMETOOLONG;
3602 if (retval < 0)
3603 goto out_free;
3604 seq_puts(m, buf);
3605 seq_putc(m, '\n');
3606 retval = 0;
3607out_free:
3608 kfree(buf);
3609out:
3610 return retval;
3611}
3612#endif /* CONFIG_PROC_PID_CPUSET */
3613
3614/* Display task mems_allowed in /proc/<pid>/status file. */
3615void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
3616{
3617 seq_printf(m, "Mems_allowed:\t%*pb\n",
3618 nodemask_pr_args(&task->mems_allowed));
3619 seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
3620 nodemask_pr_args(&task->mems_allowed));
3621}
3622}
2 * kernel/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
9 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
12 *
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 * 2008 Rework of the scheduler domains and CPU hotplug handling
18 * by Max Krasnyansky
19 *
20 * This file is subject to the terms and conditions of the GNU General Public
21 * License. See the file COPYING in the main directory of the Linux
22 * distribution for more details.
23 */
24
25#include <linux/cpu.h>
26#include <linux/cpumask.h>
27#include <linux/cpuset.h>
28#include <linux/err.h>
29#include <linux/errno.h>
30#include <linux/file.h>
31#include <linux/fs.h>
32#include <linux/init.h>
33#include <linux/interrupt.h>
34#include <linux/kernel.h>
35#include <linux/kmod.h>
36#include <linux/list.h>
37#include <linux/mempolicy.h>
38#include <linux/mm.h>
39#include <linux/memory.h>
40#include <linux/export.h>
41#include <linux/mount.h>
42#include <linux/namei.h>
43#include <linux/pagemap.h>
44#include <linux/proc_fs.h>
45#include <linux/rcupdate.h>
46#include <linux/sched.h>
47#include <linux/sched/mm.h>
48#include <linux/sched/task.h>
49#include <linux/seq_file.h>
50#include <linux/security.h>
51#include <linux/slab.h>
52#include <linux/spinlock.h>
53#include <linux/stat.h>
54#include <linux/string.h>
55#include <linux/time.h>
56#include <linux/time64.h>
57#include <linux/backing-dev.h>
58#include <linux/sort.h>
59#include <linux/oom.h>
60#include <linux/sched/isolation.h>
61#include <linux/uaccess.h>
62#include <linux/atomic.h>
63#include <linux/mutex.h>
64#include <linux/cgroup.h>
65#include <linux/wait.h>
66
67DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
68DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
69
70/* See "Frequency meter" comments, below. */
71
72struct fmeter {
73 int cnt; /* unprocessed events count */
74 int val; /* most recent output value */
75 time64_t time; /* clock (secs) when val computed */
76 spinlock_t lock; /* guards read or write of above */
77};
78
79struct cpuset {
80 struct cgroup_subsys_state css;
81
82 unsigned long flags; /* "unsigned long" so bitops work */
83
84 /*
85 * On default hierarchy:
86 *
87 * The user-configured masks can only be changed by writing to
88 * cpuset.cpus and cpuset.mems, and won't be limited by the
89 * parent masks.
90 *
91 * The effective masks is the real masks that apply to the tasks
92 * in the cpuset. They may be changed if the configured masks are
93 * changed or hotplug happens.
94 *
95 * effective_mask == configured_mask & parent's effective_mask,
96 * and if it ends up empty, it will inherit the parent's mask.
97 *
98 *
99 * On legacy hierachy:
100 *
101 * The user-configured masks are always the same with effective masks.
102 */
103
104 /* user-configured CPUs and Memory Nodes allow to tasks */
105 cpumask_var_t cpus_allowed;
106 nodemask_t mems_allowed;
107
108 /* effective CPUs and Memory Nodes allow to tasks */
109 cpumask_var_t effective_cpus;
110 nodemask_t effective_mems;
111
112 /*
113 * These are the old Memory Nodes that tasks took on.
114 *
115 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
116 * - A new cpuset's old_mems_allowed is initialized when some
117 * task is moved into it.
118 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
119 * cpuset.mems_allowed and have tasks' nodemask updated, and
120 * then old_mems_allowed is updated to mems_allowed.
121 */
122 nodemask_t old_mems_allowed;
123
124 struct fmeter fmeter; /* memory_pressure filter */
125
126 /*
127 * Tasks are being attached to this cpuset. Used to prevent
128 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
129 */
130 int attach_in_progress;
131
132 /* partition number for rebuild_sched_domains() */
133 int pn;
134
135 /* for custom sched domain */
136 int relax_domain_level;
137};
138
139static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
140{
141 return css ? container_of(css, struct cpuset, css) : NULL;
142}
143
144/* Retrieve the cpuset for a task */
145static inline struct cpuset *task_cs(struct task_struct *task)
146{
147 return css_cs(task_css(task, cpuset_cgrp_id));
148}
149
150static inline struct cpuset *parent_cs(struct cpuset *cs)
151{
152 return css_cs(cs->css.parent);
153}
154
155#ifdef CONFIG_NUMA
156static inline bool task_has_mempolicy(struct task_struct *task)
157{
158 return task->mempolicy;
159}
160#else
161static inline bool task_has_mempolicy(struct task_struct *task)
162{
163 return false;
164}
165#endif
166
167
168/* bits in struct cpuset flags field */
169typedef enum {
170 CS_ONLINE,
171 CS_CPU_EXCLUSIVE,
172 CS_MEM_EXCLUSIVE,
173 CS_MEM_HARDWALL,
174 CS_MEMORY_MIGRATE,
175 CS_SCHED_LOAD_BALANCE,
176 CS_SPREAD_PAGE,
177 CS_SPREAD_SLAB,
178} cpuset_flagbits_t;
179
180/* convenient tests for these bits */
181static inline bool is_cpuset_online(struct cpuset *cs)
182{
183 return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
184}
185
186static inline int is_cpu_exclusive(const struct cpuset *cs)
187{
188 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
189}
190
191static inline int is_mem_exclusive(const struct cpuset *cs)
192{
193 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
194}
195
196static inline int is_mem_hardwall(const struct cpuset *cs)
197{
198 return test_bit(CS_MEM_HARDWALL, &cs->flags);
199}
200
201static inline int is_sched_load_balance(const struct cpuset *cs)
202{
203 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
204}
205
206static inline int is_memory_migrate(const struct cpuset *cs)
207{
208 return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
209}
210
211static inline int is_spread_page(const struct cpuset *cs)
212{
213 return test_bit(CS_SPREAD_PAGE, &cs->flags);
214}
215
216static inline int is_spread_slab(const struct cpuset *cs)
217{
218 return test_bit(CS_SPREAD_SLAB, &cs->flags);
219}
220
221static struct cpuset top_cpuset = {
222 .flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
223 (1 << CS_MEM_EXCLUSIVE)),
224};
225
226/**
227 * cpuset_for_each_child - traverse online children of a cpuset
228 * @child_cs: loop cursor pointing to the current child
229 * @pos_css: used for iteration
230 * @parent_cs: target cpuset to walk children of
231 *
232 * Walk @child_cs through the online children of @parent_cs. Must be used
233 * with RCU read locked.
234 */
235#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
236 css_for_each_child((pos_css), &(parent_cs)->css) \
237 if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
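/*
 * Typical usage (a sketch; do_something() stands in for the caller's
 * work, mirroring how validate_change() below uses this macro):
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(c, css, parent_cs)
 *		do_something(c);
 *	rcu_read_unlock();
 */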
238
239/**
240 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
241 * @des_cs: loop cursor pointing to the current descendant
242 * @pos_css: used for iteration
243 * @root_cs: target cpuset to walk descendants of
244 *
245 * Walk @des_cs through the online descendants of @root_cs. Must be used
246 * with RCU read locked. The caller may modify @pos_css by calling
247 * css_rightmost_descendant() to skip subtree. @root_cs is included in the
248 * iteration and is the first node to be visited.
249 */
250#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
251 css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
252 if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
253
254/*
255 * There are two global locks guarding cpuset structures - cpuset_mutex and
256 * callback_lock. We also require taking task_lock() when dereferencing a
257 * task's cpuset pointer. See "The task_lock() exception", at the end of this
258 * comment.
259 *
260 * A task must hold both locks to modify cpusets. If a task holds
261 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
262 * is the only task able to also acquire callback_lock and be able to
263 * modify cpusets. It can perform various checks on the cpuset structure
264 * first, knowing nothing will change. It can also allocate memory while
265 * just holding cpuset_mutex. While it is performing these checks, various
266 * callback routines can briefly acquire callback_lock to query cpusets.
267 * Once it is ready to make the changes, it takes callback_lock, blocking
268 * everyone else.
269 *
270 * Calls to the kernel memory allocator can not be made while holding
271 * callback_lock, as that would risk double tripping on callback_lock
272 * from one of the callbacks into the cpuset code from within
273 * __alloc_pages().
274 *
275 * If a task is only holding callback_lock, then it has read-only
276 * access to cpusets.
277 *
278 * The task_struct fields mems_allowed and mempolicy may be changed by
279 * another task, so we use alloc_lock in the task_struct to protect
280 * them.
281 *
282 * The cpuset_common_file_read() handlers only hold callback_lock across
283 * small pieces of code, such as when reading out possibly multi-word
284 * cpumasks and nodemasks.
285 *
286 * Accessing a task's cpuset should be done in accordance with the
287 * guidelines for accessing subsystem state in kernel/cgroup.c
288 */
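/*
 * A minimal sketch of the resulting writer pattern (this is what e.g.
 * update_cpumask() below does):
 *
 *	mutex_lock(&cpuset_mutex);
 *	... validate the change, allocate memory ...
 *	spin_lock_irq(&callback_lock);
 *	... publish the new masks ...
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);
 */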
289
290static DEFINE_MUTEX(cpuset_mutex);
291static DEFINE_SPINLOCK(callback_lock);
292
293static struct workqueue_struct *cpuset_migrate_mm_wq;
294
295/*
296 * CPU / memory hotplug is handled asynchronously.
297 */
298static void cpuset_hotplug_workfn(struct work_struct *work);
299static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
300
301static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
302
303/*
304 * Cgroup v2 behavior is used when on default hierarchy or the
305 * cgroup_v2_mode flag is set.
306 */
307static inline bool is_in_v2_mode(void)
308{
309 return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
310 (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
311}
312
313/*
314 * This is ugly, but preserves the userspace API for existing cpuset
315 * users. If someone tries to mount the "cpuset" filesystem, we
316 * silently switch it to mount "cgroup" instead.
317 */
318static struct dentry *cpuset_mount(struct file_system_type *fs_type,
319 int flags, const char *unused_dev_name, void *data)
320{
321 struct file_system_type *cgroup_fs = get_fs_type("cgroup");
322 struct dentry *ret = ERR_PTR(-ENODEV);
323 if (cgroup_fs) {
324 char mountopts[] =
325 "cpuset,noprefix,"
326 "release_agent=/sbin/cpuset_release_agent";
327 ret = cgroup_fs->mount(cgroup_fs, flags,
328 unused_dev_name, mountopts);
329 put_filesystem(cgroup_fs);
330 }
331 return ret;
332}
333
334static struct file_system_type cpuset_fs_type = {
335 .name = "cpuset",
336 .mount = cpuset_mount,
337};
338
339/*
340 * Return in pmask the portion of a cpuset's cpus_allowed that
341 * are online. If none are online, walk up the cpuset hierarchy
342 * until we find one that does have some online cpus.
343 *
344 * One way or another, we guarantee to return some non-empty subset
345 * of cpu_online_mask.
346 *
347 * Call with callback_lock or cpuset_mutex held.
348 */
349static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
350{
351 while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
352 cs = parent_cs(cs);
353 if (unlikely(!cs)) {
354 /*
355 * The top cpuset doesn't have any online cpu as a
356 * consequence of a race between cpuset_hotplug_work
357 * and the cpu hotplug notifier. But we know the top
358 * cpuset's effective_cpus is on its way to being
359 * identical to cpu_online_mask.
360 */
361 cpumask_copy(pmask, cpu_online_mask);
362 return;
363 }
364 }
365 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
366}
367
368/*
369 * Return in *pmask the portion of a cpuset's mems_allowed that
370 * are online, with memory. If none are online with memory, walk
371 * up the cpuset hierarchy until we find one that does have some
372 * online mems. The top cpuset always has some mems online.
373 *
374 * One way or another, we guarantee to return some non-empty subset
375 * of node_states[N_MEMORY].
376 *
377 * Call with callback_lock or cpuset_mutex held.
378 */
379static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
380{
381 while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
382 cs = parent_cs(cs);
383 nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
384}
385
386/*
387 * update task's spread flag if cpuset's page/slab spread flag is set
388 *
389 * Call with callback_lock or cpuset_mutex held.
390 */
391static void cpuset_update_task_spread_flag(struct cpuset *cs,
392 struct task_struct *tsk)
393{
394 if (is_spread_page(cs))
395 task_set_spread_page(tsk);
396 else
397 task_clear_spread_page(tsk);
398
399 if (is_spread_slab(cs))
400 task_set_spread_slab(tsk);
401 else
402 task_clear_spread_slab(tsk);
403}
404
405/*
406 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
407 *
408 * One cpuset is a subset of another if all its allowed CPUs and
409 * Memory Nodes are a subset of the other, and its exclusive flags
410 * are only set if the other's are set. Call holding cpuset_mutex.
411 */
412
413static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
414{
415 return cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
416 nodes_subset(p->mems_allowed, q->mems_allowed) &&
417 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
418 is_mem_exclusive(p) <= is_mem_exclusive(q);
419}
420
421/**
422 * alloc_trial_cpuset - allocate a trial cpuset
423 * @cs: the cpuset that the trial cpuset duplicates
424 */
425static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
426{
427 struct cpuset *trial;
428
429 trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
430 if (!trial)
431 return NULL;
432
433 if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
434 goto free_cs;
435 if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
436 goto free_cpus;
437
438 cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
439 cpumask_copy(trial->effective_cpus, cs->effective_cpus);
440 return trial;
441
442free_cpus:
443 free_cpumask_var(trial->cpus_allowed);
444free_cs:
445 kfree(trial);
446 return NULL;
447}
448
449/**
450 * free_trial_cpuset - free the trial cpuset
451 * @trial: the trial cpuset to be freed
452 */
453static void free_trial_cpuset(struct cpuset *trial)
454{
455 free_cpumask_var(trial->effective_cpus);
456 free_cpumask_var(trial->cpus_allowed);
457 kfree(trial);
458}
459
460/*
461 * validate_change() - Used to validate that any proposed cpuset change
462 * follows the structural rules for cpusets.
463 *
464 * If we replaced the flag and mask values of the current cpuset
465 * (cur) with those values in the trial cpuset (trial), would
466 * our various subset and exclusive rules still be valid? Presumes
467 * cpuset_mutex held.
468 *
469 * 'cur' is the address of an actual, in-use cpuset. Operations
470 * such as list traversal that depend on the actual address of the
471 * cpuset in the list must use cur below, not trial.
472 *
473 * 'trial' is the address of bulk structure copy of cur, with
474 * perhaps one or more of the fields cpus_allowed, mems_allowed,
475 * or flags changed to new, trial values.
476 *
477 * Return 0 if valid, -errno if not.
478 */
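/*
 * For example, writing an empty "cpus" mask to a cpuset that still
 * contains tasks is rejected here with -ENOSPC, and, on legacy
 * hierarchies, granting a cpuset resources its parent lacks fails
 * with -EACCES.
 */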
479
480static int validate_change(struct cpuset *cur, struct cpuset *trial)
481{
482 struct cgroup_subsys_state *css;
483 struct cpuset *c, *par;
484 int ret;
485
486 rcu_read_lock();
487
488 /* Each of our child cpusets must be a subset of us */
489 ret = -EBUSY;
490 cpuset_for_each_child(c, css, cur)
491 if (!is_cpuset_subset(c, trial))
492 goto out;
493
494 /* Remaining checks don't apply to root cpuset */
495 ret = 0;
496 if (cur == &top_cpuset)
497 goto out;
498
499 par = parent_cs(cur);
500
501 /* On legacy hierarchies, we must be a subset of our parent cpuset. */
502 ret = -EACCES;
503 if (!is_in_v2_mode() && !is_cpuset_subset(trial, par))
504 goto out;
505
506 /*
507 * If either I or some sibling (!= me) is exclusive, we can't
508 * overlap
509 */
510 ret = -EINVAL;
511 cpuset_for_each_child(c, css, par) {
512 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
513 c != cur &&
514 cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
515 goto out;
516 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
517 c != cur &&
518 nodes_intersects(trial->mems_allowed, c->mems_allowed))
519 goto out;
520 }
521
522 /*
523 * Cpusets with tasks - existing or newly being attached - can't
524 * be changed to have empty cpus_allowed or mems_allowed.
525 */
526 ret = -ENOSPC;
527 if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
528 if (!cpumask_empty(cur->cpus_allowed) &&
529 cpumask_empty(trial->cpus_allowed))
530 goto out;
531 if (!nodes_empty(cur->mems_allowed) &&
532 nodes_empty(trial->mems_allowed))
533 goto out;
534 }
535
536 /*
537 * We can't shrink if we won't have enough room for SCHED_DEADLINE
538 * tasks.
539 */
540 ret = -EBUSY;
541 if (is_cpu_exclusive(cur) &&
542 !cpuset_cpumask_can_shrink(cur->cpus_allowed,
543 trial->cpus_allowed))
544 goto out;
545
546 ret = 0;
547out:
548 rcu_read_unlock();
549 return ret;
550}
551
552#ifdef CONFIG_SMP
553/*
554 * Helper routine for generate_sched_domains().
555 * Do cpusets a, b have overlapping effective cpus_allowed masks?
556 */
557static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
558{
559 return cpumask_intersects(a->effective_cpus, b->effective_cpus);
560}
561
562static void
563update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
564{
565 if (dattr->relax_domain_level < c->relax_domain_level)
566 dattr->relax_domain_level = c->relax_domain_level;
567 return;
568}
569
570static void update_domain_attr_tree(struct sched_domain_attr *dattr,
571 struct cpuset *root_cs)
572{
573 struct cpuset *cp;
574 struct cgroup_subsys_state *pos_css;
575
576 rcu_read_lock();
577 cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
578 /* skip the whole subtree if @cp doesn't have any CPU */
579 if (cpumask_empty(cp->cpus_allowed)) {
580 pos_css = css_rightmost_descendant(pos_css);
581 continue;
582 }
583
584 if (is_sched_load_balance(cp))
585 update_domain_attr(dattr, cp);
586 }
587 rcu_read_unlock();
588}
589
590/* Must be called with cpuset_mutex held. */
591static inline int nr_cpusets(void)
592{
593 /* jump label reference count + the top-level cpuset */
594 return static_key_count(&cpusets_enabled_key.key) + 1;
595}
596
597/*
598 * generate_sched_domains()
599 *
600 * This function builds a partial partition of the system's CPUs.
601 * A 'partial partition' is a set of non-overlapping subsets whose
602 * union is a subset of that set.
603 * The output of this function needs to be passed to kernel/sched/core.c
604 * partition_sched_domains() routine, which will rebuild the scheduler's
605 * load balancing domains (sched domains) as specified by that partial
606 * partition.
607 *
608 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
609 * for a background explanation of this.
610 *
611 * Does not return errors, on the theory that the callers of this
612 * routine would rather not worry about failures to rebuild sched
613 * domains when operating in the severe memory shortage situations
614 * that could cause allocation failures below.
615 *
616 * Must be called with cpuset_mutex held.
617 *
618 * The three key local variables below are:
619 * cp - cpuset pointer, used (together with pos_css) to perform a
620 * top-down scan of all cpusets. This scan loads a pointer
621 * to each cpuset marked is_sched_load_balance into the
622 * array 'csa'. For our purposes, rebuilding the scheduler's
623 * sched domains, we can ignore !is_sched_load_balance cpusets.
624 * csa - (for CpuSet Array) Array of pointers to all the cpusets
625 * that need to be load balanced, for convenient iterative
626 * access by the subsequent code that finds the best partition,
627 * i.e. the set of domains (subsets) of CPUs such that the
628 * cpus_allowed of every cpuset marked is_sched_load_balance
629 * is a subset of one of these domains, while there are as
630 * many such domains as possible, each as small as possible.
631 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
632 * the kernel/sched/core.c routine partition_sched_domains() in a
633 * convenient format, that can be easily compared to the prior
634 * value to determine what partition elements (sched domains)
635 * were changed (added or removed.)
636 *
637 * Finding the best partition (set of domains):
638 * The triple nested loops below over i, j, k scan over the
639 * load balanced cpusets (using the array of cpuset pointers in
640 * csa[]) looking for pairs of cpusets that have overlapping
641 * cpus_allowed, but which don't have the same 'pn' partition
642 * number, and puts them in the same partition. It keeps
643 * looping on the 'restart' label until it can no longer find
644 * any such pairs.
645 *
646 * The union of the cpus_allowed masks from the set of
647 * all cpusets having the same 'pn' value then form the one
648 * element of the partition (one sched domain) to be passed to
649 * partition_sched_domains().
650 */
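/*
 * Worked example (assuming sched_load_balance is off at the root, so
 * the single-domain special case in the function below doesn't apply):
 * with load-balanced cpusets A (cpus 0-3), B (cpus 2-5) and C (cpus
 * 6-7), A and B overlap and are merged into one partition while C
 * stays alone, so ndoms == 2 and the resulting sched domains are
 * {0-5} and {6-7}.
 */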
651static int generate_sched_domains(cpumask_var_t **domains,
652 struct sched_domain_attr **attributes)
653{
654 struct cpuset *cp; /* scans q */
655 struct cpuset **csa; /* array of all cpuset ptrs */
656 int csn; /* how many cpuset ptrs in csa so far */
657 int i, j, k; /* indices for partition finding loops */
658 cpumask_var_t *doms; /* resulting partition; i.e. sched domains */
659 struct sched_domain_attr *dattr; /* attributes for custom domains */
660 int ndoms = 0; /* number of sched domains in result */
661 int nslot; /* next empty doms[] struct cpumask slot */
662 struct cgroup_subsys_state *pos_css;
663
664 doms = NULL;
665 dattr = NULL;
666 csa = NULL;
667
668 /* Special case for the 99% of systems with one, full, sched domain */
669 if (is_sched_load_balance(&top_cpuset)) {
670 ndoms = 1;
671 doms = alloc_sched_domains(ndoms);
672 if (!doms)
673 goto done;
674
675 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
676 if (dattr) {
677 *dattr = SD_ATTR_INIT;
678 update_domain_attr_tree(dattr, &top_cpuset);
679 }
680 cpumask_and(doms[0], top_cpuset.effective_cpus,
681 housekeeping_cpumask(HK_FLAG_DOMAIN));
682
683 goto done;
684 }
685
686 csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
687 if (!csa)
688 goto done;
689 csn = 0;
690
691 rcu_read_lock();
692 cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
693 if (cp == &top_cpuset)
694 continue;
695 /*
696 * Continue traversing beyond @cp iff @cp has some CPUs and
697 * isn't load balancing. The former is obvious. The
698 * latter: All child cpusets contain a subset of the
699 * parent's cpus, so just skip them, and then we call
700 * update_domain_attr_tree() to calc relax_domain_level of
701 * the corresponding sched domain.
702 */
703 if (!cpumask_empty(cp->cpus_allowed) &&
704 !(is_sched_load_balance(cp) &&
705 cpumask_intersects(cp->cpus_allowed,
706 housekeeping_cpumask(HK_FLAG_DOMAIN))))
707 continue;
708
709 if (is_sched_load_balance(cp))
710 csa[csn++] = cp;
711
712 /* skip @cp's subtree */
713 pos_css = css_rightmost_descendant(pos_css);
714 }
715 rcu_read_unlock();
716
717 for (i = 0; i < csn; i++)
718 csa[i]->pn = i;
719 ndoms = csn;
720
721restart:
722 /* Find the best partition (set of sched domains) */
723 for (i = 0; i < csn; i++) {
724 struct cpuset *a = csa[i];
725 int apn = a->pn;
726
727 for (j = 0; j < csn; j++) {
728 struct cpuset *b = csa[j];
729 int bpn = b->pn;
730
731 if (apn != bpn && cpusets_overlap(a, b)) {
732 for (k = 0; k < csn; k++) {
733 struct cpuset *c = csa[k];
734
735 if (c->pn == bpn)
736 c->pn = apn;
737 }
738 ndoms--; /* one less element */
739 goto restart;
740 }
741 }
742 }
743
744 /*
745 * Now we know how many domains to create.
746 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
747 */
748 doms = alloc_sched_domains(ndoms);
749 if (!doms)
750 goto done;
751
752 /*
753 * The rest of the code, including the scheduler, can deal with
754 * dattr==NULL case. No need to abort if alloc fails.
755 */
756 dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
757
758 for (nslot = 0, i = 0; i < csn; i++) {
759 struct cpuset *a = csa[i];
760 struct cpumask *dp;
761 int apn = a->pn;
762
763 if (apn < 0) {
764 /* Skip completed partitions */
765 continue;
766 }
767
768 dp = doms[nslot];
769
770 if (nslot == ndoms) {
771 static int warnings = 10;
772 if (warnings) {
773 pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
774 nslot, ndoms, csn, i, apn);
775 warnings--;
776 }
777 continue;
778 }
779
780 cpumask_clear(dp);
781 if (dattr)
782 *(dattr + nslot) = SD_ATTR_INIT;
783 for (j = i; j < csn; j++) {
784 struct cpuset *b = csa[j];
785
786 if (apn == b->pn) {
787 cpumask_or(dp, dp, b->effective_cpus);
788 cpumask_and(dp, dp, housekeeping_cpumask(HK_FLAG_DOMAIN));
789 if (dattr)
790 update_domain_attr_tree(dattr + nslot, b);
791
792 /* Done with this partition */
793 b->pn = -1;
794 }
795 }
796 nslot++;
797 }
798 BUG_ON(nslot != ndoms);
799
800done:
801 kfree(csa);
802
803 /*
804 * Fallback to the default domain if kmalloc() failed.
805 * See comments in partition_sched_domains().
806 */
807 if (doms == NULL)
808 ndoms = 1;
809
810 *domains = doms;
811 *attributes = dattr;
812 return ndoms;
813}
814
815/*
816 * Rebuild scheduler domains.
817 *
818 * If the flag 'sched_load_balance' of any cpuset with non-empty
819 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
820 * which has that flag enabled, or if any cpuset with a non-empty
821 * 'cpus' is removed, then call this routine to rebuild the
822 * scheduler's dynamic sched domains.
823 *
824 * Call with cpuset_mutex held. Takes get_online_cpus().
825 */
826static void rebuild_sched_domains_locked(void)
827{
828 struct sched_domain_attr *attr;
829 cpumask_var_t *doms;
830 int ndoms;
831
832 lockdep_assert_held(&cpuset_mutex);
833 get_online_cpus();
834
835 /*
836 * We have raced with CPU hotplug. Don't do anything to avoid
837 * passing doms with an offlined cpu to partition_sched_domains().
838 * Anyway, the hotplug work item will rebuild the sched domains.
839 */
840 if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
841 goto out;
842
843 /* Generate domain masks and attrs */
844 ndoms = generate_sched_domains(&doms, &attr);
845
846 /* Have scheduler rebuild the domains */
847 partition_sched_domains(ndoms, doms, attr);
848out:
849 put_online_cpus();
850}
851#else /* !CONFIG_SMP */
852static void rebuild_sched_domains_locked(void)
853{
854}
855#endif /* CONFIG_SMP */
856
857void rebuild_sched_domains(void)
858{
859 mutex_lock(&cpuset_mutex);
860 rebuild_sched_domains_locked();
861 mutex_unlock(&cpuset_mutex);
862}
863
864/**
865 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
866 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
867 *
868 * Iterate through each task of @cs updating its cpus_allowed to the
869 * effective cpuset's. As this function is called with cpuset_mutex held,
870 * cpuset membership stays stable.
871 */
872static void update_tasks_cpumask(struct cpuset *cs)
873{
874 struct css_task_iter it;
875 struct task_struct *task;
876
877 css_task_iter_start(&cs->css, 0, &it);
878 while ((task = css_task_iter_next(&it)))
879 set_cpus_allowed_ptr(task, cs->effective_cpus);
880 css_task_iter_end(&it);
881}
882
883/*
884 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
885 * @cs: the cpuset to consider
886 * @new_cpus: temp variable for calculating new effective_cpus
887 *
888 * When the configured cpumask is changed, the effective cpumasks of this
889 * cpuset and all its descendants need to be updated.
890 *
891 * On legacy hierarchies, effective_cpus will be the same as cpus_allowed.
892 *
893 * Called with cpuset_mutex held
894 */
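/*
 * Note that the pre-order walk below visits a parent before any of
 * its descendants, so parent->effective_cpus is already up to date by
 * the time each child's new mask is computed from it.
 */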
895static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
896{
897 struct cpuset *cp;
898 struct cgroup_subsys_state *pos_css;
899 bool need_rebuild_sched_domains = false;
900
901 rcu_read_lock();
902 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
903 struct cpuset *parent = parent_cs(cp);
904
905 cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus);
906
907 /*
908 * If it becomes empty, inherit the effective mask of the
909 * parent, which is guaranteed to have some CPUs.
910 */
911 if (is_in_v2_mode() && cpumask_empty(new_cpus))
912 cpumask_copy(new_cpus, parent->effective_cpus);
913
914 /* Skip the whole subtree if the cpumask remains the same. */
915 if (cpumask_equal(new_cpus, cp->effective_cpus)) {
916 pos_css = css_rightmost_descendant(pos_css);
917 continue;
918 }
919
920 if (!css_tryget_online(&cp->css))
921 continue;
922 rcu_read_unlock();
923
924 spin_lock_irq(&callback_lock);
925 cpumask_copy(cp->effective_cpus, new_cpus);
926 spin_unlock_irq(&callback_lock);
927
928 WARN_ON(!is_in_v2_mode() &&
929 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
930
931 update_tasks_cpumask(cp);
932
933 /*
934 * If the effective cpumask of any non-empty cpuset is changed,
935 * we need to rebuild sched domains.
936 */
937 if (!cpumask_empty(cp->cpus_allowed) &&
938 is_sched_load_balance(cp))
939 need_rebuild_sched_domains = true;
940
941 rcu_read_lock();
942 css_put(&cp->css);
943 }
944 rcu_read_unlock();
945
946 if (need_rebuild_sched_domains)
947 rebuild_sched_domains_locked();
948}
949
950/**
951 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
952 * @cs: the cpuset to consider
953 * @trialcs: trial cpuset
954 * @buf: buffer of cpu numbers written to this cpuset
955 */
956static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
957 const char *buf)
958{
959 int retval;
960
961 /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
962 if (cs == &top_cpuset)
963 return -EACCES;
964
965 /*
966 * An empty cpus_allowed is ok only if the cpuset has no tasks.
967 * Since cpulist_parse() fails on an empty mask, we special case
968 * that parsing. The validate_change() call ensures that cpusets
969 * with tasks have cpus.
970 */
971 if (!*buf) {
972 cpumask_clear(trialcs->cpus_allowed);
973 } else {
974 retval = cpulist_parse(buf, trialcs->cpus_allowed);
975 if (retval < 0)
976 return retval;
977
978 if (!cpumask_subset(trialcs->cpus_allowed,
979 top_cpuset.cpus_allowed))
980 return -EINVAL;
981 }
982
983 /* Nothing to do if the cpus didn't change */
984 if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
985 return 0;
986
987 retval = validate_change(cs, trialcs);
988 if (retval < 0)
989 return retval;
990
991 spin_lock_irq(&callback_lock);
992 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
993 spin_unlock_irq(&callback_lock);
994
995 /* use trialcs->cpus_allowed as a temp variable */
996 update_cpumasks_hier(cs, trialcs->cpus_allowed);
997 return 0;
998}
999
1000/*
1001 * Migrate memory region from one set of nodes to another. This is
1002 * performed asynchronously as it can be called from process migration path
1003 * holding locks involved in process management. All mm migrations are
1004 * performed in the queued order and can be waited for by flushing
1005 * cpuset_migrate_mm_wq.
1006 */
1007
1008struct cpuset_migrate_mm_work {
1009 struct work_struct work;
1010 struct mm_struct *mm;
1011 nodemask_t from;
1012 nodemask_t to;
1013};
1014
1015static void cpuset_migrate_mm_workfn(struct work_struct *work)
1016{
1017 struct cpuset_migrate_mm_work *mwork =
1018 container_of(work, struct cpuset_migrate_mm_work, work);
1019
1020 /* on a wq worker, no need to worry about %current's mems_allowed */
1021 do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
1022 mmput(mwork->mm);
1023 kfree(mwork);
1024}
1025
1026static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1027 const nodemask_t *to)
1028{
1029 struct cpuset_migrate_mm_work *mwork;
1030
1031 mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
1032 if (mwork) {
1033 mwork->mm = mm;
1034 mwork->from = *from;
1035 mwork->to = *to;
1036 INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
1037 queue_work(cpuset_migrate_mm_wq, &mwork->work);
1038 } else {
1039 mmput(mm);
1040 }
1041}
1042
1043static void cpuset_post_attach(void)
1044{
1045 flush_workqueue(cpuset_migrate_mm_wq);
1046}
1047
1048/*
1049 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
1050 * @tsk: the task to change
1051 * @newmems: new nodes that the task will be set
1052 *
1053 * We use the mems_allowed_seq seqlock to safely update both tsk->mems_allowed
1054 * and rebind the task's mempolicy, if any. If the task is allocating in
1055 * parallel, it might temporarily see an empty intersection, which results in
1056 * a seqlock check and retry before OOM or allocation failure.
1057 */
1058static void cpuset_change_task_nodemask(struct task_struct *tsk,
1059 nodemask_t *newmems)
1060{
1061 task_lock(tsk);
1062
1063 local_irq_disable();
1064 write_seqcount_begin(&tsk->mems_allowed_seq);
1065
1066 nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
1067 mpol_rebind_task(tsk, newmems);
1068 tsk->mems_allowed = *newmems;
1069
1070 write_seqcount_end(&tsk->mems_allowed_seq);
1071 local_irq_enable();
1072
1073 task_unlock(tsk);
1074}
1075
1076static void *cpuset_being_rebound;
1077
1078/**
1079 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1080 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1081 *
1082 * Iterate through each task of @cs updating its mems_allowed to the
1083 * effective cpuset's. As this function is called with cpuset_mutex held,
1084 * cpuset membership stays stable.
1085 */
1086static void update_tasks_nodemask(struct cpuset *cs)
1087{
1088 static nodemask_t newmems; /* protected by cpuset_mutex */
1089 struct css_task_iter it;
1090 struct task_struct *task;
1091
1092 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
1093
1094 guarantee_online_mems(cs, &newmems);
1095
1096 /*
1097 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1098 * take while holding tasklist_lock. Forks can happen - the
1099 * mpol_dup() cpuset_being_rebound check will catch such forks,
1100 * and rebind their vma mempolicies too. Because we still hold
1101 * the global cpuset_mutex, we know that no other rebind effort
1102 * will be contending for the global variable cpuset_being_rebound.
1103 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1104 * is idempotent. Also migrate pages in each mm to new nodes.
1105 */
1106 css_task_iter_start(&cs->css, 0, &it);
1107 while ((task = css_task_iter_next(&it))) {
1108 struct mm_struct *mm;
1109 bool migrate;
1110
1111 cpuset_change_task_nodemask(task, &newmems);
1112
1113 mm = get_task_mm(task);
1114 if (!mm)
1115 continue;
1116
1117 migrate = is_memory_migrate(cs);
1118
1119 mpol_rebind_mm(mm, &cs->mems_allowed);
1120 if (migrate)
1121 cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1122 else
1123 mmput(mm);
1124 }
1125 css_task_iter_end(&it);
1126
1127 /*
1128 * All the tasks' nodemasks have been updated, update
1129 * cs->old_mems_allowed.
1130 */
1131 cs->old_mems_allowed = newmems;
1132
1133 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1134 cpuset_being_rebound = NULL;
1135}
1136
1137/*
1138 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
1139 * @cs: the cpuset to consider
1140 * @new_mems: a temp variable for calculating new effective_mems
1141 *
1142 * When the configured nodemask is changed, the effective nodemasks of this
1143 * and all its descendants need to be updated.
1144 *
1145 * On legacy hierarchies, effective_mems will be the same as mems_allowed.
1146 *
1147 * Called with cpuset_mutex held
1148 */
1149static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1150{
1151 struct cpuset *cp;
1152 struct cgroup_subsys_state *pos_css;
1153
1154 rcu_read_lock();
1155 cpuset_for_each_descendant_pre(cp, pos_css, cs) {
1156 struct cpuset *parent = parent_cs(cp);
1157
1158 nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
1159
1160 /*
1161 * If it becomes empty, inherit the effective mask of the
1162 * parent, which is guaranteed to have some MEMs.
1163 */
1164 if (is_in_v2_mode() && nodes_empty(*new_mems))
1165 *new_mems = parent->effective_mems;
1166
1167 /* Skip the whole subtree if the nodemask remains the same. */
1168 if (nodes_equal(*new_mems, cp->effective_mems)) {
1169 pos_css = css_rightmost_descendant(pos_css);
1170 continue;
1171 }
1172
1173 if (!css_tryget_online(&cp->css))
1174 continue;
1175 rcu_read_unlock();
1176
1177 spin_lock_irq(&callback_lock);
1178 cp->effective_mems = *new_mems;
1179 spin_unlock_irq(&callback_lock);
1180
1181 WARN_ON(!is_in_v2_mode() &&
1182 !nodes_equal(cp->mems_allowed, cp->effective_mems));
1183
1184 update_tasks_nodemask(cp);
1185
1186 rcu_read_lock();
1187 css_put(&cp->css);
1188 }
1189 rcu_read_unlock();
1190}
1191
1192/*
1193 * Handle user request to change the 'mems' memory placement
1194 * of a cpuset. Needs to validate the request, update the
1195 * cpuset's mems_allowed, and for each task in the cpuset,
1196 * update mems_allowed and rebind task's mempolicy and any vma
1197 * mempolicies, and if the cpuset is marked 'memory_migrate',
1198 * migrate the tasks' pages to the new memory.
1199 *
1200 * Call with cpuset_mutex held. May take callback_lock during call.
1201 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1202 * lock each such task's mm->mmap_sem, scan its vma's and rebind
1203 * their mempolicies to the cpuset's new mems_allowed.
1204 */
1205static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1206 const char *buf)
1207{
1208 int retval;
1209
1210 /*
1211 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
1212 * it's read-only
1213 */
1214 if (cs == &top_cpuset) {
1215 retval = -EACCES;
1216 goto done;
1217 }
1218
1219 /*
1220 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1221 * Since nodelist_parse() fails on an empty mask, we special case
1222 * that parsing. The validate_change() call ensures that cpusets
1223 * with tasks have memory.
1224 */
1225 if (!*buf) {
1226 nodes_clear(trialcs->mems_allowed);
1227 } else {
1228 retval = nodelist_parse(buf, trialcs->mems_allowed);
1229 if (retval < 0)
1230 goto done;
1231
1232 if (!nodes_subset(trialcs->mems_allowed,
1233 top_cpuset.mems_allowed)) {
1234 retval = -EINVAL;
1235 goto done;
1236 }
1237 }
1238
1239 if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1240 retval = 0; /* Too easy - nothing to do */
1241 goto done;
1242 }
1243 retval = validate_change(cs, trialcs);
1244 if (retval < 0)
1245 goto done;
1246
1247 spin_lock_irq(&callback_lock);
1248 cs->mems_allowed = trialcs->mems_allowed;
1249 spin_unlock_irq(&callback_lock);
1250
1251 /* use trialcs->mems_allowed as a temp variable */
1252 update_nodemasks_hier(cs, &trialcs->mems_allowed);
1253done:
1254 return retval;
1255}
1256
1257bool current_cpuset_is_being_rebound(void)
1258{
1259 bool ret;
1260
1261 rcu_read_lock();
1262 ret = task_cs(current) == cpuset_being_rebound;
1263 rcu_read_unlock();
1264
1265 return ret;
1266}
1267
1268static int update_relax_domain_level(struct cpuset *cs, s64 val)
1269{
1270#ifdef CONFIG_SMP
1271 if (val < -1 || val >= sched_domain_level_max)
1272 return -EINVAL;
1273#endif
1274
1275 if (val != cs->relax_domain_level) {
1276 cs->relax_domain_level = val;
1277 if (!cpumask_empty(cs->cpus_allowed) &&
1278 is_sched_load_balance(cs))
1279 rebuild_sched_domains_locked();
1280 }
1281
1282 return 0;
1283}
1284
1285/**
1286 * update_tasks_flags - update the spread flags of tasks in the cpuset.
1287 * @cs: the cpuset in which each task's spread flags need to be changed
1288 *
1289 * Iterate through each task of @cs updating its spread flags. As this
1290 * function is called with cpuset_mutex held, cpuset membership stays
1291 * stable.
1292 */
1293static void update_tasks_flags(struct cpuset *cs)
1294{
1295 struct css_task_iter it;
1296 struct task_struct *task;
1297
1298 css_task_iter_start(&cs->css, 0, &it);
1299 while ((task = css_task_iter_next(&it)))
1300 cpuset_update_task_spread_flag(cs, task);
1301 css_task_iter_end(&it);
1302}
1303
1304/*
1305 * update_flag - read a 0 or a 1 in a file and update associated flag
1306 * bit: the bit to update (see cpuset_flagbits_t)
1307 * cs: the cpuset to update
1308 * turning_on: whether the flag is being set or cleared
1309 *
1310 * Call with cpuset_mutex held.
1311 */
1312
1313static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1314 int turning_on)
1315{
1316 struct cpuset *trialcs;
1317 int balance_flag_changed;
1318 int spread_flag_changed;
1319 int err;
1320
1321 trialcs = alloc_trial_cpuset(cs);
1322 if (!trialcs)
1323 return -ENOMEM;
1324
1325 if (turning_on)
1326 set_bit(bit, &trialcs->flags);
1327 else
1328 clear_bit(bit, &trialcs->flags);
1329
1330 err = validate_change(cs, trialcs);
1331 if (err < 0)
1332 goto out;
1333
1334 balance_flag_changed = (is_sched_load_balance(cs) !=
1335 is_sched_load_balance(trialcs));
1336
1337 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1338 || (is_spread_page(cs) != is_spread_page(trialcs)));
1339
1340 spin_lock_irq(&callback_lock);
1341 cs->flags = trialcs->flags;
1342 spin_unlock_irq(&callback_lock);
1343
1344 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1345 rebuild_sched_domains_locked();
1346
1347 if (spread_flag_changed)
1348 update_tasks_flags(cs);
1349out:
1350 free_trial_cpuset(trialcs);
1351 return err;
1352}
1353
1354/*
1355 * Frequency meter - How fast is some event occurring?
1356 *
1357 * These routines manage a digitally filtered, constant time based,
1358 * event frequency meter. There are four routines:
1359 * fmeter_init() - initialize a frequency meter.
1360 * fmeter_markevent() - called each time the event happens.
1361 * fmeter_getrate() - returns the recent rate of such events.
1362 * fmeter_update() - internal routine used to update fmeter.
1363 *
1364 * A common data structure is passed to each of these routines,
1365 * which is used to keep track of the state required to manage the
1366 * frequency meter and its digital filter.
1367 *
1368 * The filter works on the number of events marked per unit time.
1369 * The filter is single-pole low-pass recursive (IIR). The time unit
1370 * is 1 second. Arithmetic is done using 32-bit integers scaled to
1371 * simulate 3 decimal digits of precision (multiplied by 1000).
1372 *
1373 * With an FM_COEF of 933, and a time base of 1 second, the filter
1374 * has a half-life of 10 seconds, meaning that if the events quit
1375 * happening, then the rate returned from the fmeter_getrate()
1376 * will be cut in half each 10 seconds, until it converges to zero.
1377 *
1378 * It is not worth doing a real infinitely recursive filter. If more
1379 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1380 * just compute FM_MAXTICKS ticks worth, by which point the level
1381 * will be stable.
1382 *
1383 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1384 * arithmetic overflow in the fmeter_update() routine.
1385 *
1386 * Given the simple 32 bit integer arithmetic used, this meter works
1387 * best for reporting rates between one per millisecond (msec) and
1388 * one per 32 (approx) seconds. At constant rates faster than one
1389 * per msec it maxes out at values just under 1,000,000. At constant
1390 * rates between one per msec, and one per second it will stabilize
1391 * to a value N*1000, where N is the rate of events per second.
1392 * At constant rates between one per second and one per 32 seconds,
1393 * it will be choppy, moving up on the seconds that have an event,
1394 * and then decaying until the next event. At rates slower than
1395 * about one in 32 seconds, it decays all the way back to zero between
1396 * each event.
1397 */
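/*
 * Worked example of the numbers above: an idle meter decays by a
 * factor of FM_COEF/FM_SCALE = 0.933 per second, and 0.933^10 ~= 0.50,
 * hence the 10 second half-life. At a steady rate of R events/sec,
 * cnt accumulates R*FM_SCALE each second and the value converges to
 * the v satisfying v = 0.933*v + 0.067*(R*1000), i.e. v = R*1000,
 * matching the N*1000 plateau described above.
 */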
1398
1399#define FM_COEF 933 /* coefficient for half-life of 10 secs */
1400#define FM_MAXTICKS ((u32)99) /* useless computing more ticks than this */
1401#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
1402#define FM_SCALE 1000 /* faux fixed point scale */
1403
1404/* Initialize a frequency meter */
1405static void fmeter_init(struct fmeter *fmp)
1406{
1407 fmp->cnt = 0;
1408 fmp->val = 0;
1409 fmp->time = 0;
1410 spin_lock_init(&fmp->lock);
1411}
1412
1413/* Internal meter update - process cnt events and update value */
1414static void fmeter_update(struct fmeter *fmp)
1415{
1416 time64_t now;
1417 u32 ticks;
1418
1419 now = ktime_get_seconds();
1420 ticks = now - fmp->time;
1421
1422 if (ticks == 0)
1423 return;
1424
1425 ticks = min(FM_MAXTICKS, ticks);
1426 while (ticks-- > 0)
1427 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1428 fmp->time = now;
1429
1430 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1431 fmp->cnt = 0;
1432}
1433
1434/* Process any previous ticks, then bump cnt by one (times scale). */
1435static void fmeter_markevent(struct fmeter *fmp)
1436{
1437 spin_lock(&fmp->lock);
1438 fmeter_update(fmp);
1439 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1440 spin_unlock(&fmp->lock);
1441}
1442
1443/* Process any previous ticks, then return current value. */
1444static int fmeter_getrate(struct fmeter *fmp)
1445{
1446 int val;
1447
1448 spin_lock(&fmp->lock);
1449 fmeter_update(fmp);
1450 val = fmp->val;
1451 spin_unlock(&fmp->lock);
1452 return val;
1453}
1454
1455static struct cpuset *cpuset_attach_old_cs;
1456
1457/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1458static int cpuset_can_attach(struct cgroup_taskset *tset)
1459{
1460 struct cgroup_subsys_state *css;
1461 struct cpuset *cs;
1462 struct task_struct *task;
1463 int ret;
1464
1465 /* used later by cpuset_attach() */
1466 cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
1467 cs = css_cs(css);
1468
1469 mutex_lock(&cpuset_mutex);
1470
1471 /* allow moving tasks into an empty cpuset if on default hierarchy */
1472 ret = -ENOSPC;
1473 if (!is_in_v2_mode() &&
1474 (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1475 goto out_unlock;
1476
1477 cgroup_taskset_for_each(task, css, tset) {
1478 ret = task_can_attach(task, cs->cpus_allowed);
1479 if (ret)
1480 goto out_unlock;
1481 ret = security_task_setscheduler(task);
1482 if (ret)
1483 goto out_unlock;
1484 }
1485
1486 /*
1487 * Mark attach is in progress. This makes validate_change() fail
1488 * changes which zero cpus/mems_allowed.
1489 */
1490 cs->attach_in_progress++;
1491 ret = 0;
1492out_unlock:
1493 mutex_unlock(&cpuset_mutex);
1494 return ret;
1495}
1496
1497static void cpuset_cancel_attach(struct cgroup_taskset *tset)
1498{
1499 struct cgroup_subsys_state *css;
1500 struct cpuset *cs;
1501
1502 cgroup_taskset_first(tset, &css);
1503 cs = css_cs(css);
1504
1505 mutex_lock(&cpuset_mutex);
1506 cs->attach_in_progress--;
1507 mutex_unlock(&cpuset_mutex);
1508}
1509
1510/*
1511 * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach()
1512 * but we can't allocate it dynamically there. Define it globally and
1513 * allocate from cpuset_init().
1514 */
1515static cpumask_var_t cpus_attach;
1516
1517static void cpuset_attach(struct cgroup_taskset *tset)
1518{
1519 /* static buf protected by cpuset_mutex */
1520 static nodemask_t cpuset_attach_nodemask_to;
1521 struct task_struct *task;
1522 struct task_struct *leader;
1523 struct cgroup_subsys_state *css;
1524 struct cpuset *cs;
1525 struct cpuset *oldcs = cpuset_attach_old_cs;
1526
1527 cgroup_taskset_first(tset, &css);
1528 cs = css_cs(css);
1529
1530 mutex_lock(&cpuset_mutex);
1531
1532 /* prepare for attach */
1533 if (cs == &top_cpuset)
1534 cpumask_copy(cpus_attach, cpu_possible_mask);
1535 else
1536 guarantee_online_cpus(cs, cpus_attach);
1537
1538 guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1539
1540 cgroup_taskset_for_each(task, css, tset) {
1541 /*
1542 * can_attach beforehand should guarantee that this doesn't
1543 * fail. TODO: have a better way to handle failure here
1544 */
1545 WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
1546
1547 cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
1548 cpuset_update_task_spread_flag(cs, task);
1549 }
1550
1551 /*
1552 * Change mm for all threadgroup leaders. This is expensive and may
1553 * sleep and should be moved outside migration path proper.
1554 */
1555 cpuset_attach_nodemask_to = cs->effective_mems;
1556 cgroup_taskset_for_each_leader(leader, css, tset) {
1557 struct mm_struct *mm = get_task_mm(leader);
1558
1559 if (mm) {
1560 mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
1561
1562 /*
1563 * old_mems_allowed is the same as mems_allowed
1564 * here, except if this task is being moved
1565 * automatically due to hotplug. In that case
1566 * @mems_allowed has been updated and is empty, so
1567 * @old_mems_allowed is the right nodemask that we
1568 * migrate the mm from.
1569 */
1570 if (is_memory_migrate(cs))
1571 cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
1572 &cpuset_attach_nodemask_to);
1573 else
1574 mmput(mm);
1575 }
1576 }
1577
1578 cs->old_mems_allowed = cpuset_attach_nodemask_to;
1579
1580 cs->attach_in_progress--;
1581 if (!cs->attach_in_progress)
1582 wake_up(&cpuset_attach_wq);
1583
1584 mutex_unlock(&cpuset_mutex);
1585}
1586
1587/* The various types of files and directories in a cpuset file system */
1588
1589typedef enum {
1590 FILE_MEMORY_MIGRATE,
1591 FILE_CPULIST,
1592 FILE_MEMLIST,
1593 FILE_EFFECTIVE_CPULIST,
1594 FILE_EFFECTIVE_MEMLIST,
1595 FILE_CPU_EXCLUSIVE,
1596 FILE_MEM_EXCLUSIVE,
1597 FILE_MEM_HARDWALL,
1598 FILE_SCHED_LOAD_BALANCE,
1599 FILE_SCHED_RELAX_DOMAIN_LEVEL,
1600 FILE_MEMORY_PRESSURE_ENABLED,
1601 FILE_MEMORY_PRESSURE,
1602 FILE_SPREAD_PAGE,
1603 FILE_SPREAD_SLAB,
1604} cpuset_filetype_t;
1605
1606static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
1607 u64 val)
1608{
1609 struct cpuset *cs = css_cs(css);
1610 cpuset_filetype_t type = cft->private;
1611 int retval = 0;
1612
1613 mutex_lock(&cpuset_mutex);
1614 if (!is_cpuset_online(cs)) {
1615 retval = -ENODEV;
1616 goto out_unlock;
1617 }
1618
1619 switch (type) {
1620 case FILE_CPU_EXCLUSIVE:
1621 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1622 break;
1623 case FILE_MEM_EXCLUSIVE:
1624 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1625 break;
1626 case FILE_MEM_HARDWALL:
1627 retval = update_flag(CS_MEM_HARDWALL, cs, val);
1628 break;
1629 case FILE_SCHED_LOAD_BALANCE:
1630 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1631 break;
1632 case FILE_MEMORY_MIGRATE:
1633 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1634 break;
1635 case FILE_MEMORY_PRESSURE_ENABLED:
1636 cpuset_memory_pressure_enabled = !!val;
1637 break;
1638 case FILE_SPREAD_PAGE:
1639 retval = update_flag(CS_SPREAD_PAGE, cs, val);
1640 break;
1641 case FILE_SPREAD_SLAB:
1642 retval = update_flag(CS_SPREAD_SLAB, cs, val);
1643 break;
1644 default:
1645 retval = -EINVAL;
1646 break;
1647 }
1648out_unlock:
1649 mutex_unlock(&cpuset_mutex);
1650 return retval;
1651}
1652
1653static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
1654 s64 val)
1655{
1656 struct cpuset *cs = css_cs(css);
1657 cpuset_filetype_t type = cft->private;
1658 int retval = -ENODEV;
1659
1660 mutex_lock(&cpuset_mutex);
1661 if (!is_cpuset_online(cs))
1662 goto out_unlock;
1663
1664 switch (type) {
1665 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1666 retval = update_relax_domain_level(cs, val);
1667 break;
1668 default:
1669 retval = -EINVAL;
1670 break;
1671 }
1672out_unlock:
1673 mutex_unlock(&cpuset_mutex);
1674 return retval;
1675}
1676
1677/*
1678 * Common handling for a write to a "cpus" or "mems" file.
1679 */
1680static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
1681 char *buf, size_t nbytes, loff_t off)
1682{
1683 struct cpuset *cs = css_cs(of_css(of));
1684 struct cpuset *trialcs;
1685 int retval = -ENODEV;
1686
1687 buf = strstrip(buf);
1688
1689 /*
1690 * CPU or memory hotunplug may leave @cs w/o any execution
1691 * resources, in which case the hotplug code asynchronously updates
1692 * configuration and transfers all tasks to the nearest ancestor
1693 * which can execute.
1694 *
1695 * As writes to "cpus" or "mems" may restore @cs's execution
1696 * resources, wait for the previously scheduled operations before
1697 * proceeding, so that we don't keep removing tasks that were added
1698 * after execution capability is restored.
1699 *
1700 * cpuset_hotplug_work calls back into cgroup core via
1701 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
1702 * operation like this one can lead to a deadlock through kernfs
1703 * active_ref protection. Let's break the protection. Losing the
1704 * protection is okay as we check whether @cs is online after
1705 * grabbing cpuset_mutex anyway. This only happens on the legacy
1706 * hierarchies.
1707 */
1708 css_get(&cs->css);
1709 kernfs_break_active_protection(of->kn);
1710 flush_work(&cpuset_hotplug_work);
1711
1712 mutex_lock(&cpuset_mutex);
1713 if (!is_cpuset_online(cs))
1714 goto out_unlock;
1715
1716 trialcs = alloc_trial_cpuset(cs);
1717 if (!trialcs) {
1718 retval = -ENOMEM;
1719 goto out_unlock;
1720 }
1721
1722 switch (of_cft(of)->private) {
1723 case FILE_CPULIST:
1724 retval = update_cpumask(cs, trialcs, buf);
1725 break;
1726 case FILE_MEMLIST:
1727 retval = update_nodemask(cs, trialcs, buf);
1728 break;
1729 default:
1730 retval = -EINVAL;
1731 break;
1732 }
1733
1734 free_trial_cpuset(trialcs);
1735out_unlock:
1736 mutex_unlock(&cpuset_mutex);
1737 kernfs_unbreak_active_protection(of->kn);
1738 css_put(&cs->css);
1739 flush_workqueue(cpuset_migrate_mm_wq);
1740 return retval ?: nbytes;
1741}
1742
1743/*
1744 * These ascii lists should be read in a single call, by using a user
1745 * buffer large enough to hold the entire map. If read in smaller
1746 * chunks, there is no guarantee of atomicity. Since the display format
1747 * used, list of ranges of sequential numbers, is variable length,
1748 * and since these maps can change value dynamically, one could read
1749 * gibberish by doing partial reads while a list was changing.
1750 */
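/*
 * For example, a cpuset allowed CPUs 0, 1, 2, 3 and 8 reads back from
 * its "cpus" file as "0-3,8\n".
 */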
1751static int cpuset_common_seq_show(struct seq_file *sf, void *v)
1752{
1753 struct cpuset *cs = css_cs(seq_css(sf));
1754 cpuset_filetype_t type = seq_cft(sf)->private;
1755 int ret = 0;
1756
1757 spin_lock_irq(&callback_lock);
1758
1759 switch (type) {
1760 case FILE_CPULIST:
1761 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
1762 break;
1763 case FILE_MEMLIST:
1764 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
1765 break;
1766 case FILE_EFFECTIVE_CPULIST:
1767 seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
1768 break;
1769 case FILE_EFFECTIVE_MEMLIST:
1770 seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
1771 break;
1772 default:
1773 ret = -EINVAL;
1774 }
1775
1776 spin_unlock_irq(&callback_lock);
1777 return ret;
1778}
1779
1780static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
1781{
1782 struct cpuset *cs = css_cs(css);
1783 cpuset_filetype_t type = cft->private;
1784 switch (type) {
1785 case FILE_CPU_EXCLUSIVE:
1786 return is_cpu_exclusive(cs);
1787 case FILE_MEM_EXCLUSIVE:
1788 return is_mem_exclusive(cs);
1789 case FILE_MEM_HARDWALL:
1790 return is_mem_hardwall(cs);
1791 case FILE_SCHED_LOAD_BALANCE:
1792 return is_sched_load_balance(cs);
1793 case FILE_MEMORY_MIGRATE:
1794 return is_memory_migrate(cs);
1795 case FILE_MEMORY_PRESSURE_ENABLED:
1796 return cpuset_memory_pressure_enabled;
1797 case FILE_MEMORY_PRESSURE:
1798 return fmeter_getrate(&cs->fmeter);
1799 case FILE_SPREAD_PAGE:
1800 return is_spread_page(cs);
1801 case FILE_SPREAD_SLAB:
1802 return is_spread_slab(cs);
1803 default:
1804 BUG();
1805 }
1806
1807 /* Unreachable but makes gcc happy */
1808 return 0;
1809}
1810
1811static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
1812{
1813 struct cpuset *cs = css_cs(css);
1814 cpuset_filetype_t type = cft->private;
1815 switch (type) {
1816 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1817 return cs->relax_domain_level;
1818 default:
1819 BUG();
1820 }
1821
1822 /* Unreachable but makes gcc happy */
1823 return 0;
1824}


/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype files[] = {
	{
		.name = "cpus",
		.seq_show = cpuset_common_seq_show,
		.write = cpuset_write_resmask,
		.max_write_len = (100U + 6 * NR_CPUS),
		.private = FILE_CPULIST,
	},

	{
		.name = "mems",
		.seq_show = cpuset_common_seq_show,
		.write = cpuset_write_resmask,
		.max_write_len = (100U + 6 * MAX_NUMNODES),
		.private = FILE_MEMLIST,
	},

	{
		.name = "effective_cpus",
		.seq_show = cpuset_common_seq_show,
		.private = FILE_EFFECTIVE_CPULIST,
	},

	{
		.name = "effective_mems",
		.seq_show = cpuset_common_seq_show,
		.private = FILE_EFFECTIVE_MEMLIST,
	},

	{
		.name = "cpu_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_CPU_EXCLUSIVE,
	},

	{
		.name = "mem_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_EXCLUSIVE,
	},

	{
		.name = "mem_hardwall",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_HARDWALL,
	},

	{
		.name = "sched_load_balance",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SCHED_LOAD_BALANCE,
	},

	{
		.name = "sched_relax_domain_level",
		.read_s64 = cpuset_read_s64,
		.write_s64 = cpuset_write_s64,
		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
	},

	{
		.name = "memory_migrate",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_MIGRATE,
	},

	{
		.name = "memory_pressure",
		.read_u64 = cpuset_read_u64,
		.private = FILE_MEMORY_PRESSURE,
	},

	{
		.name = "memory_spread_page",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_PAGE,
	},

	{
		.name = "memory_spread_slab",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_SLAB,
	},

	{
		.name = "memory_pressure_enabled",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_PRESSURE_ENABLED,
	},

	{ }	/* terminate */
};
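/*
 * Illustrative note (hypothetical mount point): as legacy_cftypes, the
 * entries above surface as "cpuset."-prefixed control files on a v1
 * hierarchy, e.g.:
 *
 *	/sys/fs/cgroup/cpuset/mygroup/cpuset.cpus
 *	/sys/fs/cgroup/cpuset/mygroup/cpuset.sched_load_balance
 *
 * Writes are bounded by max_write_len where given; reads go through the
 * seq_show/read_u64/read_s64 handlers wired up per entry.
 */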

/*
 * cpuset_css_alloc - allocate a cpuset css
 * @parent_css: css of the control group that the new cpuset will be part of
 */

static struct cgroup_subsys_state *
cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cpuset *cs;

	if (!parent_css)
		return &top_cpuset.css;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);
	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
		goto free_cs;
	if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
		goto free_cpus;

	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
	cpumask_clear(cs->cpus_allowed);
	nodes_clear(cs->mems_allowed);
	cpumask_clear(cs->effective_cpus);
	nodes_clear(cs->effective_mems);
	fmeter_init(&cs->fmeter);
	cs->relax_domain_level = -1;

	return &cs->css;

free_cpus:
	free_cpumask_var(cs->cpus_allowed);
free_cs:
	kfree(cs);
	return ERR_PTR(-ENOMEM);
}

static int cpuset_css_online(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);
	struct cpuset *parent = parent_cs(cs);
	struct cpuset *tmp_cs;
	struct cgroup_subsys_state *pos_css;

	if (!parent)
		return 0;

	mutex_lock(&cpuset_mutex);

	set_bit(CS_ONLINE, &cs->flags);
	if (is_spread_page(parent))
		set_bit(CS_SPREAD_PAGE, &cs->flags);
	if (is_spread_slab(parent))
		set_bit(CS_SPREAD_SLAB, &cs->flags);

	cpuset_inc();

	spin_lock_irq(&callback_lock);
	if (is_in_v2_mode()) {
		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
		cs->effective_mems = parent->effective_mems;
	}
	spin_unlock_irq(&callback_lock);

	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
		goto out_unlock;

	/*
	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
	 * set. This flag handling is implemented in cgroup core for
	 * historical reasons - the flag may be specified during mount.
	 *
	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
	 * refuse to clone the configuration - thereby refusing to let the
	 * task be entered, and as a result refusing the sys_unshare() or
	 * clone() which initiated it. If this becomes a problem for some
	 * users who wish to allow that scenario, then this could be
	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
	 * (and likewise for mems) to the new cgroup.
	 */
	rcu_read_lock();
	cpuset_for_each_child(tmp_cs, pos_css, parent) {
		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
			rcu_read_unlock();
			goto out_unlock;
		}
	}
	rcu_read_unlock();

	spin_lock_irq(&callback_lock);
	cs->mems_allowed = parent->mems_allowed;
	cs->effective_mems = parent->mems_allowed;
	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
	spin_unlock_irq(&callback_lock);
out_unlock:
	mutex_unlock(&cpuset_mutex);
	return 0;
}
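/*
 * Illustrative walk-through (hypothetical hierarchy): with clone_children
 * set, creating /A/B copies A's cpus_allowed and mems_allowed into B via
 * the block above. Had A already contained a child C with
 * cpuset.cpu_exclusive set, the sibling scan would bail out instead and
 * B would come up with empty masks, exactly as without the flag.
 */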

/*
 * If the cpuset being removed has its flag 'sched_load_balance'
 * enabled, then simulate turning sched_load_balance off, which
 * will call rebuild_sched_domains_locked().
 */

static void cpuset_css_offline(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);

	mutex_lock(&cpuset_mutex);

	if (is_sched_load_balance(cs))
		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);

	cpuset_dec();
	clear_bit(CS_ONLINE, &cs->flags);

	mutex_unlock(&cpuset_mutex);
}

static void cpuset_css_free(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);

	free_cpumask_var(cs->effective_cpus);
	free_cpumask_var(cs->cpus_allowed);
	kfree(cs);
}

static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
	mutex_lock(&cpuset_mutex);
	spin_lock_irq(&callback_lock);

	if (is_in_v2_mode()) {
		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
		top_cpuset.mems_allowed = node_possible_map;
	} else {
		cpumask_copy(top_cpuset.cpus_allowed,
			     top_cpuset.effective_cpus);
		top_cpuset.mems_allowed = top_cpuset.effective_mems;
	}

	spin_unlock_irq(&callback_lock);
	mutex_unlock(&cpuset_mutex);
}

/*
 * Make sure the new task conforms to the current state of its parent,
 * which could have been changed by cpuset just after it inherits the
 * state from the parent and before it sits on the cgroup's task list.
 */
static void cpuset_fork(struct task_struct *task)
{
	if (task_css_is_root(task, cpuset_cgrp_id))
		return;

	set_cpus_allowed_ptr(task, &current->cpus_allowed);
	task->mems_allowed = current->mems_allowed;
}

struct cgroup_subsys cpuset_cgrp_subsys = {
	.css_alloc = cpuset_css_alloc,
	.css_online = cpuset_css_online,
	.css_offline = cpuset_css_offline,
	.css_free = cpuset_css_free,
	.can_attach = cpuset_can_attach,
	.cancel_attach = cpuset_cancel_attach,
	.attach = cpuset_attach,
	.post_attach = cpuset_post_attach,
	.bind = cpuset_bind,
	.fork = cpuset_fork,
	.legacy_cftypes = files,
	.early_init = true,
};

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system.
 **/

int __init cpuset_init(void)
{
	int err = 0;

	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));

	cpumask_setall(top_cpuset.cpus_allowed);
	nodes_setall(top_cpuset.mems_allowed);
	cpumask_setall(top_cpuset.effective_cpus);
	nodes_setall(top_cpuset.effective_mems);

	fmeter_init(&top_cpuset.fmeter);
	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
	top_cpuset.relax_domain_level = -1;

	err = register_filesystem(&cpuset_fs_type);
	if (err < 0)
		return err;

	BUG_ON(!alloc_cpumask_var(&cpus_attach, GFP_KERNEL));

	return 0;
}

/*
 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
 * or memory nodes, we need to walk over the cpuset hierarchy,
 * removing that CPU or node from all cpusets. If this removes the
 * last CPU or node from a cpuset, then move the tasks in the empty
 * cpuset to its next-highest non-empty parent.
 */
static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
{
	struct cpuset *parent;

	/*
	 * Find its next-highest non-empty parent (the top cpuset
	 * has online cpus, so it can't be empty).
	 */
	parent = parent_cs(cs);
	while (cpumask_empty(parent->cpus_allowed) ||
			nodes_empty(parent->mems_allowed))
		parent = parent_cs(parent);

	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
		pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
		pr_cont_cgroup_name(cs->css.cgroup);
		pr_cont("\n");
	}
}
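/*
 * Illustrative walk (hypothetical hierarchy): if /A/B/C loses its last
 * CPU and /A/B is also empty, the loop above climbs past B and settles
 * on /A, and C's tasks are transferred there. The loop always
 * terminates because the top cpuset retains the online cpus and memory
 * nodes.
 */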

static void
hotplug_update_tasks_legacy(struct cpuset *cs,
			    struct cpumask *new_cpus, nodemask_t *new_mems,
			    bool cpus_updated, bool mems_updated)
{
	bool is_empty;

	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->cpus_allowed, new_cpus);
	cpumask_copy(cs->effective_cpus, new_cpus);
	cs->mems_allowed = *new_mems;
	cs->effective_mems = *new_mems;
	spin_unlock_irq(&callback_lock);

	/*
	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
	 * as the tasks will be migrated to an ancestor.
	 */
	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
		update_tasks_cpumask(cs);
	if (mems_updated && !nodes_empty(cs->mems_allowed))
		update_tasks_nodemask(cs);

	is_empty = cpumask_empty(cs->cpus_allowed) ||
		   nodes_empty(cs->mems_allowed);

	mutex_unlock(&cpuset_mutex);

	/*
	 * Move tasks to the nearest ancestor with execution resources.
	 * This is a full cgroup operation which will also call back into
	 * cpuset. It should be done outside any lock.
	 */
	if (is_empty)
		remove_tasks_in_empty_cpuset(cs);

	mutex_lock(&cpuset_mutex);
}

static void
hotplug_update_tasks(struct cpuset *cs,
		     struct cpumask *new_cpus, nodemask_t *new_mems,
		     bool cpus_updated, bool mems_updated)
{
	if (cpumask_empty(new_cpus))
		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
	if (nodes_empty(*new_mems))
		*new_mems = parent_cs(cs)->effective_mems;

	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->effective_cpus, new_cpus);
	cs->effective_mems = *new_mems;
	spin_unlock_irq(&callback_lock);

	if (cpus_updated)
		update_tasks_cpumask(cs);
	if (mems_updated)
		update_tasks_nodemask(cs);
}
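/*
 * Illustrative contrast of the two paths above (hypothetical cpuset with
 * cpus_allowed = 2-3 when cpus 2-3 go offline):
 *
 *  - legacy (v1): cpus_allowed and effective_cpus both become empty and
 *    the tasks are migrated out to a non-empty ancestor;
 *  - default (v2): cpus_allowed keeps the user's 2-3, effective_cpus
 *    falls back to the parent's effective_cpus, and the tasks stay put.
 */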

/**
 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
 * @cs: cpuset of interest
 *
 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
 * offline, update @cs accordingly. If @cs ends up with no CPU or memory,
 * all its tasks are moved to the nearest ancestor with both resources.
 */
static void cpuset_hotplug_update_tasks(struct cpuset *cs)
{
	static cpumask_t new_cpus;
	static nodemask_t new_mems;
	bool cpus_updated;
	bool mems_updated;
retry:
	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);

	mutex_lock(&cpuset_mutex);

	/*
	 * We have raced with task attaching. We wait until attaching
	 * is finished, so we won't attach a task to an empty cpuset.
	 */
	if (cs->attach_in_progress) {
		mutex_unlock(&cpuset_mutex);
		goto retry;
	}

	cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
	nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);

	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
	mems_updated = !nodes_equal(new_mems, cs->effective_mems);

	if (is_in_v2_mode())
		hotplug_update_tasks(cs, &new_cpus, &new_mems,
				     cpus_updated, mems_updated);
	else
		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
					    cpus_updated, mems_updated);

	mutex_unlock(&cpuset_mutex);
}

static bool force_rebuild;

void cpuset_force_rebuild(void)
{
	force_rebuild = true;
}

/**
 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
 *
 * This function is called after either CPU or memory configuration has
 * changed and updates cpuset accordingly. The top_cpuset is always
 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
 * order to make cpusets transparent (of no effect) on systems that are
 * actively using CPU hotplug but making no active use of cpusets.
 *
 * Non-root cpusets are only affected by offlining. If any CPUs or memory
 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
 * all descendants.
 *
 * Note that CPU offlining during suspend is ignored. We don't modify
 * cpusets across suspend/resume cycles at all.
 */
static void cpuset_hotplug_workfn(struct work_struct *work)
{
	static cpumask_t new_cpus;
	static nodemask_t new_mems;
	bool cpus_updated, mems_updated;
	bool on_dfl = is_in_v2_mode();

	mutex_lock(&cpuset_mutex);

	/* fetch the available cpus/mems and find out which changed how */
	cpumask_copy(&new_cpus, cpu_active_mask);
	new_mems = node_states[N_MEMORY];

	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);

	/* synchronize cpus_allowed to cpu_active_mask */
	if (cpus_updated) {
		spin_lock_irq(&callback_lock);
		if (!on_dfl)
			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
		spin_unlock_irq(&callback_lock);
		/* we don't mess with cpumasks of tasks in top_cpuset */
	}

	/* synchronize mems_allowed to N_MEMORY */
	if (mems_updated) {
		spin_lock_irq(&callback_lock);
		if (!on_dfl)
			top_cpuset.mems_allowed = new_mems;
		top_cpuset.effective_mems = new_mems;
		spin_unlock_irq(&callback_lock);
		update_tasks_nodemask(&top_cpuset);
	}

	mutex_unlock(&cpuset_mutex);

	/* if cpus or mems changed, we need to propagate to descendants */
	if (cpus_updated || mems_updated) {
		struct cpuset *cs;
		struct cgroup_subsys_state *pos_css;

		rcu_read_lock();
		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
				continue;
			rcu_read_unlock();

			cpuset_hotplug_update_tasks(cs);

			rcu_read_lock();
			css_put(&cs->css);
		}
		rcu_read_unlock();
	}

	/* rebuild sched domains if cpus_allowed has changed */
	if (cpus_updated || force_rebuild) {
		force_rebuild = false;
		rebuild_sched_domains();
	}
}

void cpuset_update_active_cpus(void)
{
	/*
	 * We're inside cpu hotplug critical region which usually nests
	 * inside cgroup synchronization. Bounce actual hotplug processing
	 * to a work item to avoid reverse locking order.
	 */
	schedule_work(&cpuset_hotplug_work);
}

void cpuset_wait_for_hotplug(void)
{
	flush_work(&cpuset_hotplug_work);
}

/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
 * Call this routine anytime after node_states[N_MEMORY] changes.
 * See cpuset_update_active_cpus() for CPU hotplug handling.
 */
static int cpuset_track_online_nodes(struct notifier_block *self,
				unsigned long action, void *arg)
{
	schedule_work(&cpuset_hotplug_work);
	return NOTIFY_OK;
}

static struct notifier_block cpuset_track_online_nodes_nb = {
	.notifier_call = cpuset_track_online_nodes,
	.priority = 10,		/* ??! */
};

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 */
void __init cpuset_init_smp(void)
{
	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
	top_cpuset.mems_allowed = node_states[N_MEMORY];
	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;

	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
	top_cpuset.effective_mems = node_states[N_MEMORY];

	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);

	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
	BUG_ON(!cpuset_migrate_mm_wq);
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 *
 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 * attached to the specified @tsk. Guaranteed to return some non-empty
 * subset of cpu_online_mask, even if this means going outside the
 * task's cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_cpus(task_cs(tsk), pmask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);
}

void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	rcu_read_lock();
	do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed lockless and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed. However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
	 * set any mask even if it is not right from task_cs() pov,
	 * the pending set_cpus_allowed_ptr() will fix things.
	 *
	 * select_fallback_rq() will fix things up and set cpu_possible_mask
	 * if required.
	 */
}

void __init cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk. Guaranteed to return some non-empty
 * subset of node_states[N_MEMORY], even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_mems(task_cs(tsk), &mask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);

	return mask;
}

/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, current->mems_allowed);
}

/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor to the specified cpuset. Call holding
 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
 * (an unusual configuration), then returns the root cpuset.
 */
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
		cs = parent_cs(cs);
	return cs;
}
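/*
 * Illustrative walk (hypothetical hierarchy): for a cpuset /A/B/C where
 * only /A has mem_exclusive set, the loop climbs C -> B -> A and returns
 * A. If nothing on the path is hardwalled, parent_cs() eventually
 * returns NULL at the root, so the root cpuset itself is returned.
 */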

/**
 * cpuset_node_allowed - Can we allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate. If @node is set in
 * current's mems_allowed, yes. If it's not a __GFP_HARDWALL request and this
 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
 * yes. If current has access to memory reserves as an oom victim, yes.
 * Otherwise, no.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing hardwalled ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_lock. The
 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking the callback_lock.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls. For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags. That logic and the checks below have the combined
 * effect that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	tsk_is_oom_victim - any node ok
 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 *	GFP_USER     - only nodes in current task's mems_allowed ok.
 */
bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	struct cpuset *cs;	/* current cpuset ancestors */
	int allowed;		/* is allocation on this node allowed? */
	unsigned long flags;

	if (in_interrupt())
		return true;
	if (node_isset(node, current->mems_allowed))
		return true;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(tsk_is_oom_victim(current)))
		return true;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return false;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return true;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	spin_lock_irqsave(&callback_lock, flags);

	rcu_read_lock();
	cs = nearest_hardwall_ancestor(task_cs(current));
	allowed = node_isset(node, cs->mems_allowed);
	rcu_read_unlock();

	spin_unlock_irqrestore(&callback_lock, flags);
	return allowed;
}
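/*
 * Illustrative decision trace (hypothetical masks): a GFP_KERNEL request
 * for node 4 by a task whose mems_allowed is 0-1, inside a hierarchy
 * whose nearest hardwalled ancestor allows 0-7, falls through the checks
 * above and is allowed; the same request with __GFP_HARDWALL set (e.g.
 * GFP_USER) is refused at the hardwall test.
 */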

/**
 * cpuset_mem_spread_node() - On which node to begin search for a file page
 * cpuset_slab_spread_node() - On which node to begin search for a slab page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as those used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, rather spread the starting
 * node around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online. So it
 * should not be possible for the following code to return an
 * offline node. But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start. The zonelist passed to
 * __alloc_pages() will include all nodes. If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

static int cpuset_spread_node(int *rotor)
{
	return *rotor = next_node_in(*rotor, current->mems_allowed);
}
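/*
 * Illustrative rotor sequence (hypothetical mems_allowed = 0,2,5 with the
 * rotor starting at 0): successive calls return 2, 5, 0, 2, ... -
 * next_node_in() wraps around the allowed set, so consecutive allocations
 * start their search on different nodes instead of piling onto the local
 * one.
 */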

int cpuset_mem_spread_node(void)
{
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}

int cpuset_slab_spread_node(void)
{
	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
		current->cpuset_slab_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
}

EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);

/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2. Used by the OOM killer to determine whether
 * one task's memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}

/**
 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
 *
 * Description: Prints current's name, cpuset name, and cached copy of its
 * mems_allowed to the kernel log.
 */
void cpuset_print_current_mems_allowed(void)
{
	struct cgroup *cgrp;

	rcu_read_lock();

	cgrp = task_cs(current)->css.cgroup;
	pr_info("%s cpuset=", current->comm);
	pr_cont_cgroup_name(cgrp);
	pr_cont(" mems_allowed=%*pbl\n",
		nodemask_pr_args(&current->mems_allowed));

	rcu_read_unlock();
}

/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;

/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure". Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 **/

void __cpuset_memory_pressure_bump(void)
{
	rcu_read_lock();
	fmeter_markevent(&task_cs(current)->fmeter);
	rcu_read_unlock();
}

#ifdef CONFIG_PROC_PID_CPUSET
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
 *    anyway.
 */
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf;
	struct cgroup_subsys_state *css;
	int retval;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	css = task_get_css(tsk, cpuset_cgrp_id);
	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
				current->nsproxy->cgroup_ns);
	css_put(css);
	if (retval >= PATH_MAX)
		retval = -ENAMETOOLONG;
	if (retval < 0)
		goto out_free;
	seq_puts(m, buf);
	seq_putc(m, '\n');
	retval = 0;
out_free:
	kfree(buf);
out:
	return retval;
}
#endif	/* CONFIG_PROC_PID_CPUSET */
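/*
 * Illustrative output of proc_cpuset_show() (hypothetical pid and
 * hierarchy):
 *
 *	$ cat /proc/1234/cpuset
 *	/mygroup/subgroup
 *
 * The path is printed relative to the reader's cgroup namespace, per the
 * cgroup_path_ns() call above.
 */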

/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	seq_printf(m, "Mems_allowed:\t%*pb\n",
		   nodemask_pr_args(&task->mems_allowed));
	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
		   nodemask_pr_args(&task->mems_allowed));
}
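/*
 * Illustrative /proc/<pid>/status lines emitted above (hypothetical task
 * allowed nodes 0-3; the bitmask width of the first line depends on
 * MAX_NUMNODES):
 *
 *	Mems_allowed:	0000000f
 *	Mems_allowed_list:	0-3
 */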