// SPDX-License-Identifier: GPL-2.0-only
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger for the following amount of time before being destroyed.
 * The goal is avoiding frequent destruction in the middle of consecutive
 * read calls. Expiring in the middle is a performance problem, not a
 * correctness one. 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction. Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* protects cgroup_subsys->release_agent_path */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	cgroup_attach_lock(true);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	cgroup_attach_unlock(true);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new child
 * is guaranteed to be either visible in the source cgroup after the
 * parent's migration is complete or put into the target cgroup. No task
 * can slip out of migration through forking.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty. This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * Used to destroy all pidlists lingering while waiting for the destroy
 * timer. None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again. The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		kvfree(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
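
/*
 * Worked example for pidlist_uniq() (arbitrary values): given the sorted
 * input {3, 3, 5, 7, 7, 7, 9} with length == 7, the array is compacted in
 * place so that its first four entries read {3, 5, 7, 9} and the function
 * returns 4. Entries past the returned count are stale and must be ignored.
 */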

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco. As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer. As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * Find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating one if necessary. Must be called with cgrp->pidlist_mutex
 * held; returns NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0;	/* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough. This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0)	/* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		kvfree(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	kvfree(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @ctx->procs1.pidlist indicates that this isn't the first
	 * start() after open. If the matching pidlist is around, we can use
	 * that. Look for it. Note that @ctx->procs1.pidlist can't be used
	 * directly. It could already have been destroyed.
	 */
	if (ctx->procs1.pidlist)
		ctx->procs1.pidlist = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between. Create a new one.
	 */
	if (!ctx->procs1.pidlist) {
		ret = pidlist_array_load(cgrp, type, &ctx->procs1.pidlist);
		if (ret)
			return ERR_PTR(ret);
	}
	l = ctx->procs1.pidlist;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
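
/*
 * Resume example for the binary search above (arbitrary values): with a
 * pidlist of {10, 20, 30} and *pos == 21 (e.g. after a seek), the search
 * settles on index 2, the first entry greater than 21, so iteration
 * resumes at pid 30 and *pos is rewritten to 30.
 */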

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_file_ctx *ctx = of->priv;
	struct cgroup_pidlist *l = ctx->procs1.pidlist;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		(*pos)++;
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;
	bool locked;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup, &locked);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only need
	 * to check permissions on one of them. Check permissions using the
	 * credentials from file open to protect against inherited fd attacks.
	 */
	cred = of->file->f_cred;
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task, locked);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;
	struct cgroup_file_ctx *ctx;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	/*
	 * Release agent gets called with all capabilities,
	 * require capabilities to set release agent.
	 */
	ctx = of->priv;
	if ((ctx->ns->user_ns != &init_user_ns) ||
	    !file_ns_capable(of->file, &init_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}
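
/*
 * For instance (hypothetical paths), with the memory hierarchy mounted at
 * /sys/fs/cgroup/memory, an admin in the init user namespace can set the
 * agent with:
 *
 *	echo /usr/local/sbin/my-release-agent > /sys/fs/cgroup/memory/release_agent
 *
 * The write fails with -EPERM from other user namespaces or without
 * CAP_SYS_ADMIN, and with -ENODEV if the cgroup is already being removed.
 */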

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * Grab the subsystems state racily. No need to add avenue to
	 * cgroup_mutex contention.
	 */

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	return 0;
}
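
/*
 * The routine above backs /proc/cgroups. Example output (values vary by
 * configuration):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		3		1		1
 *	memory		5		104		1
 */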

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 *
 * Return: %0 on success or a negative errno code on failure
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be a kernfs_node belonging to cgroupfs and a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity. For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe. Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || !cgroup_tryget(cgrp)) {
		rcu_read_unlock();
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (READ_ONCE(tsk->__state)) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (tsk->in_iowait)
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	cgroup_put(cgrp);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence. Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d. The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task. We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf, *agentbuf;
	char *argv[3], *envp[3];
	int ret;

	/* snoop agent path and exit early if empty */
	if (!cgrp->root->release_agent_path[0])
		return;

	/* prepare argument buffers */
	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out_free;

	spin_lock(&release_agent_path_lock);
	strlcpy(agentbuf, cgrp->root->release_agent_path, PATH_MAX);
	spin_unlock(&release_agent_path_lock);
	if (!agentbuf[0])
		goto out_free;

	ret = cgroup_path_ns(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	if (ret < 0 || ret >= PATH_MAX)
		goto out_free;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
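
/*
 * Concretely (hypothetical configuration): with release_agent set to
 * /sbin/cgroup-release-agent and the cgroup mounted at
 * /sys/fs/cgroup/memory/jobs/job1 becoming releasable, the helper runs
 * roughly as:
 *
 *	/sbin/cgroup-release-agent /jobs/job1
 *
 * i.e. argv[1] is the cgroup path relative to the hierarchy root, not a
 * full VFS path.
 */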

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
	if (strchr(new_name_str, '\n'))
		return -EINVAL;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref. kernfs_rename() doesn't require active_ref
	 * protection. Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");
	if (root->flags & CGRP_ROOT_FAVOR_DYNMODS)
		seq_puts(seq, ",favordynmods");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
	Opt_favordynmods,
	Opt_nofavordynmods,
};

const struct fs_parameter_spec cgroup1_fs_parameters[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	fsparam_flag  ("favordynmods",	Opt_favordynmods),
	fsparam_flag  ("nofavordynmods", Opt_nofavordynmods),
	{}
};
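
/*
 * These options correspond to what a v1 mount passes, e.g. (illustrative):
 *
 *	mount -t cgroup -o cpuset,noprefix none /mnt
 *	mount -t cgroup -o none,name=systemd none /mnt
 *
 * Subsystem names ("cpuset" above) are not in this table; they fall
 * through to the -ENOPARAM path in cgroup1_parse_param() below.
 */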

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		int ret;

		ret = vfs_parse_fs_param_source(fc, param);
		if (ret != -ENOPARAM)
			return ret;
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
				return invalfc(fc, "Disabled controller '%s'",
					       param->key);
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return invalfc(fc, "Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_favordynmods:
		ctx->flags |= CGRP_ROOT_FAVOR_DYNMODS;
		break;
	case Opt_nofavordynmods:
		ctx->flags &= ~CGRP_ROOT_FAVOR_DYNMODS;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return invalfc(fc, "release_agent respecified");
		/*
		 * Release agent gets called with all capabilities,
		 * require capabilities to set release agent.
		 */
		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN))
			return invalfc(fc, "Setting release_agent not allowed");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return invalfc(fc, "Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return invalfc(fc, "Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return invalfc(fc, "Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return invalfc(fc, "name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In absence of 'none', 'name=' and subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return invalfc(fc, "subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return invalfc(fc, "Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return invalfc(fc, "noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return invalfc(fc, "none used incorrectly");

	return 0;
}

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		errorfc(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, -E... on
 * error and a positive value when the candidate root is busy dying.
 * On success it stashes a reference to cgroup_root into the given
 * cgroup_fs_context; that reference does *NOT* count towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount. Let's drain the
	 * dying subsystems. We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting. Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match. Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one. name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return invalfc(fc, "No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (!ret)
		cgroup_favor_dynmods(root, ctx->flags & CGRP_ROOT_FAVOR_DYNMODS);
	else
		cgroup_free_root(root);

	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		fc_drop_locked(fc);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and kept separate so it can serve as
	 * the flush domain. Cap @max_active at 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
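
/*
 * Example usage (illustrative): booting with
 *
 *	cgroup_no_v1=memory,io
 *
 * keeps the memory and io controllers off v1 hierarchies,
 * "cgroup_no_v1=all" blocks every controller, and "cgroup_no_v1=named"
 * disables named (name=) v1 mounts.
 */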
1// SPDX-License-Identifier: GPL-2.0-only
2#include "cgroup-internal.h"
3
4#include <linux/ctype.h>
5#include <linux/kmod.h>
6#include <linux/sort.h>
7#include <linux/delay.h>
8#include <linux/mm.h>
9#include <linux/sched/signal.h>
10#include <linux/sched/task.h>
11#include <linux/magic.h>
12#include <linux/slab.h>
13#include <linux/vmalloc.h>
14#include <linux/delayacct.h>
15#include <linux/pid_namespace.h>
16#include <linux/cgroupstats.h>
17#include <linux/fs_parser.h>
18
19#include <trace/events/cgroup.h>
20
21#define cg_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)
22
23/*
24 * pidlists linger the following amount before being destroyed. The goal
25 * is avoiding frequent destruction in the middle of consecutive read calls
26 * Expiring in the middle is a performance problem not a correctness one.
27 * 1 sec should be enough.
28 */
29#define CGROUP_PIDLIST_DESTROY_DELAY HZ
30
31/* Controllers blocked by the commandline in v1 */
32static u16 cgroup_no_v1_mask;
33
34/* disable named v1 mounts */
35static bool cgroup_no_v1_named;
36
37/*
38 * pidlist destructions need to be flushed on cgroup destruction. Use a
39 * separate workqueue as flush domain.
40 */
41static struct workqueue_struct *cgroup_pidlist_destroy_wq;
42
43/*
44 * Protects cgroup_subsys->release_agent_path. Modifying it also requires
45 * cgroup_mutex. Reading requires either cgroup_mutex or this spinlock.
46 */
47static DEFINE_SPINLOCK(release_agent_path_lock);
48
49bool cgroup1_ssid_disabled(int ssid)
50{
51 return cgroup_no_v1_mask & (1 << ssid);
52}
53
54/**
55 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
56 * @from: attach to all cgroups of a given task
57 * @tsk: the task to be attached
58 */
59int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
60{
61 struct cgroup_root *root;
62 int retval = 0;
63
64 mutex_lock(&cgroup_mutex);
65 percpu_down_write(&cgroup_threadgroup_rwsem);
66 for_each_root(root) {
67 struct cgroup *from_cgrp;
68
69 if (root == &cgrp_dfl_root)
70 continue;
71
72 spin_lock_irq(&css_set_lock);
73 from_cgrp = task_cgroup_from_root(from, root);
74 spin_unlock_irq(&css_set_lock);
75
76 retval = cgroup_attach_task(from_cgrp, tsk, false);
77 if (retval)
78 break;
79 }
80 percpu_up_write(&cgroup_threadgroup_rwsem);
81 mutex_unlock(&cgroup_mutex);
82
83 return retval;
84}
85EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
86
87/**
88 * cgroup_trasnsfer_tasks - move tasks from one cgroup to another
89 * @to: cgroup to which the tasks will be moved
90 * @from: cgroup in which the tasks currently reside
91 *
92 * Locking rules between cgroup_post_fork() and the migration path
93 * guarantee that, if a task is forking while being migrated, the new child
94 * is guaranteed to be either visible in the source cgroup after the
95 * parent's migration is complete or put into the target cgroup. No task
96 * can slip out of migration through forking.
97 */
98int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
99{
100 DEFINE_CGROUP_MGCTX(mgctx);
101 struct cgrp_cset_link *link;
102 struct css_task_iter it;
103 struct task_struct *task;
104 int ret;
105
106 if (cgroup_on_dfl(to))
107 return -EINVAL;
108
109 ret = cgroup_migrate_vet_dst(to);
110 if (ret)
111 return ret;
112
113 mutex_lock(&cgroup_mutex);
114
115 percpu_down_write(&cgroup_threadgroup_rwsem);
116
117 /* all tasks in @from are being moved, all csets are source */
118 spin_lock_irq(&css_set_lock);
119 list_for_each_entry(link, &from->cset_links, cset_link)
120 cgroup_migrate_add_src(link->cset, to, &mgctx);
121 spin_unlock_irq(&css_set_lock);
122
123 ret = cgroup_migrate_prepare_dst(&mgctx);
124 if (ret)
125 goto out_err;
126
127 /*
128 * Migrate tasks one-by-one until @from is empty. This fails iff
129 * ->can_attach() fails.
130 */
131 do {
132 css_task_iter_start(&from->self, 0, &it);
133
134 do {
135 task = css_task_iter_next(&it);
136 } while (task && (task->flags & PF_EXITING));
137
138 if (task)
139 get_task_struct(task);
140 css_task_iter_end(&it);
141
142 if (task) {
143 ret = cgroup_migrate(task, false, &mgctx);
144 if (!ret)
145 TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
146 put_task_struct(task);
147 }
148 } while (task && !ret);
149out_err:
150 cgroup_migrate_finish(&mgctx);
151 percpu_up_write(&cgroup_threadgroup_rwsem);
152 mutex_unlock(&cgroup_mutex);
153 return ret;
154}
155
156/*
157 * Stuff for reading the 'tasks'/'procs' files.
158 *
159 * Reading this file can return large amounts of data if a cgroup has
160 * *lots* of attached tasks. So it may need several calls to read(),
161 * but we cannot guarantee that the information we produce is correct
162 * unless we produce it entirely atomically.
163 *
164 */
165
166/* which pidlist file are we talking about? */
167enum cgroup_filetype {
168 CGROUP_FILE_PROCS,
169 CGROUP_FILE_TASKS,
170};
171
172/*
173 * A pidlist is a list of pids that virtually represents the contents of one
174 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
175 * a pair (one each for procs, tasks) for each pid namespace that's relevant
176 * to the cgroup.
177 */
178struct cgroup_pidlist {
179 /*
180 * used to find which pidlist is wanted. doesn't change as long as
181 * this particular list stays in the list.
182 */
183 struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
184 /* array of xids */
185 pid_t *list;
186 /* how many elements the above list has */
187 int length;
188 /* each of these stored in a list by its cgroup */
189 struct list_head links;
190 /* pointer to the cgroup we belong to, for list removal purposes */
191 struct cgroup *owner;
192 /* for delayed destruction */
193 struct delayed_work destroy_dwork;
194};
195
196/*
197 * Used to destroy all pidlists lingering waiting for destroy timer. None
198 * should be left afterwards.
199 */
200void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
201{
202 struct cgroup_pidlist *l, *tmp_l;
203
204 mutex_lock(&cgrp->pidlist_mutex);
205 list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
206 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
207 mutex_unlock(&cgrp->pidlist_mutex);
208
209 flush_workqueue(cgroup_pidlist_destroy_wq);
210 BUG_ON(!list_empty(&cgrp->pidlists));
211}
212
213static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
214{
215 struct delayed_work *dwork = to_delayed_work(work);
216 struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
217 destroy_dwork);
218 struct cgroup_pidlist *tofree = NULL;
219
220 mutex_lock(&l->owner->pidlist_mutex);
221
222 /*
223 * Destroy iff we didn't get queued again. The state won't change
224 * as destroy_dwork can only be queued while locked.
225 */
226 if (!delayed_work_pending(dwork)) {
227 list_del(&l->links);
228 kvfree(l->list);
229 put_pid_ns(l->key.ns);
230 tofree = l;
231 }
232
233 mutex_unlock(&l->owner->pidlist_mutex);
234 kfree(tofree);
235}
236
237/*
238 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
239 * Returns the number of unique elements.
240 */
241static int pidlist_uniq(pid_t *list, int length)
242{
243 int src, dest = 1;
244
245 /*
246 * we presume the 0th element is unique, so i starts at 1. trivial
247 * edge cases first; no work needs to be done for either
248 */
249 if (length == 0 || length == 1)
250 return length;
251 /* src and dest walk down the list; dest counts unique elements */
252 for (src = 1; src < length; src++) {
253 /* find next unique element */
254 while (list[src] == list[src-1]) {
255 src++;
256 if (src == length)
257 goto after;
258 }
259 /* dest always points to where the next unique element goes */
260 list[dest] = list[src];
261 dest++;
262 }
263after:
264 return dest;
265}
266
267/*
268 * The two pid files - task and cgroup.procs - guaranteed that the result
269 * is sorted, which forced this whole pidlist fiasco. As pid order is
270 * different per namespace, each namespace needs differently sorted list,
271 * making it impossible to use, for example, single rbtree of member tasks
272 * sorted by task pointer. As pidlists can be fairly large, allocating one
273 * per open file is dangerous, so cgroup had to implement shared pool of
274 * pidlists keyed by cgroup and namespace.
275 */
276static int cmppid(const void *a, const void *b)
277{
278 return *(pid_t *)a - *(pid_t *)b;
279}
280
281static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
282 enum cgroup_filetype type)
283{
284 struct cgroup_pidlist *l;
285 /* don't need task_nsproxy() if we're looking at ourself */
286 struct pid_namespace *ns = task_active_pid_ns(current);
287
288 lockdep_assert_held(&cgrp->pidlist_mutex);
289
290 list_for_each_entry(l, &cgrp->pidlists, links)
291 if (l->key.type == type && l->key.ns == ns)
292 return l;
293 return NULL;
294}
295
296/*
297 * find the appropriate pidlist for our purpose (given procs vs tasks)
298 * returns with the lock on that pidlist already held, and takes care
299 * of the use count, or returns NULL with no locks held if we're out of
300 * memory.
301 */
302static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
303 enum cgroup_filetype type)
304{
305 struct cgroup_pidlist *l;
306
307 lockdep_assert_held(&cgrp->pidlist_mutex);
308
309 l = cgroup_pidlist_find(cgrp, type);
310 if (l)
311 return l;
312
313 /* entry not found; create a new one */
314 l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
315 if (!l)
316 return l;
317
318 INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
319 l->key.type = type;
320 /* don't need task_nsproxy() if we're looking at ourself */
321 l->key.ns = get_pid_ns(task_active_pid_ns(current));
322 l->owner = cgrp;
323 list_add(&l->links, &cgrp->pidlists);
324 return l;
325}
326
327/*
328 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
329 */
330static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
331 struct cgroup_pidlist **lp)
332{
333 pid_t *array;
334 int length;
335 int pid, n = 0; /* used for populating the array */
336 struct css_task_iter it;
337 struct task_struct *tsk;
338 struct cgroup_pidlist *l;
339
340 lockdep_assert_held(&cgrp->pidlist_mutex);
341
342 /*
343 * If cgroup gets more users after we read count, we won't have
344 * enough space - tough. This race is indistinguishable to the
345 * caller from the case that the additional cgroup users didn't
346 * show up until sometime later on.
347 */
348 length = cgroup_task_count(cgrp);
349 array = kvmalloc_array(length, sizeof(pid_t), GFP_KERNEL);
350 if (!array)
351 return -ENOMEM;
352 /* now, populate the array */
353 css_task_iter_start(&cgrp->self, 0, &it);
354 while ((tsk = css_task_iter_next(&it))) {
355 if (unlikely(n == length))
356 break;
357 /* get tgid or pid for procs or tasks file respectively */
358 if (type == CGROUP_FILE_PROCS)
359 pid = task_tgid_vnr(tsk);
360 else
361 pid = task_pid_vnr(tsk);
362 if (pid > 0) /* make sure to only use valid results */
363 array[n++] = pid;
364 }
365 css_task_iter_end(&it);
366 length = n;
367 /* now sort & (if procs) strip out duplicates */
368 sort(array, length, sizeof(pid_t), cmppid, NULL);
369 if (type == CGROUP_FILE_PROCS)
370 length = pidlist_uniq(array, length);
371
372 l = cgroup_pidlist_find_create(cgrp, type);
373 if (!l) {
374 kvfree(array);
375 return -ENOMEM;
376 }
377
378 /* store array, freeing old if necessary */
379 kvfree(l->list);
380 l->list = array;
381 l->length = length;
382 *lp = l;
383 return 0;
384}
385
386/*
387 * seq_file methods for the tasks/procs files. The seq_file position is the
388 * next pid to display; the seq_file iterator is a pointer to the pid
389 * in the cgroup->l->list array.
390 */
391
392static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
393{
394 /*
395 * Initially we receive a position value that corresponds to
396 * one more than the last pid shown (or 0 on the first call or
397 * after a seek to the start). Use a binary-search to find the
398 * next pid to display, if any
399 */
400 struct kernfs_open_file *of = s->private;
401 struct cgroup *cgrp = seq_css(s)->cgroup;
402 struct cgroup_pidlist *l;
403 enum cgroup_filetype type = seq_cft(s)->private;
404 int index = 0, pid = *pos;
405 int *iter, ret;
406
407 mutex_lock(&cgrp->pidlist_mutex);
408
409 /*
410 * !NULL @of->priv indicates that this isn't the first start()
411 * after open. If the matching pidlist is around, we can use that.
412 * Look for it. Note that @of->priv can't be used directly. It
413 * could already have been destroyed.
414 */
415 if (of->priv)
416 of->priv = cgroup_pidlist_find(cgrp, type);
417
418 /*
419 * Either this is the first start() after open or the matching
420 * pidlist has been destroyed inbetween. Create a new one.
421 */
422 if (!of->priv) {
423 ret = pidlist_array_load(cgrp, type,
424 (struct cgroup_pidlist **)&of->priv);
425 if (ret)
426 return ERR_PTR(ret);
427 }
428 l = of->priv;
429
430 if (pid) {
431 int end = l->length;
432
433 while (index < end) {
434 int mid = (index + end) / 2;
435 if (l->list[mid] == pid) {
436 index = mid;
437 break;
438 } else if (l->list[mid] <= pid)
439 index = mid + 1;
440 else
441 end = mid;
442 }
443 }
444 /* If we're off the end of the array, we're done */
445 if (index >= l->length)
446 return NULL;
447 /* Update the abstract position to be the actual pid that we found */
448 iter = l->list + index;
449 *pos = *iter;
450 return iter;
451}
452
453static void cgroup_pidlist_stop(struct seq_file *s, void *v)
454{
455 struct kernfs_open_file *of = s->private;
456 struct cgroup_pidlist *l = of->priv;
457
458 if (l)
459 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
460 CGROUP_PIDLIST_DESTROY_DELAY);
461 mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
462}
463
464static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
465{
466 struct kernfs_open_file *of = s->private;
467 struct cgroup_pidlist *l = of->priv;
468 pid_t *p = v;
469 pid_t *end = l->list + l->length;
470 /*
471 * Advance to the next pid in the array. If this goes off the
472 * end, we're done
473 */
474 p++;
475 if (p >= end) {
476 return NULL;
477 } else {
478 *pos = *p;
479 return p;
480 }
481}
482
483static int cgroup_pidlist_show(struct seq_file *s, void *v)
484{
485 seq_printf(s, "%d\n", *(int *)v);
486
487 return 0;
488}
489
490static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
491 char *buf, size_t nbytes, loff_t off,
492 bool threadgroup)
493{
494 struct cgroup *cgrp;
495 struct task_struct *task;
496 const struct cred *cred, *tcred;
497 ssize_t ret;
498
499 cgrp = cgroup_kn_lock_live(of->kn, false);
500 if (!cgrp)
501 return -ENODEV;
502
503 task = cgroup_procs_write_start(buf, threadgroup);
504 ret = PTR_ERR_OR_ZERO(task);
505 if (ret)
506 goto out_unlock;
507
508 /*
509 * Even if we're attaching all tasks in the thread group, we only
510 * need to check permissions on one of them.
511 */
512 cred = current_cred();
513 tcred = get_task_cred(task);
514 if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
515 !uid_eq(cred->euid, tcred->uid) &&
516 !uid_eq(cred->euid, tcred->suid))
517 ret = -EACCES;
518 put_cred(tcred);
519 if (ret)
520 goto out_finish;
521
522 ret = cgroup_attach_task(cgrp, task, threadgroup);
523
524out_finish:
525 cgroup_procs_write_finish(task);
526out_unlock:
527 cgroup_kn_unlock(of->kn);
528
529 return ret ?: nbytes;
530}
531
532static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
533 char *buf, size_t nbytes, loff_t off)
534{
535 return __cgroup1_procs_write(of, buf, nbytes, off, true);
536}
537
538static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
539 char *buf, size_t nbytes, loff_t off)
540{
541 return __cgroup1_procs_write(of, buf, nbytes, off, false);
542}
543
544static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
545 char *buf, size_t nbytes, loff_t off)
546{
547 struct cgroup *cgrp;
548
549 BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
550
551 cgrp = cgroup_kn_lock_live(of->kn, false);
552 if (!cgrp)
553 return -ENODEV;
554 spin_lock(&release_agent_path_lock);
555 strlcpy(cgrp->root->release_agent_path, strstrip(buf),
556 sizeof(cgrp->root->release_agent_path));
557 spin_unlock(&release_agent_path_lock);
558 cgroup_kn_unlock(of->kn);
559 return nbytes;
560}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};
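
/*
 * Illustrative listing (an assumption, not part of this file): every v1
 * cgroup directory exposes the core files above alongside the bound
 * controller's own files, e.g. on a cpu hierarchy:
 *
 *   # ls /sys/fs/cgroup/cpu/mygrp
 *   cgroup.clone_children  cgroup.procs  notify_on_release  tasks  ...
 *
 * "cgroup.sane_behavior" and "release_agent" are CFTYPE_ONLY_ON_ROOT,
 * so they appear only at the mount point.
 */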

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}
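
/*
 * Illustrative output (an assumption; the actual rows depend on kernel
 * config and mounts) -- this function backs /proc/cgroups:
 *
 *   #subsys_name	hierarchy	num_cgroups	enabled
 *   cpuset		2		14		1
 *   cpu		3		5		1
 *   memory		0		1		0
 *
 * A hierarchy id of 0 indicates the controller is unbound or bound to
 * the v2 default hierarchy.
 */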

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* @kn should be a kernfs_node belonging to cgroupfs and a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}
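
/*
 * Illustrative consumer (an assumption, not part of this file):
 * userspace typically reaches this via the taskstats genetlink
 * interface using CGROUPSTATS_CMD_GET with an open fd of a cgroup
 * directory; the reply carries the counters filled in above
 * (nr_running, nr_sleeping, nr_uninterruptible, nr_stopped,
 * nr_io_wait).
 */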

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL;
	char *argv[3], *envp[3];
	int ret;

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	spin_lock_irq(&css_set_lock);
	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_irq(&css_set_lock);
	if (ret < 0 || ret >= PATH_MAX)
		goto out;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
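
/*
 * Illustrative agent (an assumption, not part of this file): the helper
 * is invoked as "agent <cgroup-path>", so a minimal sketch might be:
 *
 *   #!/bin/sh
 *   # $1 is the released cgroup's path, relative to the hierarchy root
 *   rmdir "/sys/fs/cgroup/memory$1"
 *
 * Where the hierarchy is mounted is the agent's business; the kernel
 * only passes the hierarchy-relative path.
 */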

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}
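
/*
 * Illustrative behavior (an assumption, not part of this file):
 *
 *   # mv /sys/fs/cgroup/cpu/a /sys/fs/cgroup/cpu/b     -> OK (same parent)
 *   # mv /sys/fs/cgroup/cpu/a /sys/fs/cgroup/cpu/x/a   -> -EIO (reparent)
 *
 * Only the directory name may change; moving a cgroup under a different
 * parent is rejected by the kn->parent check above.
 */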

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}
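
/*
 * Illustrative /proc/mounts line produced via the above (an assumption;
 * the generic rw/nosuid/... flags come from the VFS, the rest from this
 * function):
 *
 *   cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,memory,release_agent=/sbin/agent 0 0
 */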

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

static const struct fs_parameter_spec cgroup1_param_specs[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};

const struct fs_parameter_description cgroup1_fs_parameters = {
	.name		= "cgroup1",
	.specs		= cgroup1_param_specs,
};

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		if (strcmp(param->key, "source") == 0) {
			fc->source = param->string;
			param->string = NULL;
			return 0;
		}
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return cg_invalf(fc, "cgroup1: Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return cg_invalf(fc, "cgroup1: release_agent respecified");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return cg_invalf(fc, "cgroup1: Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return cg_invalf(fc, "cgroup1: Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return cg_invalf(fc, "cgroup1: Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return cg_invalf(fc, "cgroup1: name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}
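
/*
 * Illustrative mounts exercising the options parsed above (an
 * assumption, not part of this file):
 *
 *   # mount -t cgroup -o cpu,cpuacct cgroup /sys/fs/cgroup/cpu,cpuacct
 *   # mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd
 *   # mount -t cgroup -o all cgroup /mnt
 *
 * Subsystem names arrive here as unknown keys (-ENOPARAM) and are
 * matched against ss->legacy_name; everything else goes through
 * fs_parse().
 */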

static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In the absence of 'none', 'name=' or subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return cg_invalf(fc, "cgroup1: subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return cg_invalf(fc, "cgroup1: Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return cg_invalf(fc, "cgroup1: noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return cg_invalf(fc, "cgroup1: none used incorrectly");

	return 0;
}
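
/*
 * Illustrative combinations rejected by the checks above (an
 * assumption, not part of this file):
 *
 *   -o all,cpu        -> "subsys name conflicts with all"
 *   -o none,cpu       -> "none used incorrectly"
 *   -o noprefix,cpu   -> "noprefix used incorrectly" (cpuset only)
 *   -o none           -> "Need name or subsystem set" (no name= given)
 */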

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		cg_invalf(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
			  ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, a negative errno
 * on error, and a positive value when the candidate root is currently
 * dying and the mount should be retried.  On success it stashes a
 * reference to cgroup_root into the given cgroup_fs_context; that
 * reference is *NOT* counted towards the cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return cg_invalf(fc, "cgroup1: No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		struct super_block *sb = fc->root->d_sb;
		dput(fc->root);
		deactivate_locked_super(sb);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
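
/*
 * Illustrative kernel command lines handled by cgroup_no_v1() above (an
 * assumption for documentation, not part of this file):
 *
 *   cgroup_no_v1=memory,cpu    disable v1 for the listed controllers
 *   cgroup_no_v1=all           disable all v1 controllers
 *   cgroup_no_v1=named         disable named (name=) v1 hierarchies
 */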