/*
 * cgroup_freezer.c - control group freezer subsystem
 *
 * Copyright IBM Corporation, 2007
 *
 * Author : Cedric Le Goater <clg@fr.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

/*
 * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
 * set if "FROZEN" is written to the freezer.state cgroupfs file, and
 * cleared for "THAWED".  FREEZING_PARENT is set if the parent freezer is
 * FREEZING for whatever reason.  IOW, a cgroup has FREEZING_PARENT set if
 * one of its ancestors has FREEZING_SELF set.
 */
enum freezer_state_flags {
	CGROUP_FREEZER_ONLINE	= (1 << 0), /* freezer is fully online */
	CGROUP_FREEZING_SELF	= (1 << 1), /* this freezer is freezing */
	CGROUP_FREEZING_PARENT	= (1 << 2), /* the parent freezer is freezing */
	CGROUP_FROZEN		= (1 << 3), /* this and its descendants frozen */

	/* mask for all FREEZING flags */
	CGROUP_FREEZING		= CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT,
};
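
/*
 * Illustration (editorial, not from the original source): with a
 * hierarchy A/B, writing "FROZEN" to A's freezer.state sets
 * CGROUP_FREEZING_SELF on A and CGROUP_FREEZING_PARENT on B; once every
 * task in the subtree is seen frozen, CGROUP_FROZEN is set on both.
 * Writing "FROZEN" to B alone sets CGROUP_FREEZING_SELF only on B and
 * leaves A THAWED.
 */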

struct freezer {
	struct cgroup_subsys_state	css;
	unsigned int			state;
};

static DEFINE_MUTEX(freezer_mutex);

static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct freezer, css) : NULL;
}

static inline struct freezer *task_freezer(struct task_struct *task)
{
	return css_freezer(task_css(task, freezer_cgrp_id));
}

static struct freezer *parent_freezer(struct freezer *freezer)
{
	return css_freezer(freezer->css.parent);
}

bool cgroup_freezing(struct task_struct *task)
{
	bool ret;

	rcu_read_lock();
	ret = task_freezer(task)->state & CGROUP_FREEZING;
	rcu_read_unlock();

	return ret;
}
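
/*
 * Editorial note: cgroup_freezing() is the hook through which the core
 * freezer (kernel/freezer.c) learns about cgroup-initiated freezing;
 * freezing_slow_path() consults it alongside the system-wide freeze
 * state when deciding whether a task should enter the refrigerator.
 */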

static const char *freezer_state_strs(unsigned int state)
{
	if (state & CGROUP_FROZEN)
		return "FROZEN";
	if (state & CGROUP_FREEZING)
		return "FREEZING";
	return "THAWED";
}

static struct cgroup_subsys_state *
freezer_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct freezer *freezer;

	freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
	if (!freezer)
		return ERR_PTR(-ENOMEM);

	return &freezer->css;
}

/**
 * freezer_css_online - commit creation of a freezer css
 * @css: css being created
 *
 * We're committing to creation of @css.  Mark it online and inherit
 * parent's freezing state while holding freezer_mutex.
 */
static int freezer_css_online(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);
	struct freezer *parent = parent_freezer(freezer);

	mutex_lock(&freezer_mutex);

	freezer->state |= CGROUP_FREEZER_ONLINE;

	if (parent && (parent->state & CGROUP_FREEZING)) {
		freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN;
		atomic_inc(&system_freezing_cnt);
	}

	mutex_unlock(&freezer_mutex);
	return 0;
}

/**
 * freezer_css_offline - initiate destruction of a freezer css
 * @css: css being destroyed
 *
 * @css is going away.  Mark it dead and decrement system_freezing_cnt if
 * it was holding one.
 */
static void freezer_css_offline(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);

	mutex_lock(&freezer_mutex);

	if (freezer->state & CGROUP_FREEZING)
		atomic_dec(&system_freezing_cnt);

	freezer->state = 0;

	mutex_unlock(&freezer_mutex);
}

static void freezer_css_free(struct cgroup_subsys_state *css)
{
	kfree(css_freezer(css));
}

/*
 * Tasks can be migrated into a different freezer anytime regardless of its
 * current state.  freezer_attach() is responsible for making new tasks
 * conform to the current state.
 *
 * Freezer state changes and task migration are synchronized via
 * freezer_mutex.  freezer_attach() makes the new tasks conform to the
 * current state and all following state changes can see the new tasks.
 */
static void freezer_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *new_css;

	mutex_lock(&freezer_mutex);

	/*
	 * Make the new tasks conform to the current state of @new_css.
	 * For simplicity, when migrating any task to a FROZEN cgroup, we
	 * revert it to FREEZING and let update_if_frozen() determine the
	 * correct state later.
	 *
	 * Tasks in @tset are on @new_css but may not conform to its
	 * current state before executing the following - !frozen tasks may
	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
	 */
	cgroup_taskset_for_each(task, new_css, tset) {
		struct freezer *freezer = css_freezer(new_css);

		if (!(freezer->state & CGROUP_FREEZING)) {
			__thaw_task(task);
		} else {
			freeze_task(task);
			/* clear FROZEN and propagate upwards */
			while (freezer && (freezer->state & CGROUP_FROZEN)) {
				freezer->state &= ~CGROUP_FROZEN;
				freezer = parent_freezer(freezer);
			}
		}
	}

	mutex_unlock(&freezer_mutex);
}

/**
 * freezer_fork - cgroup post fork callback
 * @task: a task which has just been forked
 *
 * @task has just been created and should conform to the current state of
 * the cgroup_freezer it belongs to.  This function may race against
 * freezer_attach().  Losing to freezer_attach() means that we don't have
 * to do anything as freezer_attach() will put @task into the appropriate
 * state.
 */
static void freezer_fork(struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * The root cgroup is non-freezable, so we can skip locking the
	 * freezer.  This is safe regardless of race with task migration.
	 * If we didn't race or won, skipping is obviously the right thing
	 * to do.  If we lost and root is the new cgroup, noop is still the
	 * right thing to do.
	 */
	if (task_css_is_root(task, freezer_cgrp_id))
		return;

	mutex_lock(&freezer_mutex);
	rcu_read_lock();

	freezer = task_freezer(task);
	if (freezer->state & CGROUP_FREEZING)
		freeze_task(task);

	rcu_read_unlock();
	mutex_unlock(&freezer_mutex);
}

/**
 * update_if_frozen - update whether a cgroup finished freezing
 * @css: css of interest
 *
 * Once FREEZING is initiated, transition to FROZEN is lazily updated by
 * calling this function.  If the current state is FREEZING but not FROZEN,
 * this function checks whether all tasks of this cgroup and the descendant
 * cgroups finished freezing and, if so, sets FROZEN.
 *
 * The caller is responsible for grabbing RCU read lock and calling
 * update_if_frozen() on all descendants prior to invoking this function.
 *
 * Task states and freezer state might disagree while tasks are being
 * migrated into or out of @css, so we can't verify task states against
 * @freezer state here.  See freezer_attach() for details.
 */
static void update_if_frozen(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);
	struct cgroup_subsys_state *pos;
	struct css_task_iter it;
	struct task_struct *task;

	lockdep_assert_held(&freezer_mutex);

	if (!(freezer->state & CGROUP_FREEZING) ||
	    (freezer->state & CGROUP_FROZEN))
		return;

	/* are all (live) children frozen? */
	rcu_read_lock();
	css_for_each_child(pos, css) {
		struct freezer *child = css_freezer(pos);

		if ((child->state & CGROUP_FREEZER_ONLINE) &&
		    !(child->state & CGROUP_FROZEN)) {
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();

	/* are all tasks frozen? */
	css_task_iter_start(css, 0, &it);

	while ((task = css_task_iter_next(&it))) {
		if (freezing(task)) {
			/*
			 * freezer_should_skip() indicates that the task
			 * should be skipped when determining freezing
			 * completion.  Consider it frozen in addition to
			 * the usual frozen condition.
			 */
			if (!frozen(task) && !freezer_should_skip(task))
				goto out_iter_end;
		}
	}

	freezer->state |= CGROUP_FROZEN;
out_iter_end:
	css_task_iter_end(&it);
}

static int freezer_read(struct seq_file *m, void *v)
{
	struct cgroup_subsys_state *css = seq_css(m), *pos;

	mutex_lock(&freezer_mutex);
	rcu_read_lock();

	/* update states bottom-up */
	css_for_each_descendant_post(pos, css) {
		if (!css_tryget_online(pos))
			continue;
		rcu_read_unlock();

		update_if_frozen(pos);

		rcu_read_lock();
		css_put(pos);
	}

	rcu_read_unlock();
	mutex_unlock(&freezer_mutex);

	seq_puts(m, freezer_state_strs(css_freezer(css)->state));
	seq_putc(m, '\n');
	return 0;
}

static void freeze_cgroup(struct freezer *freezer)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&freezer->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		freeze_task(task);
	css_task_iter_end(&it);
}

static void unfreeze_cgroup(struct freezer *freezer)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&freezer->css, 0, &it);
	while ((task = css_task_iter_next(&it)))
		__thaw_task(task);
	css_task_iter_end(&it);
}

/**
 * freezer_apply_state - apply state change to a single cgroup_freezer
 * @freezer: freezer to apply state change to
 * @freeze: whether to freeze or unfreeze
 * @state: CGROUP_FREEZING_* flag to set or clear
 *
 * Set or clear @state on @freezer according to @freeze, and perform
 * freezing or thawing as necessary.
 */
static void freezer_apply_state(struct freezer *freezer, bool freeze,
				unsigned int state)
{
	/* also synchronizes against task migration, see freezer_attach() */
	lockdep_assert_held(&freezer_mutex);

	if (!(freezer->state & CGROUP_FREEZER_ONLINE))
		return;

	if (freeze) {
		if (!(freezer->state & CGROUP_FREEZING))
			atomic_inc(&system_freezing_cnt);
		freezer->state |= state;
		freeze_cgroup(freezer);
	} else {
		bool was_freezing = freezer->state & CGROUP_FREEZING;

		freezer->state &= ~state;

		if (!(freezer->state & CGROUP_FREEZING)) {
			if (was_freezing)
				atomic_dec(&system_freezing_cnt);
			freezer->state &= ~CGROUP_FROZEN;
			unfreeze_cgroup(freezer);
		}
	}
}

/**
 * freezer_change_state - change the freezing state of a cgroup_freezer
 * @freezer: freezer of interest
 * @freeze: whether to freeze or thaw
 *
 * Freeze or thaw @freezer according to @freeze.  The operations are
 * recursive - all descendants of @freezer will be affected.
 */
static void freezer_change_state(struct freezer *freezer, bool freeze)
{
	struct cgroup_subsys_state *pos;

	/*
	 * Update all its descendants in pre-order traversal.  Each
	 * descendant will try to inherit its parent's FREEZING state as
	 * CGROUP_FREEZING_PARENT.
	 */
	mutex_lock(&freezer_mutex);
	rcu_read_lock();
	css_for_each_descendant_pre(pos, &freezer->css) {
		struct freezer *pos_f = css_freezer(pos);
		struct freezer *parent = parent_freezer(pos_f);

		if (!css_tryget_online(pos))
			continue;
		rcu_read_unlock();

		if (pos_f == freezer)
			freezer_apply_state(pos_f, freeze,
					    CGROUP_FREEZING_SELF);
		else
			freezer_apply_state(pos_f,
					    parent->state & CGROUP_FREEZING,
					    CGROUP_FREEZING_PARENT);

		rcu_read_lock();
		css_put(pos);
	}
	rcu_read_unlock();
	mutex_unlock(&freezer_mutex);
}

static ssize_t freezer_write(struct kernfs_open_file *of,
			     char *buf, size_t nbytes, loff_t off)
{
	bool freeze;

	buf = strstrip(buf);

	if (strcmp(buf, freezer_state_strs(0)) == 0)
		freeze = false;
	else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0)
		freeze = true;
	else
		return -EINVAL;

	freezer_change_state(css_freezer(of_css(of)), freeze);
	return nbytes;
}
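
/*
 * Illustrative usage from userspace (editorial; the mount point is an
 * assumption and depends on where the v1 freezer hierarchy is mounted):
 *
 *	# mount -t cgroup -o freezer freezer /sys/fs/cgroup/freezer
 *	# mkdir /sys/fs/cgroup/freezer/mygroup
 *	# echo $PID > /sys/fs/cgroup/freezer/mygroup/tasks
 *	# echo FROZEN > /sys/fs/cgroup/freezer/mygroup/freezer.state
 *	# cat /sys/fs/cgroup/freezer/mygroup/freezer.state
 *	FROZEN
 *
 * Reading freezer.state may report "FREEZING" until update_if_frozen()
 * observes that every task in the subtree has actually frozen.
 */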

static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	struct freezer *freezer = css_freezer(css);

	return (bool)(freezer->state & CGROUP_FREEZING_SELF);
}

static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	struct freezer *freezer = css_freezer(css);

	return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
}

static struct cftype files[] = {
	{
		.name = "state",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = freezer_read,
		.write = freezer_write,
	},
	{
		.name = "self_freezing",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = freezer_self_freezing_read,
	},
	{
		.name = "parent_freezing",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = freezer_parent_freezing_read,
	},
	{ }	/* terminate */
};

struct cgroup_subsys freezer_cgrp_subsys = {
	.css_alloc	= freezer_css_alloc,
	.css_online	= freezer_css_online,
	.css_offline	= freezer_css_offline,
	.css_free	= freezer_css_free,
	.attach		= freezer_attach,
	.fork		= freezer_fork,
	.legacy_cftypes	= files,
};
// SPDX-License-Identifier: GPL-2.0
#include <linux/cgroup.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>

#include "cgroup-internal.h"

#include <trace/events/cgroup.h>

/*
 * Propagate the cgroup frozen state upwards through the cgroup tree.
 */
static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen)
{
	int desc = 1;

	/*
	 * If the new state is frozen, some freezing ancestor cgroups may
	 * change their state too, depending on whether all their
	 * descendants are frozen.
	 *
	 * Otherwise, all ancestor cgroups are forced into the non-frozen state.
	 */
	while ((cgrp = cgroup_parent(cgrp))) {
		if (frozen) {
			cgrp->freezer.nr_frozen_descendants += desc;
			if (!test_bit(CGRP_FROZEN, &cgrp->flags) &&
			    test_bit(CGRP_FREEZE, &cgrp->flags) &&
			    cgrp->freezer.nr_frozen_descendants ==
			    cgrp->nr_descendants) {
				set_bit(CGRP_FROZEN, &cgrp->flags);
				cgroup_file_notify(&cgrp->events_file);
				TRACE_CGROUP_PATH(notify_frozen, cgrp, 1);
				desc++;
			}
		} else {
			cgrp->freezer.nr_frozen_descendants -= desc;
			if (test_bit(CGRP_FROZEN, &cgrp->flags)) {
				clear_bit(CGRP_FROZEN, &cgrp->flags);
				cgroup_file_notify(&cgrp->events_file);
				TRACE_CGROUP_PATH(notify_frozen, cgrp, 0);
				desc++;
			}
		}
	}
}
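
/*
 * Worked example (editorial, not from the original source): take a
 * hierarchy a/b/c where c has just become frozen.  The walk visits b
 * first: b's nr_frozen_descendants grows by desc == 1, and if b itself
 * has CGRP_FREEZE set and all of its descendants are now frozen, b
 * becomes CGRP_FROZEN and desc is bumped to 2.  The walk then visits a,
 * whose nr_frozen_descendants grows by 2, accounting for both c and the
 * newly frozen b in a single pass.
 */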

/*
 * Revisit the cgroup frozen state.
 * Checks whether the cgroup is really frozen and performs all state
 * transitions.
 */
void cgroup_update_frozen(struct cgroup *cgrp)
{
	bool frozen;

	lockdep_assert_held(&css_set_lock);

	/*
	 * If the cgroup has to be frozen (CGRP_FREEZE bit set),
	 * and all tasks are frozen and/or stopped, let's consider
	 * the cgroup frozen. Otherwise it's not frozen.
	 */
	frozen = test_bit(CGRP_FREEZE, &cgrp->flags) &&
		cgrp->freezer.nr_frozen_tasks == __cgroup_task_count(cgrp);

	if (frozen) {
		/* Already there? */
		if (test_bit(CGRP_FROZEN, &cgrp->flags))
			return;

		set_bit(CGRP_FROZEN, &cgrp->flags);
	} else {
		/* Already there? */
		if (!test_bit(CGRP_FROZEN, &cgrp->flags))
			return;

		clear_bit(CGRP_FROZEN, &cgrp->flags);
	}
	cgroup_file_notify(&cgrp->events_file);
	TRACE_CGROUP_PATH(notify_frozen, cgrp, frozen);

	/* Update the state of ancestor cgroups. */
	cgroup_propagate_frozen(cgrp, frozen);
}

/*
 * Increment cgroup's nr_frozen_tasks.
 */
static void cgroup_inc_frozen_cnt(struct cgroup *cgrp)
{
	cgrp->freezer.nr_frozen_tasks++;
}

/*
 * Decrement cgroup's nr_frozen_tasks.
 */
static void cgroup_dec_frozen_cnt(struct cgroup *cgrp)
{
	cgrp->freezer.nr_frozen_tasks--;
	WARN_ON_ONCE(cgrp->freezer.nr_frozen_tasks < 0);
}

/*
 * Enter frozen/stopped state, if not yet there. Update cgroup's counters,
 * and revisit the state of the cgroup, if necessary.
 */
void cgroup_enter_frozen(void)
{
	struct cgroup *cgrp;

	if (current->frozen)
		return;

	spin_lock_irq(&css_set_lock);
	current->frozen = true;
	cgrp = task_dfl_cgroup(current);
	cgroup_inc_frozen_cnt(cgrp);
	cgroup_update_frozen(cgrp);
	spin_unlock_irq(&css_set_lock);
}
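
/*
 * Editorial note: cgroup_enter_frozen() is reached from the signal
 * delivery path (the JOBCTL_TRAP_FREEZE handling in kernel/signal.c)
 * and from ptrace stops, i.e. a task parks itself here after
 * cgroup_freeze_task() has poked it with a fake signal.
 */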

/*
 * Conditionally leave frozen/stopped state. Update cgroup's counters,
 * and revisit the state of the cgroup, if necessary.
 *
 * If always_leave is not set, and the cgroup is freezing,
 * we're racing with the cgroup freezing. In this case, we don't
 * drop the frozen counter to avoid a transient switch to
 * the unfrozen state.
 */
void cgroup_leave_frozen(bool always_leave)
{
	struct cgroup *cgrp;

	spin_lock_irq(&css_set_lock);
	cgrp = task_dfl_cgroup(current);
	if (always_leave || !test_bit(CGRP_FREEZE, &cgrp->flags)) {
		cgroup_dec_frozen_cnt(cgrp);
		cgroup_update_frozen(cgrp);
		WARN_ON_ONCE(!current->frozen);
		current->frozen = false;
	} else if (!(current->jobctl & JOBCTL_TRAP_FREEZE)) {
		spin_lock(&current->sighand->siglock);
		current->jobctl |= JOBCTL_TRAP_FREEZE;
		set_thread_flag(TIF_SIGPENDING);
		spin_unlock(&current->sighand->siglock);
	}
	spin_unlock_irq(&css_set_lock);
}

/*
 * Freeze or unfreeze the task by setting or clearing the JOBCTL_TRAP_FREEZE
 * jobctl bit.
 */
static void cgroup_freeze_task(struct task_struct *task, bool freeze)
{
	unsigned long flags;

	/* If the task is about to die, don't bother with freezing it. */
	if (!lock_task_sighand(task, &flags))
		return;

	if (freeze) {
		task->jobctl |= JOBCTL_TRAP_FREEZE;
		signal_wake_up(task, false);
	} else {
		task->jobctl &= ~JOBCTL_TRAP_FREEZE;
		wake_up_process(task);
	}

	unlock_task_sighand(task, &flags);
}

/*
 * Freeze or unfreeze all tasks in the given cgroup.
 */
static void cgroup_do_freeze(struct cgroup *cgrp, bool freeze)
{
	struct css_task_iter it;
	struct task_struct *task;

	lockdep_assert_held(&cgroup_mutex);

	spin_lock_irq(&css_set_lock);
	if (freeze)
		set_bit(CGRP_FREEZE, &cgrp->flags);
	else
		clear_bit(CGRP_FREEZE, &cgrp->flags);
	spin_unlock_irq(&css_set_lock);

	if (freeze)
		TRACE_CGROUP_PATH(freeze, cgrp);
	else
		TRACE_CGROUP_PATH(unfreeze, cgrp);

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((task = css_task_iter_next(&it))) {
		/*
		 * Ignore kernel threads here. Freezing cgroups containing
		 * kthreads isn't supported.
		 */
		if (task->flags & PF_KTHREAD)
			continue;
		cgroup_freeze_task(task, freeze);
	}
	css_task_iter_end(&it);

	/*
	 * Cgroup state should be revisited here to cover empty leaf cgroups
	 * and cgroups whose descendants are already in the desired state.
	 */
	spin_lock_irq(&css_set_lock);
	if (cgrp->nr_descendants == cgrp->freezer.nr_frozen_descendants)
		cgroup_update_frozen(cgrp);
	spin_unlock_irq(&css_set_lock);
}

/*
 * Adjust the task state (freeze or unfreeze) and revisit the state of
 * source and destination cgroups.
 */
void cgroup_freezer_migrate_task(struct task_struct *task,
				 struct cgroup *src, struct cgroup *dst)
{
	lockdep_assert_held(&css_set_lock);

	/*
	 * Kernel threads are not supposed to be frozen at all.
	 */
	if (task->flags & PF_KTHREAD)
		return;

	/*
	 * There is nothing to do if neither the src nor the dst cgroup is
	 * freezing and the task is not frozen.
	 */
	if (!test_bit(CGRP_FREEZE, &src->flags) &&
	    !test_bit(CGRP_FREEZE, &dst->flags) &&
	    !task->frozen)
		return;

	/*
	 * Adjust counters of freezing and frozen tasks.
	 * Note that if the task is frozen, we adjust both counters even
	 * when the destination cgroup is not frozen, to keep them balanced.
	 */
	if (task->frozen) {
		cgroup_inc_frozen_cnt(dst);
		cgroup_dec_frozen_cnt(src);
	}
	cgroup_update_frozen(dst);
	cgroup_update_frozen(src);

	/*
	 * Force the task to the desired state.
	 */
	cgroup_freeze_task(task, test_bit(CGRP_FREEZE, &dst->flags));
}

void cgroup_freeze(struct cgroup *cgrp, bool freeze)
{
	struct cgroup_subsys_state *css;
	struct cgroup *dsct;
	bool applied = false;

	lockdep_assert_held(&cgroup_mutex);

	/*
	 * Nothing changed? Just exit.
	 */
	if (cgrp->freezer.freeze == freeze)
		return;

	cgrp->freezer.freeze = freeze;

	/*
	 * Propagate changes downwards in the cgroup tree.
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		dsct = css->cgroup;

		if (cgroup_is_dead(dsct))
			continue;

		if (freeze) {
			dsct->freezer.e_freeze++;
			/*
			 * Already frozen because of ancestor's settings?
			 */
			if (dsct->freezer.e_freeze > 1)
				continue;
		} else {
			dsct->freezer.e_freeze--;
			/*
			 * Still frozen because of ancestor's settings?
			 */
			if (dsct->freezer.e_freeze > 0)
				continue;

			WARN_ON_ONCE(dsct->freezer.e_freeze < 0);
		}

		/*
		 * Do change the actual state: freeze or unfreeze.
		 */
		cgroup_do_freeze(dsct, freeze);
		applied = true;
	}

	/*
	 * Even if the actual state hasn't changed, notify the user.
	 * The state can be enforced by an ancestor cgroup: the cgroup
	 * can already be in the desired state or it can be locked in the
	 * opposite state, so that the transition will never happen.
	 * In both cases it's better to notify the user that there is
	 * nothing to wait for.
	 */
	if (!applied) {
		TRACE_CGROUP_PATH(notify_frozen, cgrp,
				  test_bit(CGRP_FROZEN, &cgrp->flags));
		cgroup_file_notify(&cgrp->events_file);
	}
}
323}