v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/cgroup.h>
  3#include <linux/sched.h>
  4#include <linux/sched/task.h>
  5#include <linux/sched/signal.h>
  6
  7#include "cgroup-internal.h"
  8
  9#include <trace/events/cgroup.h>
 10
 11/*
 12 * Update the CGRP_FROZEN bit in cgrp->flags.
 13 * Return true if the flag was updated; false if it did not change.
 14 */
 15static bool cgroup_update_frozen_flag(struct cgroup *cgrp, bool frozen)
 16{
 17	lockdep_assert_held(&css_set_lock);
 18
 19	/* Already there? */
 20	if (test_bit(CGRP_FROZEN, &cgrp->flags) == frozen)
 21		return false;
 22
 23	if (frozen)
 24		set_bit(CGRP_FROZEN, &cgrp->flags);
 25	else
 26		clear_bit(CGRP_FROZEN, &cgrp->flags);
 27
 28	cgroup_file_notify(&cgrp->events_file);
 29	TRACE_CGROUP_PATH(notify_frozen, cgrp, frozen);
 30	return true;
 31}
 32
 33/*
 34 * Propagate the cgroup frozen state upwards through the cgroup tree.
 35 */
 36static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen)
 37{
 38	int desc = 1;
 39
 40	/*
 41	 * If the new state is frozen, some freezing ancestor cgroups may change
 42	 * their state too, depending on whether all their descendants are frozen.
 43	 *
 44	 * Otherwise, all ancestor cgroups are forced into the non-frozen state.
 45	 */
 46	while ((cgrp = cgroup_parent(cgrp))) {
 47		if (frozen) {
 48			cgrp->freezer.nr_frozen_descendants += desc;
 49			if (!test_bit(CGRP_FREEZE, &cgrp->flags) ||
 50			    (cgrp->freezer.nr_frozen_descendants !=
 51			    cgrp->nr_descendants))
 52				continue;
 53		} else {
 54			cgrp->freezer.nr_frozen_descendants -= desc;
 55		}
 56
 57		if (cgroup_update_frozen_flag(cgrp, frozen))
 58			desc++;
 59	}
 60}
 61
 62/*
 63 * Revisit the cgroup frozen state.
 64 * Check whether the cgroup is really frozen and perform all state transitions.
 65 */
 66void cgroup_update_frozen(struct cgroup *cgrp)
 67{
 68	bool frozen;
 69
 70	/*
 71	 * If the cgroup has to be frozen (CGRP_FREEZE bit set),
 72	 * and all tasks are frozen and/or stopped, let's consider
 73	 * the cgroup frozen. Otherwise it's not frozen.
 74	 */
 75	frozen = test_bit(CGRP_FREEZE, &cgrp->flags) &&
 76		cgrp->freezer.nr_frozen_tasks == __cgroup_task_count(cgrp);
 77
 78	/* If the flag was updated, update the state of the ancestor cgroups. */
 79	if (cgroup_update_frozen_flag(cgrp, frozen))
 80		cgroup_propagate_frozen(cgrp, frozen);
 81}
 82
 83/*
 84 * Increment cgroup's nr_frozen_tasks.
 85 */
 86static void cgroup_inc_frozen_cnt(struct cgroup *cgrp)
 87{
 88	cgrp->freezer.nr_frozen_tasks++;
 89}
 90
 91/*
 92 * Decrement cgroup's nr_frozen_tasks.
 93 */
 94static void cgroup_dec_frozen_cnt(struct cgroup *cgrp)
 95{
 96	cgrp->freezer.nr_frozen_tasks--;
 97	WARN_ON_ONCE(cgrp->freezer.nr_frozen_tasks < 0);
 98}
 99
100/*
101 * Enter frozen/stopped state, if not yet there. Update cgroup's counters,
102 * and revisit the state of the cgroup, if necessary.
103 */
104void cgroup_enter_frozen(void)
105{
106	struct cgroup *cgrp;
107
108	if (current->frozen)
109		return;
110
111	spin_lock_irq(&css_set_lock);
112	current->frozen = true;
113	cgrp = task_dfl_cgroup(current);
114	cgroup_inc_frozen_cnt(cgrp);
115	cgroup_update_frozen(cgrp);
116	spin_unlock_irq(&css_set_lock);
117}
118
119/*
120 * Conditionally leave frozen/stopped state. Update cgroup's counters,
121 * and revisit the state of the cgroup, if necessary.
122 *
123 * If always_leave is not set, and the cgroup is freezing,
124 * we're racing with the cgroup freezing. In this case, we don't
125 * drop the frozen counter to avoid a transient switch to
126 * the unfrozen state.
127 */
128void cgroup_leave_frozen(bool always_leave)
129{
130	struct cgroup *cgrp;
131
132	spin_lock_irq(&css_set_lock);
133	cgrp = task_dfl_cgroup(current);
134	if (always_leave || !test_bit(CGRP_FREEZE, &cgrp->flags)) {
135		cgroup_dec_frozen_cnt(cgrp);
136		cgroup_update_frozen(cgrp);
137		WARN_ON_ONCE(!current->frozen);
138		current->frozen = false;
139	} else if (!(current->jobctl & JOBCTL_TRAP_FREEZE)) {
140		spin_lock(&current->sighand->siglock);
141		current->jobctl |= JOBCTL_TRAP_FREEZE;
142		set_thread_flag(TIF_SIGPENDING);
143		spin_unlock(&current->sighand->siglock);
144	}
145	spin_unlock_irq(&css_set_lock);
146}
147
148/*
149 * Freeze or unfreeze the task by setting or clearing the JOBCTL_TRAP_FREEZE
150 * jobctl bit.
151 */
152static void cgroup_freeze_task(struct task_struct *task, bool freeze)
153{
154	unsigned long flags;
155
156	/* If the task is about to die, don't bother with freezing it. */
157	if (!lock_task_sighand(task, &flags))
158		return;
159
160	if (freeze) {
161		task->jobctl |= JOBCTL_TRAP_FREEZE;
162		signal_wake_up(task, false);
163	} else {
164		task->jobctl &= ~JOBCTL_TRAP_FREEZE;
165		wake_up_process(task);
166	}
167
168	unlock_task_sighand(task, &flags);
169}
170
171/*
172 * Freeze or unfreeze all tasks in the given cgroup.
173 */
174static void cgroup_do_freeze(struct cgroup *cgrp, bool freeze)
175{
176	struct css_task_iter it;
177	struct task_struct *task;
178
179	lockdep_assert_held(&cgroup_mutex);
180
181	spin_lock_irq(&css_set_lock);
182	if (freeze)
183		set_bit(CGRP_FREEZE, &cgrp->flags);
184	else
185		clear_bit(CGRP_FREEZE, &cgrp->flags);
186	spin_unlock_irq(&css_set_lock);
187
188	if (freeze)
189		TRACE_CGROUP_PATH(freeze, cgrp);
190	else
191		TRACE_CGROUP_PATH(unfreeze, cgrp);
192
193	css_task_iter_start(&cgrp->self, 0, &it);
194	while ((task = css_task_iter_next(&it))) {
195		/*
196		 * Ignore kernel threads here. Freezing cgroups containing
197		 * kthreads isn't supported.
198		 */
199		if (task->flags & PF_KTHREAD)
200			continue;
201		cgroup_freeze_task(task, freeze);
202	}
203	css_task_iter_end(&it);
204
205	/*
206	 * Cgroup state should be revisited here to cover empty leaf cgroups
207	 * and cgroups whose descendants are already in the desired state.
208	 */
209	spin_lock_irq(&css_set_lock);
210	if (cgrp->nr_descendants == cgrp->freezer.nr_frozen_descendants)
211		cgroup_update_frozen(cgrp);
212	spin_unlock_irq(&css_set_lock);
213}
214
215/*
216 * Adjust the task state (freeze or unfreeze) and revisit the state of
217 * source and destination cgroups.
218 */
219void cgroup_freezer_migrate_task(struct task_struct *task,
220				 struct cgroup *src, struct cgroup *dst)
221{
222	lockdep_assert_held(&css_set_lock);
223
224	/*
225	 * Kernel threads are not supposed to be frozen at all.
226	 */
227	if (task->flags & PF_KTHREAD)
228		return;
229
230	/*
231	 * No changes are necessary if neither the src nor the dst cgroup is
232	 * freezing and the task is not frozen.
233	 */
234	if (!test_bit(CGRP_FREEZE, &src->flags) &&
235	    !test_bit(CGRP_FREEZE, &dst->flags) &&
236	    !task->frozen)
237		return;
238
239	/*
240	 * Adjust counters of freezing and frozen tasks.
241	 * Note that if the task is frozen, but the destination cgroup is not
242	 * frozen, we bump both counters to keep them balanced.
243	 */
244	if (task->frozen) {
245		cgroup_inc_frozen_cnt(dst);
246		cgroup_dec_frozen_cnt(src);
247	}
248	cgroup_update_frozen(dst);
249	cgroup_update_frozen(src);
250
251	/*
252	 * Force the task to the desired state.
253	 */
254	cgroup_freeze_task(task, test_bit(CGRP_FREEZE, &dst->flags));
255}
256
257void cgroup_freeze(struct cgroup *cgrp, bool freeze)
258{
259	struct cgroup_subsys_state *css;
260	struct cgroup *parent;
261	struct cgroup *dsct;
262	bool applied = false;
263	bool old_e;
264
265	lockdep_assert_held(&cgroup_mutex);
266
267	/*
268	 * Nothing changed? Just exit.
269	 */
270	if (cgrp->freezer.freeze == freeze)
271		return;
272
273	cgrp->freezer.freeze = freeze;
274
275	/*
276	 * Propagate changes down the cgroup tree.
277	 */
278	css_for_each_descendant_pre(css, &cgrp->self) {
279		dsct = css->cgroup;
280
281		if (cgroup_is_dead(dsct))
282			continue;
283
284		/*
285		 * e_freeze is affected by the parent's e_freeze and dsct's freeze.
286		 * If the old e_freeze equals the new e_freeze, nothing changes and
287		 * its children will not be affected, so do nothing and skip the subtree.
288		 */
289		old_e = dsct->freezer.e_freeze;
290		parent = cgroup_parent(dsct);
291		dsct->freezer.e_freeze = (dsct->freezer.freeze ||
292					  parent->freezer.e_freeze);
293		if (dsct->freezer.e_freeze == old_e) {
294			css = css_rightmost_descendant(css);
295			continue;
296		}
297
298		/*
299		 * Do change actual state: freeze or unfreeze.
300		 */
301		cgroup_do_freeze(dsct, freeze);
302		applied = true;
303	}
304
305	/*
306	 * Even if the actual state hasn't changed, let's notify the user.
307	 * The state can be enforced by an ancestor cgroup: the cgroup
308	 * can already be in the desired state or it can be locked in the
309	 * opposite state, so that the transition will never happen.
310	 * In both cases it's better to notify the user that there is
311	 * nothing to wait for.
312	 */
313	if (!applied) {
314		TRACE_CGROUP_PATH(notify_frozen, cgrp,
315				  test_bit(CGRP_FROZEN, &cgrp->flags));
316		cgroup_file_notify(&cgrp->events_file);
317	}
318}
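
The v6.13.7 listing above is the cgroup v2 freezer: cgroup_freeze() flips CGRP_FREEZE, cgroup_freeze_task() traps each task via JOBCTL_TRAP_FREEZE, and once every task is accounted frozen, cgroup_update_frozen() sets CGRP_FROZEN and cgroup_file_notify() signals cgroup.events. From userspace this is driven through the cgroup.freeze and cgroup.events control files. The following is a minimal sketch of that interaction, not part of the kernel source; the cgroup path /sys/fs/cgroup/demo is an assumption and error handling is kept to the bare minimum.

/* freeze_demo.c - sketch of the cgroup v2 freezer interface (userspace).
 * Assumptions: cgroup2 is mounted at /sys/fs/cgroup and the cgroup
 * /sys/fs/cgroup/demo already exists and holds the tasks to freeze.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) != (ssize_t)strlen(val)) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	struct pollfd pfd = { .events = POLLPRI };
	char buf[256];
	ssize_t n;

	/* Ask the kernel to freeze every task in the cgroup. */
	if (write_str("/sys/fs/cgroup/demo/cgroup.freeze", "1"))
		return 1;

	/*
	 * cgroup.events is notified (cgroup_file_notify() in the listing
	 * above) whenever the "frozen" key flips; poll() reports POLLPRI.
	 */
	pfd.fd = open("/sys/fs/cgroup/demo/cgroup.events", O_RDONLY);
	if (pfd.fd < 0)
		return 1;

	for (;;) {
		n = pread(pfd.fd, buf, sizeof(buf) - 1, 0);
		if (n < 0)
			return 1;
		buf[n] = '\0';
		if (strstr(buf, "frozen 1"))
			break;
		poll(&pfd, 1, -1);	/* wait for the next state change */
	}
	printf("cgroup is frozen\n");
	close(pfd.fd);
	return 0;
}

Unfreezing is the mirror image: write "0" to cgroup.freeze and wait for cgroup.events to report "frozen 0".
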
v4.17
  1/*
  2 * cgroup_freezer.c -  control group freezer subsystem
  3 *
  4 * Copyright IBM Corporation, 2007
  5 *
  6 * Author : Cedric Le Goater <clg@fr.ibm.com>
  7 *
  8 * This program is free software; you can redistribute it and/or modify it
  9 * under the terms of version 2.1 of the GNU Lesser General Public License
 10 * as published by the Free Software Foundation.
 11 *
 12 * This program is distributed in the hope that it would be useful, but
 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 15 */
 16
 17#include <linux/export.h>
 18#include <linux/slab.h>
 19#include <linux/cgroup.h>
 20#include <linux/fs.h>
 21#include <linux/uaccess.h>
 22#include <linux/freezer.h>
 23#include <linux/seq_file.h>
 24#include <linux/mutex.h>
 25
 26/*
 27 * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
 28 * set if "FROZEN" is written to freezer.state cgroupfs file, and cleared
 29 * for "THAWED".  FREEZING_PARENT is set if the parent freezer is FREEZING
 30 * for whatever reason.  IOW, a cgroup has FREEZING_PARENT set if one of
 31 * its ancestors has FREEZING_SELF set.
 32 */
 33enum freezer_state_flags {
 34	CGROUP_FREEZER_ONLINE	= (1 << 0), /* freezer is fully online */
 35	CGROUP_FREEZING_SELF	= (1 << 1), /* this freezer is freezing */
 36	CGROUP_FREEZING_PARENT	= (1 << 2), /* the parent freezer is freezing */
 37	CGROUP_FROZEN		= (1 << 3), /* this and its descendants frozen */
 38
 39	/* mask for all FREEZING flags */
 40	CGROUP_FREEZING		= CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT,
 41};
 42
 43struct freezer {
 44	struct cgroup_subsys_state	css;
 45	unsigned int			state;
 46};
 47
 48static DEFINE_MUTEX(freezer_mutex);
 49
 50static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
 51{
 52	return css ? container_of(css, struct freezer, css) : NULL;
 53}
 54
 55static inline struct freezer *task_freezer(struct task_struct *task)
 56{
 57	return css_freezer(task_css(task, freezer_cgrp_id));
 58}
 59
 60static struct freezer *parent_freezer(struct freezer *freezer)
 61{
 62	return css_freezer(freezer->css.parent);
 63}
 64
 65bool cgroup_freezing(struct task_struct *task)
 66{
 67	bool ret;
 68
 69	rcu_read_lock();
 70	ret = task_freezer(task)->state & CGROUP_FREEZING;
 71	rcu_read_unlock();
 72
 73	return ret;
 74}
 75
 76static const char *freezer_state_strs(unsigned int state)
 77{
 78	if (state & CGROUP_FROZEN)
 79		return "FROZEN";
 80	if (state & CGROUP_FREEZING)
 81		return "FREEZING";
 82	return "THAWED";
 83};
 84
 85static struct cgroup_subsys_state *
 86freezer_css_alloc(struct cgroup_subsys_state *parent_css)
 87{
 88	struct freezer *freezer;
 89
 90	freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
 91	if (!freezer)
 92		return ERR_PTR(-ENOMEM);
 93
 94	return &freezer->css;
 95}
 96
 97/**
 98 * freezer_css_online - commit creation of a freezer css
 99 * @css: css being created
100 *
101 * We're committing to creation of @css.  Mark it online and inherit
102 * parent's freezing state while holding both parent's and our
103 * freezer->lock.
104 */
105static int freezer_css_online(struct cgroup_subsys_state *css)
106{
107	struct freezer *freezer = css_freezer(css);
108	struct freezer *parent = parent_freezer(freezer);
109
110	mutex_lock(&freezer_mutex);
111
112	freezer->state |= CGROUP_FREEZER_ONLINE;
113
114	if (parent && (parent->state & CGROUP_FREEZING)) {
115		freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN;
116		atomic_inc(&system_freezing_cnt);
117	}
118
119	mutex_unlock(&freezer_mutex);
120	return 0;
121}
122
123/**
124 * freezer_css_offline - initiate destruction of a freezer css
125 * @css: css being destroyed
126 *
127 * @css is going away.  Mark it dead and decrement system_freezing_count if
128 * it was holding one.
129 */
130static void freezer_css_offline(struct cgroup_subsys_state *css)
131{
132	struct freezer *freezer = css_freezer(css);
133
134	mutex_lock(&freezer_mutex);
135
136	if (freezer->state & CGROUP_FREEZING)
137		atomic_dec(&system_freezing_cnt);
138
139	freezer->state = 0;
140
141	mutex_unlock(&freezer_mutex);
142}
143
144static void freezer_css_free(struct cgroup_subsys_state *css)
145{
146	kfree(css_freezer(css));
147}
148
149/*
150 * Tasks can be migrated into a different freezer anytime regardless of its
151 * current state.  freezer_attach() is responsible for making new tasks
152 * conform to the current state.
153 *
154 * Freezer state changes and task migration are synchronized via
155 * @freezer->lock.  freezer_attach() makes the new tasks conform to the
156 * current state and all following state changes can see the new tasks.
157 */
158static void freezer_attach(struct cgroup_taskset *tset)
159{
160	struct task_struct *task;
161	struct cgroup_subsys_state *new_css;
162
163	mutex_lock(&freezer_mutex);
164
165	/*
166	 * Make the new tasks conform to the current state of @new_css.
167	 * For simplicity, when migrating any task to a FROZEN cgroup, we
168	 * revert it to FREEZING and let update_if_frozen() determine the
169	 * correct state later.
170	 *
171	 * Tasks in @tset are on @new_css but may not conform to its
172	 * current state before executing the following - !frozen tasks may
173	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
174	 */
175	cgroup_taskset_for_each(task, new_css, tset) {
176		struct freezer *freezer = css_freezer(new_css);
177
178		if (!(freezer->state & CGROUP_FREEZING)) {
179			__thaw_task(task);
180		} else {
181			freeze_task(task);
182			/* clear FROZEN and propagate upwards */
183			while (freezer && (freezer->state & CGROUP_FROZEN)) {
184				freezer->state &= ~CGROUP_FROZEN;
185				freezer = parent_freezer(freezer);
186			}
187		}
188	}
189
190	mutex_unlock(&freezer_mutex);
191}
192
193/**
194 * freezer_fork - cgroup post fork callback
195 * @task: a task which has just been forked
196 *
197 * @task has just been created and should conform to the current state of
198 * the cgroup_freezer it belongs to.  This function may race against
199 * freezer_attach().  Losing to freezer_attach() means that we don't have
200 * to do anything as freezer_attach() will put @task into the appropriate
201 * state.
202 */
203static void freezer_fork(struct task_struct *task)
204{
205	struct freezer *freezer;
206
207	/*
208	 * The root cgroup is non-freezable, so we can skip locking the
209	 * freezer.  This is safe regardless of race with task migration.
210	 * If we didn't race or won, skipping is obviously the right thing
211	 * to do.  If we lost and root is the new cgroup, noop is still the
212	 * right thing to do.
213	 */
214	if (task_css_is_root(task, freezer_cgrp_id))
215		return;
216
217	mutex_lock(&freezer_mutex);
218	rcu_read_lock();
219
220	freezer = task_freezer(task);
221	if (freezer->state & CGROUP_FREEZING)
222		freeze_task(task);
223
224	rcu_read_unlock();
225	mutex_unlock(&freezer_mutex);
226}
227
228/**
229 * update_if_frozen - update whether a cgroup finished freezing
230 * @css: css of interest
231 *
232 * Once FREEZING is initiated, transition to FROZEN is lazily updated by
233 * calling this function.  If the current state is FREEZING but not FROZEN,
234 * this function checks whether all tasks of this cgroup and the descendant
235 * cgroups finished freezing and, if so, sets FROZEN.
236 *
237 * The caller is responsible for grabbing RCU read lock and calling
238 * update_if_frozen() on all descendants prior to invoking this function.
239 *
240 * Task states and freezer state might disagree while tasks are being
241 * migrated into or out of @css, so we can't verify task states against
242 * @freezer state here.  See freezer_attach() for details.
243 */
244static void update_if_frozen(struct cgroup_subsys_state *css)
245{
246	struct freezer *freezer = css_freezer(css);
247	struct cgroup_subsys_state *pos;
248	struct css_task_iter it;
249	struct task_struct *task;
250
251	lockdep_assert_held(&freezer_mutex);
252
253	if (!(freezer->state & CGROUP_FREEZING) ||
254	    (freezer->state & CGROUP_FROZEN))
255		return;
256
257	/* are all (live) children frozen? */
258	rcu_read_lock();
259	css_for_each_child(pos, css) {
260		struct freezer *child = css_freezer(pos);
261
262		if ((child->state & CGROUP_FREEZER_ONLINE) &&
263		    !(child->state & CGROUP_FROZEN)) {
264			rcu_read_unlock();
265			return;
266		}
267	}
268	rcu_read_unlock();
269
270	/* are all tasks frozen? */
271	css_task_iter_start(css, 0, &it);
272
273	while ((task = css_task_iter_next(&it))) {
274		if (freezing(task)) {
275			/*
276			 * freezer_should_skip() indicates that the task
277			 * should be skipped when determining freezing
278			 * completion.  Consider it frozen in addition to
279			 * the usual frozen condition.
280			 */
281			if (!frozen(task) && !freezer_should_skip(task))
282				goto out_iter_end;
283		}
284	}
285
286	freezer->state |= CGROUP_FROZEN;
287out_iter_end:
288	css_task_iter_end(&it);
289}
290
291static int freezer_read(struct seq_file *m, void *v)
292{
293	struct cgroup_subsys_state *css = seq_css(m), *pos;
294
295	mutex_lock(&freezer_mutex);
296	rcu_read_lock();
297
298	/* update states bottom-up */
299	css_for_each_descendant_post(pos, css) {
300		if (!css_tryget_online(pos))
301			continue;
302		rcu_read_unlock();
303
304		update_if_frozen(pos);
305
306		rcu_read_lock();
307		css_put(pos);
308	}
309
310	rcu_read_unlock();
311	mutex_unlock(&freezer_mutex);
312
313	seq_puts(m, freezer_state_strs(css_freezer(css)->state));
314	seq_putc(m, '\n');
315	return 0;
316}
317
318static void freeze_cgroup(struct freezer *freezer)
319{
320	struct css_task_iter it;
321	struct task_struct *task;
322
323	css_task_iter_start(&freezer->css, 0, &it);
324	while ((task = css_task_iter_next(&it)))
325		freeze_task(task);
326	css_task_iter_end(&it);
327}
328
329static void unfreeze_cgroup(struct freezer *freezer)
330{
331	struct css_task_iter it;
332	struct task_struct *task;
333
334	css_task_iter_start(&freezer->css, 0, &it);
335	while ((task = css_task_iter_next(&it)))
336		__thaw_task(task);
337	css_task_iter_end(&it);
338}
339
340/**
341 * freezer_apply_state - apply state change to a single cgroup_freezer
342 * @freezer: freezer to apply state change to
343 * @freeze: whether to freeze or unfreeze
344 * @state: CGROUP_FREEZING_* flag to set or clear
345 *
346 * Set or clear @state on @cgroup according to @freeze, and perform
347 * freezing or thawing as necessary.
348 */
349static void freezer_apply_state(struct freezer *freezer, bool freeze,
350				unsigned int state)
351{
352	/* also synchronizes against task migration, see freezer_attach() */
353	lockdep_assert_held(&freezer_mutex);
354
355	if (!(freezer->state & CGROUP_FREEZER_ONLINE))
356		return;
357
358	if (freeze) {
359		if (!(freezer->state & CGROUP_FREEZING))
360			atomic_inc(&system_freezing_cnt);
361		freezer->state |= state;
362		freeze_cgroup(freezer);
363	} else {
364		bool was_freezing = freezer->state & CGROUP_FREEZING;
365
366		freezer->state &= ~state;
367
368		if (!(freezer->state & CGROUP_FREEZING)) {
369			if (was_freezing)
370				atomic_dec(&system_freezing_cnt);
371			freezer->state &= ~CGROUP_FROZEN;
372			unfreeze_cgroup(freezer);
373		}
374	}
375}
376
377/**
378 * freezer_change_state - change the freezing state of a cgroup_freezer
379 * @freezer: freezer of interest
380 * @freeze: whether to freeze or thaw
381 *
382 * Freeze or thaw @freezer according to @freeze.  The operations are
383 * recursive - all descendants of @freezer will be affected.
384 */
385static void freezer_change_state(struct freezer *freezer, bool freeze)
386{
387	struct cgroup_subsys_state *pos;
388
389	/*
390	 * Update all its descendants in pre-order traversal.  Each
391	 * descendant will try to inherit its parent's FREEZING state as
392	 * CGROUP_FREEZING_PARENT.
393	 */
394	mutex_lock(&freezer_mutex);
395	rcu_read_lock();
396	css_for_each_descendant_pre(pos, &freezer->css) {
397		struct freezer *pos_f = css_freezer(pos);
398		struct freezer *parent = parent_freezer(pos_f);
399
400		if (!css_tryget_online(pos))
401			continue;
402		rcu_read_unlock();
403
404		if (pos_f == freezer)
405			freezer_apply_state(pos_f, freeze,
406					    CGROUP_FREEZING_SELF);
407		else
408			freezer_apply_state(pos_f,
409					    parent->state & CGROUP_FREEZING,
410					    CGROUP_FREEZING_PARENT);
411
412		rcu_read_lock();
413		css_put(pos);
414	}
415	rcu_read_unlock();
416	mutex_unlock(&freezer_mutex);
417}
418
419static ssize_t freezer_write(struct kernfs_open_file *of,
420			     char *buf, size_t nbytes, loff_t off)
421{
422	bool freeze;
423
424	buf = strstrip(buf);
425
426	if (strcmp(buf, freezer_state_strs(0)) == 0)
427		freeze = false;
428	else if (strcmp(buf, freezer_state_strs(CGROUP_FROZEN)) == 0)
429		freeze = true;
430	else
431		return -EINVAL;
432
433	freezer_change_state(css_freezer(of_css(of)), freeze);
434	return nbytes;
435}
436
437static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
438				      struct cftype *cft)
439{
440	struct freezer *freezer = css_freezer(css);
441
442	return (bool)(freezer->state & CGROUP_FREEZING_SELF);
443}
444
445static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
446					struct cftype *cft)
447{
448	struct freezer *freezer = css_freezer(css);
449
450	return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
451}
452
453static struct cftype files[] = {
454	{
455		.name = "state",
456		.flags = CFTYPE_NOT_ON_ROOT,
457		.seq_show = freezer_read,
458		.write = freezer_write,
459	},
460	{
461		.name = "self_freezing",
462		.flags = CFTYPE_NOT_ON_ROOT,
463		.read_u64 = freezer_self_freezing_read,
464	},
465	{
466		.name = "parent_freezing",
467		.flags = CFTYPE_NOT_ON_ROOT,
468		.read_u64 = freezer_parent_freezing_read,
469	},
470	{ }	/* terminate */
471};
472
473struct cgroup_subsys freezer_cgrp_subsys = {
474	.css_alloc	= freezer_css_alloc,
475	.css_online	= freezer_css_online,
476	.css_offline	= freezer_css_offline,
477	.css_free	= freezer_css_free,
478	.attach		= freezer_attach,
479	.fork		= freezer_fork,
480	.legacy_cftypes	= files,
481};
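
For comparison, the v4.17 listing is the legacy cgroup v1 freezer controller, driven entirely through the freezer.state file handled by freezer_read()/freezer_write() above: writing FROZEN or THAWED requests the transition, and reading the file may return the intermediate FREEZING state until update_if_frozen() observes that every task has frozen. Below is a minimal userspace sketch, not part of the kernel source; it assumes the v1 freezer hierarchy is mounted at /sys/fs/cgroup/freezer and that a child group named demo already exists.

/* v1_freeze_demo.c - sketch of the legacy freezer.state interface.
 * Assumptions: the v1 freezer hierarchy is mounted at
 * /sys/fs/cgroup/freezer and the group "demo" already exists.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define STATE_FILE "/sys/fs/cgroup/freezer/demo/freezer.state"

static ssize_t read_state(char *buf, size_t len)
{
	int fd = open(STATE_FILE, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = read(fd, buf, len - 1);
	close(fd);
	if (n >= 0)
		buf[n] = '\0';
	return n;
}

int main(void)
{
	char buf[32];
	int fd;

	/* Request the freeze; freezer_write() accepts "FROZEN" or "THAWED". */
	fd = open(STATE_FILE, O_WRONLY);
	if (fd < 0 || write(fd, "FROZEN", 6) != 6)
		return 1;
	close(fd);

	/* freezer.state reports FREEZING until all tasks are really frozen. */
	for (;;) {
		if (read_state(buf, sizeof(buf)) < 0)
			return 1;
		if (!strncmp(buf, "FROZEN", 6))
			break;
		usleep(10 * 1000);	/* still FREEZING; try again shortly */
	}
	printf("state: %s", buf);	/* kernel output already ends in '\n' */
	return 0;
}

Thawing is the mirror operation: writing THAWED makes freezer_apply_state() clear the FREEZING and FROZEN bits and wake the tasks, so a subsequent read reports THAWED right away.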