1/*
2 * cgroup_freezer.c - control group freezer subsystem
3 *
4 * Copyright IBM Corporation, 2007
5 *
6 * Author : Cedric Le Goater <clg@fr.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2.1 of the GNU Lesser General Public License
10 * as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it would be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#include <linux/export.h>
18#include <linux/slab.h>
19#include <linux/cgroup.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/freezer.h>
23#include <linux/seq_file.h>
24#include <linux/mutex.h>
25
26/*
27 * A cgroup is freezing if any FREEZING flags are set. FREEZING_SELF is
28 * set if "FROZEN" is written to freezer.state cgroupfs file, and cleared
29 * for "THAWED". FREEZING_PARENT is set if the parent freezer is FREEZING
30 * for whatever reason. IOW, a cgroup has FREEZING_PARENT set if one of
31 * its ancestors has FREEZING_SELF set.
32 */
/* Bit flags composing a freezer's ->state; see comment block above. */
enum freezer_state_flags {
	CGROUP_FREEZER_ONLINE = (1 << 0), /* freezer is fully online */
	CGROUP_FREEZING_SELF = (1 << 1), /* this freezer is freezing */
	CGROUP_FREEZING_PARENT = (1 << 2), /* the parent freezer is freezing */
	CGROUP_FROZEN = (1 << 3), /* this and its descendants frozen */

	/* mask for all FREEZING flags */
	CGROUP_FREEZING = CGROUP_FREEZING_SELF | CGROUP_FREEZING_PARENT,
};
42
/* per-cgroup freezer state, embedded in (and recovered from) the css */
struct freezer {
	struct cgroup_subsys_state css;	/* css_freezer() maps css back to this */
	unsigned int state;		/* mask of enum freezer_state_flags */
};
47
/* serializes freezer state changes; also synchronizes against task migration */
static DEFINE_MUTEX(freezer_mutex);
49
50static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
51{
52 return css ? container_of(css, struct freezer, css) : NULL;
53}
54
55static inline struct freezer *task_freezer(struct task_struct *task)
56{
57 return css_freezer(task_css(task, freezer_cgrp_id));
58}
59
60static struct freezer *parent_freezer(struct freezer *freezer)
61{
62 return css_freezer(css_parent(&freezer->css));
63}
64
65bool cgroup_freezing(struct task_struct *task)
66{
67 bool ret;
68
69 rcu_read_lock();
70 ret = task_freezer(task)->state & CGROUP_FREEZING;
71 rcu_read_unlock();
72
73 return ret;
74}
75
76/*
77 * cgroups_write_string() limits the size of freezer state strings to
78 * CGROUP_LOCAL_BUFFER_SIZE
79 */
80static const char *freezer_state_strs(unsigned int state)
81{
82 if (state & CGROUP_FROZEN)
83 return "FROZEN";
84 if (state & CGROUP_FREEZING)
85 return "FREEZING";
86 return "THAWED";
87};
88
89static struct cgroup_subsys_state *
90freezer_css_alloc(struct cgroup_subsys_state *parent_css)
91{
92 struct freezer *freezer;
93
94 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
95 if (!freezer)
96 return ERR_PTR(-ENOMEM);
97
98 return &freezer->css;
99}
100
/**
 * freezer_css_online - commit creation of a freezer css
 * @css: css being created
 *
 * We're committing to creation of @css.  Mark it online and inherit the
 * parent's freezing state while holding freezer_mutex.
 */
static int freezer_css_online(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);
	struct freezer *parent = parent_freezer(freezer);

	mutex_lock(&freezer_mutex);

	freezer->state |= CGROUP_FREEZER_ONLINE;

	/*
	 * Inherit FREEZING from an already-freezing parent.  Each FREEZING
	 * freezer holds one reference on system_freezing_cnt (dropped in
	 * freezer_css_offline() or freezer_apply_state()).
	 */
	if (parent && (parent->state & CGROUP_FREEZING)) {
		freezer->state |= CGROUP_FREEZING_PARENT | CGROUP_FROZEN;
		atomic_inc(&system_freezing_cnt);
	}

	mutex_unlock(&freezer_mutex);
	return 0;
}
126
/**
 * freezer_css_offline - initiate destruction of a freezer css
 * @css: css being destroyed
 *
 * @css is going away.  Mark it dead and decrement system_freezing_count if
 * it was holding one.
 */
static void freezer_css_offline(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);

	mutex_lock(&freezer_mutex);

	/* drop the system_freezing_cnt reference taken when FREEZING was set */
	if (freezer->state & CGROUP_FREEZING)
		atomic_dec(&system_freezing_cnt);

	/* clearing CGROUP_FREEZER_ONLINE makes further state changes no-ops */
	freezer->state = 0;

	mutex_unlock(&freezer_mutex);
}
147
/* Release the freezer allocated by freezer_css_alloc(). */
static void freezer_css_free(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);

	kfree(freezer);
}
152
/*
 * Tasks can be migrated into a different freezer anytime regardless of its
 * current state.  freezer_attach() is responsible for making new tasks
 * conform to the current state.
 *
 * Freezer state changes and task migration are synchronized via
 * freezer_mutex.  freezer_attach() makes the new tasks conform to the
 * current state and all following state changes can see the new tasks.
 */
static void freezer_attach(struct cgroup_subsys_state *new_css,
			   struct cgroup_taskset *tset)
{
	struct freezer *freezer = css_freezer(new_css);
	struct task_struct *task;
	bool clear_frozen = false;

	mutex_lock(&freezer_mutex);

	/*
	 * Make the new tasks conform to the current state of @new_css.
	 * For simplicity, when migrating any task to a FROZEN cgroup, we
	 * revert it to FREEZING and let update_if_frozen() determine the
	 * correct state later.
	 *
	 * Tasks in @tset are on @new_css but may not conform to its
	 * current state before executing the following - !frozen tasks may
	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
	 */
	cgroup_taskset_for_each(task, tset) {
		if (!(freezer->state & CGROUP_FREEZING)) {
			__thaw_task(task);
		} else {
			freeze_task(task);
			/* a not-yet-frozen task arrived; can't be FROZEN now */
			freezer->state &= ~CGROUP_FROZEN;
			clear_frozen = true;
		}
	}

	/*
	 * Propagate FROZEN clearing upwards.  Stop once an ancestor is not
	 * FREEZING.
	 */
	while (clear_frozen && (freezer = parent_freezer(freezer))) {
		freezer->state &= ~CGROUP_FROZEN;
		clear_frozen = freezer->state & CGROUP_FREEZING;
	}

	mutex_unlock(&freezer_mutex);
}
199
/**
 * freezer_fork - cgroup post fork callback
 * @task: a task which has just been forked
 *
 * @task has just been created and should conform to the current state of
 * the cgroup_freezer it belongs to.  This function may race against
 * freezer_attach().  Losing to freezer_attach() means that we don't have
 * to do anything as freezer_attach() will put @task into the appropriate
 * state.
 */
static void freezer_fork(struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * The root cgroup is non-freezable, so we can skip locking the
	 * freezer.  This is safe regardless of race with task migration.
	 * If we didn't race or won, skipping is obviously the right thing
	 * to do.  If we lost and root is the new cgroup, noop is still the
	 * right thing to do.
	 */
	if (task_css_is_root(task, freezer_cgrp_id))
		return;

	mutex_lock(&freezer_mutex);
	rcu_read_lock();

	/* freeze the new task if its cgroup is currently FREEZING */
	freezer = task_freezer(task);
	if (freezer->state & CGROUP_FREEZING)
		freeze_task(task);

	rcu_read_unlock();
	mutex_unlock(&freezer_mutex);
}
234
/**
 * update_if_frozen - update whether a cgroup finished freezing
 * @css: css of interest
 *
 * Once FREEZING is initiated, transition to FROZEN is lazily updated by
 * calling this function.  If the current state is FREEZING but not FROZEN,
 * this function checks whether all tasks of this cgroup and the descendant
 * cgroups finished freezing and, if so, sets FROZEN.
 *
 * The caller is responsible for grabbing RCU read lock and calling
 * update_if_frozen() on all descendants prior to invoking this function.
 *
 * Task states and freezer state might disagree while tasks are being
 * migrated into or out of @css, so we can't verify task states against
 * @freezer state here.  See freezer_attach() for details.
 */
static void update_if_frozen(struct cgroup_subsys_state *css)
{
	struct freezer *freezer = css_freezer(css);
	struct cgroup_subsys_state *pos;
	struct css_task_iter it;
	struct task_struct *task;

	lockdep_assert_held(&freezer_mutex);

	/* only a FREEZING-but-not-yet-FROZEN cgroup needs updating */
	if (!(freezer->state & CGROUP_FREEZING) ||
	    (freezer->state & CGROUP_FROZEN))
		return;

	/* are all (live) children frozen? */
	rcu_read_lock();
	css_for_each_child(pos, css) {
		struct freezer *child = css_freezer(pos);

		/* offline children don't count; see freezer_css_offline() */
		if ((child->state & CGROUP_FREEZER_ONLINE) &&
		    !(child->state & CGROUP_FROZEN)) {
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();

	/* are all tasks frozen? */
	css_task_iter_start(css, &it);

	while ((task = css_task_iter_next(&it))) {
		if (freezing(task)) {
			/*
			 * freezer_should_skip() indicates that the task
			 * should be skipped when determining freezing
			 * completion.  Consider it frozen in addition to
			 * the usual frozen condition.
			 */
			if (!frozen(task) && !freezer_should_skip(task))
				goto out_iter_end;
		}
	}

	freezer->state |= CGROUP_FROZEN;
out_iter_end:
	css_task_iter_end(&it);
}
297
/*
 * Show the state string for the cgroup behind @m's freezer.state file,
 * lazily completing any pending FREEZING -> FROZEN transitions first.
 */
static int freezer_read(struct seq_file *m, void *v)
{
	struct cgroup_subsys_state *css = seq_css(m), *pos;

	mutex_lock(&freezer_mutex);
	rcu_read_lock();

	/* update states bottom-up */
	css_for_each_descendant_post(pos, css) {
		/* skip dying csses; tryget pins @pos across the RCU break */
		if (!css_tryget(pos))
			continue;
		rcu_read_unlock();

		update_if_frozen(pos);

		rcu_read_lock();
		css_put(pos);
	}

	rcu_read_unlock();
	mutex_unlock(&freezer_mutex);

	seq_puts(m, freezer_state_strs(css_freezer(css)->state));
	seq_putc(m, '\n');
	return 0;
}
324
325static void freeze_cgroup(struct freezer *freezer)
326{
327 struct css_task_iter it;
328 struct task_struct *task;
329
330 css_task_iter_start(&freezer->css, &it);
331 while ((task = css_task_iter_next(&it)))
332 freeze_task(task);
333 css_task_iter_end(&it);
334}
335
336static void unfreeze_cgroup(struct freezer *freezer)
337{
338 struct css_task_iter it;
339 struct task_struct *task;
340
341 css_task_iter_start(&freezer->css, &it);
342 while ((task = css_task_iter_next(&it)))
343 __thaw_task(task);
344 css_task_iter_end(&it);
345}
346
/**
 * freezer_apply_state - apply state change to a single cgroup_freezer
 * @freezer: freezer to apply state change to
 * @freeze: whether to freeze or unfreeze
 * @state: CGROUP_FREEZING_* flag to set or clear
 *
 * Set or clear @state on @cgroup according to @freeze, and perform
 * freezing or thawing as necessary.
 */
static void freezer_apply_state(struct freezer *freezer, bool freeze,
				unsigned int state)
{
	/* also synchronizes against task migration, see freezer_attach() */
	lockdep_assert_held(&freezer_mutex);

	/* csses that aren't online yet (or already offlined) are skipped */
	if (!(freezer->state & CGROUP_FREEZER_ONLINE))
		return;

	if (freeze) {
		/* first FREEZING flag takes the system_freezing_cnt ref */
		if (!(freezer->state & CGROUP_FREEZING))
			atomic_inc(&system_freezing_cnt);
		freezer->state |= state;
		freeze_cgroup(freezer);
	} else {
		bool was_freezing = freezer->state & CGROUP_FREEZING;

		freezer->state &= ~state;

		/* only thaw when the last FREEZING flag is cleared */
		if (!(freezer->state & CGROUP_FREEZING)) {
			if (was_freezing)
				atomic_dec(&system_freezing_cnt);
			freezer->state &= ~CGROUP_FROZEN;
			unfreeze_cgroup(freezer);
		}
	}
}
383
/**
 * freezer_change_state - change the freezing state of a cgroup_freezer
 * @freezer: freezer of interest
 * @freeze: whether to freeze or thaw
 *
 * Freeze or thaw @freezer according to @freeze.  The operations are
 * recursive - all descendants of @freezer will be affected.
 */
static void freezer_change_state(struct freezer *freezer, bool freeze)
{
	struct cgroup_subsys_state *pos;

	/*
	 * Update all its descendants in pre-order traversal.  Each
	 * descendant will try to inherit its parent's FREEZING state as
	 * CGROUP_FREEZING_PARENT.
	 */
	mutex_lock(&freezer_mutex);
	rcu_read_lock();
	css_for_each_descendant_pre(pos, &freezer->css) {
		struct freezer *pos_f = css_freezer(pos);
		struct freezer *parent = parent_freezer(pos_f);

		/* skip dying csses; tryget pins @pos across the RCU break */
		if (!css_tryget(pos))
			continue;
		rcu_read_unlock();

		if (pos_f == freezer)
			/* the root of the operation changes its SELF flag */
			freezer_apply_state(pos_f, freeze,
					    CGROUP_FREEZING_SELF);
		else
			/* descendants mirror the parent's FREEZING state */
			freezer_apply_state(pos_f,
					    parent->state & CGROUP_FREEZING,
					    CGROUP_FREEZING_PARENT);

		rcu_read_lock();
		css_put(pos);
	}
	rcu_read_unlock();
	mutex_unlock(&freezer_mutex);
}
425
426static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
427 char *buffer)
428{
429 bool freeze;
430
431 if (strcmp(buffer, freezer_state_strs(0)) == 0)
432 freeze = false;
433 else if (strcmp(buffer, freezer_state_strs(CGROUP_FROZEN)) == 0)
434 freeze = true;
435 else
436 return -EINVAL;
437
438 freezer_change_state(css_freezer(css), freeze);
439 return 0;
440}
441
442static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
443 struct cftype *cft)
444{
445 struct freezer *freezer = css_freezer(css);
446
447 return (bool)(freezer->state & CGROUP_FREEZING_SELF);
448}
449
450static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
451 struct cftype *cft)
452{
453 struct freezer *freezer = css_freezer(css);
454
455 return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
456}
457
/* cgroupfs interface files; none appear in the root cgroup */
static struct cftype files[] = {
	{
		.name = "state",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = freezer_read,
		.write_string = freezer_write,
	},
	{
		.name = "self_freezing",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = freezer_self_freezing_read,
	},
	{
		.name = "parent_freezing",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = freezer_parent_freezing_read,
	},
	{ }	/* terminate */
};
477
/* freezer controller registration with the cgroup core */
struct cgroup_subsys freezer_cgrp_subsys = {
	.css_alloc	= freezer_css_alloc,
	.css_online	= freezer_css_online,
	.css_offline	= freezer_css_offline,
	.css_free	= freezer_css_free,
	.attach		= freezer_attach,
	.fork		= freezer_fork,
	.base_cftypes	= files,
};
1/*
2 * cgroup_freezer.c - control group freezer subsystem
3 *
4 * Copyright IBM Corporation, 2007
5 *
6 * Author : Cedric Le Goater <clg@fr.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2.1 of the GNU Lesser General Public License
10 * as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it would be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#include <linux/export.h>
18#include <linux/slab.h>
19#include <linux/cgroup.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/freezer.h>
23#include <linux/seq_file.h>
24
/* cgroup freezer states; see the state diagram comment below */
enum freezer_state {
	CGROUP_THAWED = 0,	/* zero so kzalloc'ed freezers start THAWED */
	CGROUP_FREEZING,	/* freeze requested, not all tasks frozen yet */
	CGROUP_FROZEN,		/* all tasks frozen */
};
30
/* per-cgroup freezer state, embedded in (and recovered from) the css */
struct freezer {
	struct cgroup_subsys_state css;
	enum freezer_state state;
	spinlock_t lock; /* protects _writes_ to state */
};
36
37static inline struct freezer *cgroup_freezer(
38 struct cgroup *cgroup)
39{
40 return container_of(
41 cgroup_subsys_state(cgroup, freezer_subsys_id),
42 struct freezer, css);
43}
44
45static inline struct freezer *task_freezer(struct task_struct *task)
46{
47 return container_of(task_subsys_state(task, freezer_subsys_id),
48 struct freezer, css);
49}
50
51bool cgroup_freezing(struct task_struct *task)
52{
53 enum freezer_state state;
54 bool ret;
55
56 rcu_read_lock();
57 state = task_freezer(task)->state;
58 ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
59 rcu_read_unlock();
60
61 return ret;
62}
63
/*
 * User-visible names, indexed by enum freezer_state.
 *
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE.
 */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
73
/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parenthesis are state labels.  The rest are edge labels.
 *
 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \_______THAWED_______/                     |
 *    \__________________________THAWED____________/
 */

/* forward declaration; the definition with its callbacks is at file end */
struct cgroup_subsys freezer_subsys;
86
87/* Locks taken and their ordering
88 * ------------------------------
89 * cgroup_mutex (AKA cgroup_lock)
90 * freezer->lock
91 * css_set_lock
92 * task->alloc_lock (AKA task_lock)
93 * task->sighand->siglock
94 *
95 * cgroup code forces css_set_lock to be taken before task->alloc_lock
96 *
97 * freezer_create(), freezer_destroy():
98 * cgroup_mutex [ by cgroup core ]
99 *
100 * freezer_can_attach():
101 * cgroup_mutex (held by caller of can_attach)
102 *
103 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
104 * freezer->lock
105 * sighand->siglock (if the cgroup is freezing)
106 *
107 * freezer_read():
108 * cgroup_mutex
109 * freezer->lock
110 * write_lock css_set_lock (cgroup iterator start)
111 * task->alloc_lock
112 * read_lock css_set_lock (cgroup iterator start)
113 *
114 * freezer_write() (freeze):
115 * cgroup_mutex
116 * freezer->lock
117 * write_lock css_set_lock (cgroup iterator start)
118 * task->alloc_lock
119 * read_lock css_set_lock (cgroup iterator start)
120 * sighand->siglock (fake signal delivery inside freeze_task())
121 *
122 * freezer_write() (unfreeze):
123 * cgroup_mutex
124 * freezer->lock
125 * write_lock css_set_lock (cgroup iterator start)
126 * task->alloc_lock
127 * read_lock css_set_lock (cgroup iterator start)
128 * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
129 * sighand->siglock
130 */
131static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup)
132{
133 struct freezer *freezer;
134
135 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
136 if (!freezer)
137 return ERR_PTR(-ENOMEM);
138
139 spin_lock_init(&freezer->lock);
140 freezer->state = CGROUP_THAWED;
141 return &freezer->css;
142}
143
/* Free the freezer; drop system_freezing_cnt if the cgroup wasn't THAWED. */
static void freezer_destroy(struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	/* FREEZING/FROZEN cgroups hold one system_freezing_cnt reference */
	if (freezer->state != CGROUP_THAWED)
		atomic_dec(&system_freezing_cnt);
	kfree(freezer);
}
152
153/* task is frozen or will freeze immediately when next it gets woken */
154static bool is_task_frozen_enough(struct task_struct *task)
155{
156 return frozen(task) ||
157 (task_is_stopped_or_traced(task) && freezing(task));
158}
159
/*
 * The call to cgroup_lock() in the freezer.state write method prevents
 * a write to that file racing against an attach, and hence the
 * can_attach() result will remain valid until the attach completes.
 */
static int freezer_can_attach(struct cgroup *new_cgroup,
			      struct cgroup_taskset *tset)
{
	struct freezer *freezer;
	struct task_struct *task;

	/*
	 * Anything frozen can't move or be moved to/from.
	 */
	cgroup_taskset_for_each(task, new_cgroup, tset)
		if (cgroup_freezing(task))
			return -EBUSY;

	/* the destination cgroup must be THAWED as well */
	freezer = cgroup_freezer(new_cgroup);
	if (freezer->state != CGROUP_THAWED)
		return -EBUSY;

	return 0;
}
184
/* cgroup post-fork callback: make the new task conform to its freezer */
static void freezer_fork(struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * No lock is needed, since the task isn't on tasklist yet,
	 * so it can't be moved to another cgroup, which means the
	 * freezer won't be removed and will be valid during this
	 * function call.  Nevertheless, apply RCU read-side critical
	 * section to suppress RCU lockdep false positives.
	 *
	 * NOTE(review): @freezer is used after rcu_read_unlock(); this
	 * relies entirely on the "not on tasklist yet" argument above -
	 * confirm that holds for all callers of the fork callback.
	 */
	rcu_read_lock();
	freezer = task_freezer(task);
	rcu_read_unlock();

	/*
	 * The root cgroup is non-freezable, so we can skip the
	 * following check.
	 */
	if (!freezer->css.cgroup->parent)
		return;

	spin_lock_irq(&freezer->lock);
	/* a FROZEN cgroup can't gain tasks; can_attach() forbids it */
	BUG_ON(freezer->state == CGROUP_FROZEN);

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task);
	spin_unlock_irq(&freezer->lock);
}
215
/*
 * Lazily complete the FREEZING -> FROZEN transition: count the cgroup's
 * tasks and mark it FROZEN once every task is frozen (or close enough).
 *
 * caller must hold freezer->lock
 */
static void update_if_frozen(struct cgroup *cgroup,
			     struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int nfrozen = 0, ntotal = 0;
	enum freezer_state old_state = freezer->state;

	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		ntotal++;
		if (freezing(task) && is_task_frozen_enough(task))
			nfrozen++;
	}

	if (old_state == CGROUP_THAWED) {
		/* THAWED cgroups must not contain freezing tasks */
		BUG_ON(nfrozen > 0);
	} else if (old_state == CGROUP_FREEZING) {
		if (nfrozen == ntotal)
			freezer->state = CGROUP_FROZEN;
	} else { /* old_state == CGROUP_FROZEN */
		BUG_ON(nfrozen != ntotal);
	}

	cgroup_iter_end(cgroup, &it);
}
245
/* Show the cgroup's freezer state via the freezer.state file. */
static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
			struct seq_file *m)
{
	struct freezer *freezer;
	enum freezer_state state;

	if (!cgroup_lock_live_group(cgroup))
		return -ENODEV;

	freezer = cgroup_freezer(cgroup);
	spin_lock_irq(&freezer->lock);
	state = freezer->state;
	if (state == CGROUP_FREEZING) {
		/* We change from FREEZING to FROZEN lazily if the cgroup was
		 * only partially frozen when we exited write. */
		update_if_frozen(cgroup, freezer);
		state = freezer->state;
	}
	spin_unlock_irq(&freezer->lock);
	cgroup_unlock();

	seq_puts(m, freezer_state_strs[state]);
	seq_putc(m, '\n');
	return 0;
}
271
/*
 * Request freezing of every task in @cgroup.  Returns -EBUSY if any task
 * could not be frozen immediately, 0 otherwise; completion is detected
 * later by update_if_frozen().
 */
static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int num_cant_freeze_now = 0;

	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		/* freeze_task() returning false means the task is exempt */
		if (!freeze_task(task))
			continue;
		if (is_task_frozen_enough(task))
			continue;
		if (!freezing(task) && !freezer_should_skip(task))
			num_cant_freeze_now++;
	}
	cgroup_iter_end(cgroup, &it);

	return num_cant_freeze_now ? -EBUSY : 0;
}
291
292static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
293{
294 struct cgroup_iter it;
295 struct task_struct *task;
296
297 cgroup_iter_start(cgroup, &it);
298 while ((task = cgroup_iter_next(cgroup, &it)))
299 __thaw_task(task);
300 cgroup_iter_end(cgroup, &it);
301}
302
/*
 * Move @cgroup toward @goal_state (THAWED or FROZEN), maintaining the
 * system_freezing_cnt reference each non-THAWED cgroup holds.  Returns
 * -EBUSY from try_to_freeze_cgroup() if not all tasks froze immediately.
 */
static int freezer_change_state(struct cgroup *cgroup,
				enum freezer_state goal_state)
{
	struct freezer *freezer;
	int retval = 0;

	freezer = cgroup_freezer(cgroup);

	spin_lock_irq(&freezer->lock);

	/* settle any pending FREEZING -> FROZEN transition first */
	update_if_frozen(cgroup, freezer);

	switch (goal_state) {
	case CGROUP_THAWED:
		if (freezer->state != CGROUP_THAWED)
			atomic_dec(&system_freezing_cnt);
		freezer->state = CGROUP_THAWED;
		unfreeze_cgroup(cgroup, freezer);
		break;
	case CGROUP_FROZEN:
		if (freezer->state == CGROUP_THAWED)
			atomic_inc(&system_freezing_cnt);
		/* go through FREEZING; FROZEN is set lazily when all froze */
		freezer->state = CGROUP_FREEZING;
		retval = try_to_freeze_cgroup(cgroup, freezer);
		break;
	default:
		BUG();
	}

	spin_unlock_irq(&freezer->lock);

	return retval;
}
336
337static int freezer_write(struct cgroup *cgroup,
338 struct cftype *cft,
339 const char *buffer)
340{
341 int retval;
342 enum freezer_state goal_state;
343
344 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
345 goal_state = CGROUP_THAWED;
346 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
347 goal_state = CGROUP_FROZEN;
348 else
349 return -EINVAL;
350
351 if (!cgroup_lock_live_group(cgroup))
352 return -ENODEV;
353 retval = freezer_change_state(cgroup, goal_state);
354 cgroup_unlock();
355 return retval;
356}
357
/* cgroupfs interface files; the state file is absent in the root cgroup */
static struct cftype files[] = {
	{
		.name = "state",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
	{ }	/* terminate */
};
367
/* freezer controller registration with the cgroup core */
struct cgroup_subsys freezer_subsys = {
	.name		= "freezer",
	.create		= freezer_create,
	.destroy	= freezer_destroy,
	.subsys_id	= freezer_subsys_id,
	.can_attach	= freezer_can_attach,
	.fork		= freezer_fork,
	.base_cftypes	= files,
};