/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */
6
#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
16
17#define ACC_MKNOD 1
18#define ACC_READ 2
19#define ACC_WRITE 4
20#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)
21
22#define DEV_BLOCK 1
23#define DEV_CHAR 2
24#define DEV_ALL 4 /* this represents all devices */
25
26static DEFINE_MUTEX(devcgroup_mutex);
27
28/*
29 * whitelist locking rules:
30 * hold devcgroup_mutex for update/read.
31 * hold rcu_read_lock() for read.
32 */
33
/*
 * One whitelist entry: a (type, major, minor) device range plus the ACC_*
 * bits granted for it.  major/minor of ~0 act as wildcards ("any").
 */
struct dev_whitelist_item {
	u32 major, minor;
	short type;		/* DEV_BLOCK, DEV_CHAR or DEV_ALL */
	short access;		/* ACC_MKNOD | ACC_READ | ACC_WRITE bits */
	struct list_head list;	/* entry in dev_cgroup->whitelist */
	struct rcu_head rcu;	/* deferred free while RCU readers may hold it */
};
41
/* Per-cgroup state: the css plus this cgroup's device whitelist. */
struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head whitelist;	/* of dev_whitelist_item, see locking rules above */
};
46
/* Map a css back to its enclosing dev_cgroup. */
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return container_of(s, struct dev_cgroup, css);
}
51
/* Fetch the dev_cgroup attached to @cgroup for this subsystem. */
static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
	return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
}
56
/* Fetch the dev_cgroup @task currently belongs to. */
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}
61
62struct cgroup_subsys devices_subsys;
63
64static int devcgroup_can_attach(struct cgroup *new_cgrp,
65 struct cgroup_taskset *set)
66{
67 struct task_struct *task = cgroup_taskset_first(set);
68
69 if (current != task && !capable(CAP_SYS_ADMIN))
70 return -EPERM;
71 return 0;
72}
73
/*
 * dev_whitelist_copy - duplicate every entry of @orig onto @dest
 *
 * Used to seed a child cgroup with its parent's whitelist.  On allocation
 * failure all entries copied so far are freed and -ENOMEM is returned,
 * leaving @dest empty.
 * called under devcgroup_mutex
 */
static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_whitelist_item *wh, *tmp, *new;

	list_for_each_entry(wh, orig, list) {
		new = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(wh, tmp, dest, list) {
		list_del(&wh->list);
		kfree(wh);
	}
	return -ENOMEM;
}
97
98/* Stupid prototype - don't bother combining existing entries */
99/*
100 * called under devcgroup_mutex
101 */
102static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
103 struct dev_whitelist_item *wh)
104{
105 struct dev_whitelist_item *whcopy, *walk;
106
107 whcopy = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
108 if (!whcopy)
109 return -ENOMEM;
110
111 list_for_each_entry(walk, &dev_cgroup->whitelist, list) {
112 if (walk->type != wh->type)
113 continue;
114 if (walk->major != wh->major)
115 continue;
116 if (walk->minor != wh->minor)
117 continue;
118
119 walk->access |= wh->access;
120 kfree(whcopy);
121 whcopy = NULL;
122 }
123
124 if (whcopy != NULL)
125 list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
126 return 0;
127}
128
/*
 * dev_whitelist_rm - revoke @wh's access bits from matching entries
 *
 * A DEV_ALL entry matches any removal request; otherwise the type must
 * match and major/minor must match unless the entry holds the ~0 wildcard.
 * Entries whose access mask drops to zero are unlinked and freed after an
 * RCU grace period so lockless readers stay safe.
 * called under devcgroup_mutex
 */
static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
			struct dev_whitelist_item *wh)
{
	struct dev_whitelist_item *walk, *tmp;

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
		if (walk->type == DEV_ALL)
			goto remove;	/* wildcard entry always affected */
		if (walk->type != wh->type)
			continue;
		if (walk->major != ~0 && walk->major != wh->major)
			continue;
		if (walk->minor != ~0 && walk->minor != wh->minor)
			continue;

remove:
		walk->access &= ~wh->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}
155
/*
 * devcgroup_create - allocate per-cgroup state for a new cgroup
 *
 * The root cgroup gets a single "allow everything" wildcard entry; a child
 * starts with a copy of its parent's whitelist (copied under
 * devcgroup_mutex so the parent list cannot change mid-copy).
 *
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *devcgroup_create(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
	struct cgroup *parent_cgroup;
	int ret;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->whitelist);
	parent_cgroup = cgroup->parent;

	if (parent_cgroup == NULL) {
		/* root: allow all devices, all access */
		struct dev_whitelist_item *wh;
		wh = kmalloc(sizeof(*wh), GFP_KERNEL);
		if (!wh) {
			kfree(dev_cgroup);
			return ERR_PTR(-ENOMEM);
		}
		wh->minor = wh->major = ~0;
		wh->type = DEV_ALL;
		wh->access = ACC_MASK;
		list_add(&wh->list, &dev_cgroup->whitelist);
	} else {
		/* child: inherit the parent's whitelist */
		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
		mutex_lock(&devcgroup_mutex);
		ret = dev_whitelist_copy(&dev_cgroup->whitelist,
				&parent_dev_cgroup->whitelist);
		mutex_unlock(&devcgroup_mutex);
		if (ret) {
			kfree(dev_cgroup);
			return ERR_PTR(ret);
		}
	}

	return &dev_cgroup->css;
}
196
/*
 * devcgroup_destroy - free the whitelist and per-cgroup state
 *
 * No readers can still see this cgroup at destroy time, so entries are
 * freed immediately rather than via RCU.
 */
static void devcgroup_destroy(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_whitelist_item *wh, *tmp;

	dev_cgroup = cgroup_to_devcgroup(cgroup);
	list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
		list_del(&wh->list);
		kfree(wh);
	}
	kfree(dev_cgroup);
}
209
210#define DEVCG_ALLOW 1
211#define DEVCG_DENY 2
212#define DEVCG_LIST 3
213
214#define MAJMINLEN 13
215#define ACCLEN 4
216
217static void set_access(char *acc, short access)
218{
219 int idx = 0;
220 memset(acc, 0, ACCLEN);
221 if (access & ACC_READ)
222 acc[idx++] = 'r';
223 if (access & ACC_WRITE)
224 acc[idx++] = 'w';
225 if (access & ACC_MKNOD)
226 acc[idx++] = 'm';
227}
228
229static char type_to_char(short type)
230{
231 if (type == DEV_ALL)
232 return 'a';
233 if (type == DEV_CHAR)
234 return 'c';
235 if (type == DEV_BLOCK)
236 return 'b';
237 return 'X';
238}
239
/*
 * set_majmin - render a major or minor number into @str, using "*" for
 * the ~0 wildcard.  @str must hold at least MAJMINLEN bytes.
 */
static void set_majmin(char *str, unsigned m)
{
	if (m != ~0)
		sprintf(str, "%u", m);
	else
		strcpy(str, "*");
}
247
/*
 * devcgroup_seq_read - "devices.list" handler
 *
 * Prints one "type major:minor access" line per whitelist entry.  The walk
 * is done under rcu_read_lock(), which the locking rules above permit for
 * readers.
 */
static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
				struct seq_file *m)
{
	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
	struct dev_whitelist_item *wh;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	list_for_each_entry_rcu(wh, &devcgroup->whitelist, list) {
		set_access(acc, wh->access);
		set_majmin(maj, wh->major);
		set_majmin(min, wh->minor);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
			   maj, min, acc);
	}
	rcu_read_unlock();

	return 0;
}
267
268/*
269 * may_access_whitelist:
270 * does the access granted to dev_cgroup c contain the access
271 * requested in whitelist item refwh.
272 * return 1 if yes, 0 if no.
273 * call with devcgroup_mutex held
274 */
275static int may_access_whitelist(struct dev_cgroup *c,
276 struct dev_whitelist_item *refwh)
277{
278 struct dev_whitelist_item *whitem;
279
280 list_for_each_entry(whitem, &c->whitelist, list) {
281 if (whitem->type & DEV_ALL)
282 return 1;
283 if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
284 continue;
285 if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR))
286 continue;
287 if (whitem->major != ~0 && whitem->major != refwh->major)
288 continue;
289 if (whitem->minor != ~0 && whitem->minor != refwh->minor)
290 continue;
291 if (refwh->access & (~whitem->access))
292 continue;
293 return 1;
294 }
295 return 0;
296}
297
298/*
299 * parent_has_perm:
300 * when adding a new allow rule to a device whitelist, the rule
301 * must be allowed in the parent device
302 */
303static int parent_has_perm(struct dev_cgroup *childcg,
304 struct dev_whitelist_item *wh)
305{
306 struct cgroup *pcg = childcg->css.cgroup->parent;
307 struct dev_cgroup *parent;
308
309 if (!pcg)
310 return 1;
311 parent = cgroup_to_devcgroup(pcg);
312 return may_access_whitelist(parent, wh);
313}
314
315/*
316 * Modify the whitelist using allow/deny rules.
317 * CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
318 * so we can give a container CAP_MKNOD to let it create devices but not
319 * modify the whitelist.
320 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
321 * us to also grant CAP_SYS_ADMIN to containers without giving away the
322 * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN
323 *
324 * Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting
325 * new access is only allowed if you're in the top-level cgroup, or your
326 * parent cgroup has the access you're asking for.
327 */
328static int devcgroup_update_access(struct dev_cgroup *devcgroup,
329 int filetype, const char *buffer)
330{
331 const char *b;
332 char *endp;
333 int count;
334 struct dev_whitelist_item wh;
335
336 if (!capable(CAP_SYS_ADMIN))
337 return -EPERM;
338
339 memset(&wh, 0, sizeof(wh));
340 b = buffer;
341
342 switch (*b) {
343 case 'a':
344 wh.type = DEV_ALL;
345 wh.access = ACC_MASK;
346 wh.major = ~0;
347 wh.minor = ~0;
348 goto handle;
349 case 'b':
350 wh.type = DEV_BLOCK;
351 break;
352 case 'c':
353 wh.type = DEV_CHAR;
354 break;
355 default:
356 return -EINVAL;
357 }
358 b++;
359 if (!isspace(*b))
360 return -EINVAL;
361 b++;
362 if (*b == '*') {
363 wh.major = ~0;
364 b++;
365 } else if (isdigit(*b)) {
366 wh.major = simple_strtoul(b, &endp, 10);
367 b = endp;
368 } else {
369 return -EINVAL;
370 }
371 if (*b != ':')
372 return -EINVAL;
373 b++;
374
375 /* read minor */
376 if (*b == '*') {
377 wh.minor = ~0;
378 b++;
379 } else if (isdigit(*b)) {
380 wh.minor = simple_strtoul(b, &endp, 10);
381 b = endp;
382 } else {
383 return -EINVAL;
384 }
385 if (!isspace(*b))
386 return -EINVAL;
387 for (b++, count = 0; count < 3; count++, b++) {
388 switch (*b) {
389 case 'r':
390 wh.access |= ACC_READ;
391 break;
392 case 'w':
393 wh.access |= ACC_WRITE;
394 break;
395 case 'm':
396 wh.access |= ACC_MKNOD;
397 break;
398 case '\n':
399 case '\0':
400 count = 3;
401 break;
402 default:
403 return -EINVAL;
404 }
405 }
406
407handle:
408 switch (filetype) {
409 case DEVCG_ALLOW:
410 if (!parent_has_perm(devcgroup, &wh))
411 return -EPERM;
412 return dev_whitelist_add(devcgroup, &wh);
413 case DEVCG_DENY:
414 dev_whitelist_rm(devcgroup, &wh);
415 break;
416 default:
417 return -EINVAL;
418 }
419 return 0;
420}
421
/*
 * Write handler shared by "devices.allow" and "devices.deny";
 * cft->private distinguishes them (DEVCG_ALLOW / DEVCG_DENY).
 * All whitelist updates are serialized behind devcgroup_mutex.
 */
static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
				  const char *buffer)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
					 cft->private, buffer);
	mutex_unlock(&devcgroup_mutex);
	return retval;
}
433
/* Control files exposed by this subsystem; .private routes the writes. */
static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write_string = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write_string = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.read_seq_string = devcgroup_seq_read,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};
452
/* Subsystem registration: wires the callbacks above into the cgroup core. */
struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.can_attach = devcgroup_can_attach,
	.create = devcgroup_create,
	.destroy = devcgroup_destroy,
	.subsys_id = devices_subsys_id,
	.base_cftypes = dev_cgroup_files,
};
461
/*
 * __devcgroup_inode_permission - check open/read/write on a device node
 * @inode: inode of the device special file being accessed
 * @mask: requested MAY_READ / MAY_WRITE bits
 *
 * Walks the current task's whitelist under rcu_read_lock(); returns 0 as
 * soon as an entry covers the device's type, major:minor and all requested
 * access bits, -EPERM if no entry matches.
 */
int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_whitelist_item *wh;

	rcu_read_lock();

	dev_cgroup = task_devcgroup(current);

	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
		if (wh->type & DEV_ALL)
			goto found;	/* wildcard: skip all other checks */
		if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
			continue;
		if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
			continue;
		if (wh->major != ~0 && wh->major != imajor(inode))
			continue;
		if (wh->minor != ~0 && wh->minor != iminor(inode))
			continue;

		if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
			continue;
		if ((mask & MAY_READ) && !(wh->access & ACC_READ))
			continue;
found:
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();

	return -EPERM;
}
496
/*
 * devcgroup_inode_mknod - check whether current may mknod @dev with @mode
 *
 * Non-device nodes (fifos, sockets, regular files) are always permitted.
 * Otherwise the current task's whitelist is searched under RCU for an
 * entry covering the device that carries ACC_MKNOD.
 * Returns 0 if allowed, -EPERM otherwise.
 */
int devcgroup_inode_mknod(int mode, dev_t dev)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_whitelist_item *wh;

	/* only block and char nodes are mediated */
	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	rcu_read_lock();

	dev_cgroup = task_devcgroup(current);

	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
		if (wh->type & DEV_ALL)
			goto found;	/* wildcard: skip all other checks */
		if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
			continue;
		if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
			continue;
		if (wh->major != ~0 && wh->major != MAJOR(dev))
			continue;
		if (wh->minor != ~0 && wh->minor != MINOR(dev))
			continue;

		if (!(wh->access & ACC_MKNOD))
			continue;
found:
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();

	return -EPERM;
}
// SPDX-License-Identifier: GPL-2.0
/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */
7
8#include <linux/device_cgroup.h>
9#include <linux/cgroup.h>
10#include <linux/ctype.h>
11#include <linux/list.h>
12#include <linux/uaccess.h>
13#include <linux/seq_file.h>
14#include <linux/slab.h>
15#include <linux/rcupdate.h>
16#include <linux/mutex.h>
17
18#ifdef CONFIG_CGROUP_DEVICE
19
20static DEFINE_MUTEX(devcgroup_mutex);
21
/*
 * Default policy of a cgroup: NONE until the css comes online (see
 * devcgroup_online), then ALLOW (exceptions deny access) or DENY
 * (exceptions grant access).
 */
enum devcg_behavior {
	DEVCG_DEFAULT_NONE,
	DEVCG_DEFAULT_ALLOW,
	DEVCG_DEFAULT_DENY,
};
27
28/*
29 * exception list locking rules:
30 * hold devcgroup_mutex for update/read.
31 * hold rcu_read_lock() for read.
32 */
33
/*
 * One exception: a (type, major, minor) device range and the DEVCG_ACC_*
 * bits it grants or denies, depending on the cgroup's behavior.
 * major/minor of ~0 act as wildcards ("any").
 */
struct dev_exception_item {
	u32 major, minor;
	short type;		/* DEVCG_DEV_BLOCK and/or DEVCG_DEV_CHAR */
	short access;		/* DEVCG_ACC_* bits */
	struct list_head list;	/* entry in dev_cgroup->exceptions */
	struct rcu_head rcu;	/* deferred free while RCU readers may hold it */
};
41
/* Per-cgroup state: default behavior plus the exception list to it. */
struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;	/* see locking rules above */
	enum devcg_behavior behavior;	/* default allow or deny */
};
47
48static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
49{
50 return s ? container_of(s, struct dev_cgroup, css) : NULL;
51}
52
/* Fetch the dev_cgroup @task currently belongs to. */
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_css(task, devices_cgrp_id));
}
57
/*
 * dev_exceptions_copy - duplicate every entry of @orig onto @dest
 *
 * Used when a cgroup comes online or switches to default-allow, to inherit
 * the parent's exceptions.  On allocation failure all entries copied so far
 * are freed and -ENOMEM is returned, leaving @dest empty.
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}
83
84/*
85 * called under devcgroup_mutex
86 */
87static int dev_exception_add(struct dev_cgroup *dev_cgroup,
88 struct dev_exception_item *ex)
89{
90 struct dev_exception_item *excopy, *walk;
91
92 lockdep_assert_held(&devcgroup_mutex);
93
94 excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
95 if (!excopy)
96 return -ENOMEM;
97
98 list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
99 if (walk->type != ex->type)
100 continue;
101 if (walk->major != ex->major)
102 continue;
103 if (walk->minor != ex->minor)
104 continue;
105
106 walk->access |= ex->access;
107 kfree(excopy);
108 excopy = NULL;
109 }
110
111 if (excopy != NULL)
112 list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
113 return 0;
114}
115
/*
 * dev_exception_rm - strip @ex's access bits from the matching exception
 *
 * Requires an exact (type, major, minor) match.  An exception whose access
 * mask drops to zero is unlinked and freed after an RCU grace period so
 * lockless readers stay safe.
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}
141
/*
 * __dev_exception_clean - drop every exception, deferring frees via RCU
 *
 * Lock-free variant used both under devcgroup_mutex (dev_exception_clean)
 * and from css_free, where no further lookups are possible.
 */
static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}
151
/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	lockdep_assert_held(&devcgroup_mutex);

	__dev_exception_clean(dev_cgroup);
}
164
165static inline bool is_devcg_online(const struct dev_cgroup *devcg)
166{
167 return (devcg->behavior != DEVCG_DEFAULT_NONE);
168}
169
/**
 * devcgroup_online - initializes devcgroup's behavior and exceptions based on
 * 		      parent's
 * @css: css getting online
 * returns 0 in case of success, error code otherwise
 *
 * The root (no parent) defaults to allow-all; children inherit the
 * parent's behavior and a copy of its exception list.  On copy failure the
 * cgroup stays DEVCG_DEFAULT_NONE, i.e. not online.
 */
static int devcgroup_online(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
	struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
	int ret = 0;

	mutex_lock(&devcgroup_mutex);

	if (parent_dev_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		if (!ret)
			dev_cgroup->behavior = parent_dev_cgroup->behavior;
	}
	mutex_unlock(&devcgroup_mutex);

	return ret;
}
196
/*
 * devcgroup_offline - mark the cgroup as no longer online
 *
 * Resetting behavior to NONE makes is_devcg_online() false, which excludes
 * this cgroup from propagate_exception()'s descendant walk.
 */
static void devcgroup_offline(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	mutex_lock(&devcgroup_mutex);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
	mutex_unlock(&devcgroup_mutex);
}
205
/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 *
 * Allocates bare per-cgroup state; behavior and inherited exceptions are
 * filled in later by devcgroup_online().
 */
static struct cgroup_subsys_state *
devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;

	return &dev_cgroup->css;
}
222
/* Release the exception list and the dev_cgroup itself. */
static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}
230
231#define DEVCG_ALLOW 1
232#define DEVCG_DENY 2
233#define DEVCG_LIST 3
234
235#define MAJMINLEN 13
236#define ACCLEN 4
237
238static void set_access(char *acc, short access)
239{
240 int idx = 0;
241 memset(acc, 0, ACCLEN);
242 if (access & DEVCG_ACC_READ)
243 acc[idx++] = 'r';
244 if (access & DEVCG_ACC_WRITE)
245 acc[idx++] = 'w';
246 if (access & DEVCG_ACC_MKNOD)
247 acc[idx++] = 'm';
248}
249
250static char type_to_char(short type)
251{
252 if (type == DEVCG_DEV_ALL)
253 return 'a';
254 if (type == DEVCG_DEV_CHAR)
255 return 'c';
256 if (type == DEVCG_DEV_BLOCK)
257 return 'b';
258 return 'X';
259}
260
/*
 * set_majmin - render a major or minor number into @str, using "*" for
 * the ~0 wildcard.  @str must hold at least MAJMINLEN bytes.
 */
static void set_majmin(char *str, unsigned m)
{
	if (m != ~0)
		sprintf(str, "%u", m);
	else
		strcpy(str, "*");
}
268
/*
 * devcgroup_seq_show - "devices.list" handler
 *
 * Prints one "type major:minor access" line per visible rule, walking the
 * exception list under rcu_read_lock().
 */
static int devcgroup_seq_show(struct seq_file *m, void *v)
{
	struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve the compatibility:
	 * - Only show the "all devices" when the default policy is to allow
	 * - List the exceptions in case the default policy is to deny
	 * This way, the file remains as a "whitelist of devices"
	 */
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, DEVCG_ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEVCG_DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}
301
302/**
303 * match_exception - iterates the exception list trying to find a complete match
304 * @exceptions: list of exceptions
305 * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
306 * @major: device file major number, ~0 to match all
307 * @minor: device file minor number, ~0 to match all
308 * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
309 *
310 * It is considered a complete match if an exception is found that will
311 * contain the entire range of provided parameters.
312 *
313 * Return: true in case it matches an exception completely
314 */
315static bool match_exception(struct list_head *exceptions, short type,
316 u32 major, u32 minor, short access)
317{
318 struct dev_exception_item *ex;
319
320 list_for_each_entry_rcu(ex, exceptions, list) {
321 if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
322 continue;
323 if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
324 continue;
325 if (ex->major != ~0 && ex->major != major)
326 continue;
327 if (ex->minor != ~0 && ex->minor != minor)
328 continue;
329 /* provided access cannot have more than the exception rule */
330 if (access & (~ex->access))
331 continue;
332 return true;
333 }
334 return false;
335}
336
/**
 * match_exception_partial - iterates the exception list trying to find a partial match
 * @exceptions: list of exceptions
 * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
 * @major: device file major number, ~0 to match all
 * @minor: device file minor number, ~0 to match all
 * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
 *
 * It is considered a partial match if an exception's range is found to
 * contain *any* of the devices specified by provided parameters. This is
 * used to make sure no extra access is being granted that is forbidden by
 * any of the exception list.
 *
 * Return: true in case the provided range matches an exception partially
 */
static bool match_exception_partial(struct list_head *exceptions, short type,
				    u32 major, u32 minor, short access)
{
	struct dev_exception_item *ex;

	list_for_each_entry_rcu(ex, exceptions, list,
				lockdep_is_held(&devcgroup_mutex)) {
		if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
			continue;
		if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
			continue;
		/*
		 * We must be sure that both the exception and the provided
		 * range aren't masking all devices
		 */
		if (ex->major != ~0 && major != ~0 && ex->major != major)
			continue;
		if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
			continue;
		/*
		 * In order to make sure the provided range isn't matching
		 * an exception, all its access bits shouldn't match the
		 * exception's access bits
		 */
		if (!(access & ex->access))
			continue;
		return true;
	}
	return false;
}
382
/**
 * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception's dev_cgroup
 *
 * This is used to make sure a child cgroup won't have more privileges
 * than its parent
 */
static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
		          struct dev_exception_item *refex,
			  enum devcg_behavior behavior)
{
	bool match = false;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
			 !lockdep_is_held(&devcgroup_mutex),
			 "device_cgroup:verify_new_ex called without proper synchronization");

	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		if (behavior == DEVCG_DEFAULT_ALLOW) {
			/*
			 * new exception in the child doesn't matter, only
			 * adding extra restrictions
			 */
			return true;
		} else {
			/*
			 * new exception in the child will add more devices
			 * that can be accessed, so it can't match any of
			 * parent's exceptions, even slightly
			 */
			match = match_exception_partial(&dev_cgroup->exceptions,
							refex->type,
							refex->major,
							refex->minor,
							refex->access);

			if (match)
				return false;
			return true;
		}
	} else {
		/*
		 * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
		 * the new exception will add access to more devices and must
		 * be contained completely in an parent's exception to be
		 * allowed
		 */
		match = match_exception(&dev_cgroup->exceptions, refex->type,
					refex->major, refex->minor,
					refex->access);

		if (match)
			/* parent has an exception that matches the proposed */
			return true;
		else
			return false;
	}
	return false;
}
444
445/*
446 * parent_has_perm:
447 * when adding a new allow rule to a device exception list, the rule
448 * must be allowed in the parent device
449 */
450static int parent_has_perm(struct dev_cgroup *childcg,
451 struct dev_exception_item *ex)
452{
453 struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
454
455 if (!parent)
456 return 1;
457 return verify_new_ex(parent, ex, childcg->behavior);
458}
459
/**
 * parent_allows_removal - verify if it's ok to remove an exception
 * @childcg: child cgroup from where the exception will be removed
 * @ex: exception being removed
 *
 * When removing an exception in cgroups with default ALLOW policy, it must
 * be checked if removing it will give the child cgroup more access than the
 * parent.
 *
 * Return: true if it's ok to remove exception, false otherwise
 */
static bool parent_allows_removal(struct dev_cgroup *childcg,
				  struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);

	if (!parent)
		return true;

	/* It's always allowed to remove access to devices */
	if (childcg->behavior == DEVCG_DEFAULT_DENY)
		return true;

	/*
	 * Make sure you're not removing part or a whole exception existing in
	 * the parent cgroup
	 */
	return !match_exception_partial(&parent->exceptions, ex->type,
					ex->major, ex->minor, ex->access);
}
490
491/**
492 * may_allow_all - checks if it's possible to change the behavior to
493 * allow based on parent's rules.
494 * @parent: device cgroup's parent
495 * returns: != 0 in case it's allowed, 0 otherwise
496 */
497static inline int may_allow_all(struct dev_cgroup *parent)
498{
499 if (!parent)
500 return 1;
501 return parent->behavior == DEVCG_DEFAULT_ALLOW;
502}
503
504/**
505 * revalidate_active_exceptions - walks through the active exception list and
506 * revalidates the exceptions based on parent's
507 * behavior and exceptions. The exceptions that
508 * are no longer valid will be removed.
509 * Called with devcgroup_mutex held.
510 * @devcg: cgroup which exceptions will be checked
511 *
512 * This is one of the three key functions for hierarchy implementation.
513 * This function is responsible for re-evaluating all the cgroup's active
514 * exceptions due to a parent's exception change.
515 * Refer to Documentation/admin-guide/cgroup-v1/devices.rst for more details.
516 */
517static void revalidate_active_exceptions(struct dev_cgroup *devcg)
518{
519 struct dev_exception_item *ex;
520 struct list_head *this, *tmp;
521
522 list_for_each_safe(this, tmp, &devcg->exceptions) {
523 ex = container_of(this, struct dev_exception_item, list);
524 if (!parent_has_perm(devcg, ex))
525 dev_exception_rm(devcg, ex);
526 }
527}
528
/**
 * propagate_exception - propagates a new exception to the children
 * @devcg_root: device cgroup that added a new exception
 * @ex: new exception to be propagated
 *
 * Walks the online descendants of @devcg_root, applying @ex to each and
 * then revalidating that cgroup's own exceptions.  The RCU read lock is
 * dropped around the per-cgroup mutation (which may sleep in kmemdup) and
 * retaken to continue the walk; devcgroup_mutex keeps the tree stable
 * meanwhile.
 *
 * returns: 0 in case of success, != 0 in case of error
 */
static int propagate_exception(struct dev_cgroup *devcg_root,
			       struct dev_exception_item *ex)
{
	struct cgroup_subsys_state *pos;
	int rc = 0;

	rcu_read_lock();

	css_for_each_descendant_pre(pos, &devcg_root->css) {
		struct dev_cgroup *devcg = css_to_devcgroup(pos);

		/*
		 * Because devcgroup_mutex is held, no devcg will become
		 * online or offline during the tree walk (see on/offline
		 * methods), and online ones are safe to access outside RCU
		 * read lock without bumping refcnt.
		 */
		if (pos == &devcg_root->css || !is_devcg_online(devcg))
			continue;

		rcu_read_unlock();

		/*
		 * in case both root's behavior and devcg is allow, a new
		 * restriction means adding to the exception list
		 */
		if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
			rc = dev_exception_add(devcg, ex);
			if (rc)
				return rc;
		} else {
			/*
			 * in the other possible cases:
			 * root's behavior: allow, devcg's: deny
			 * root's behavior: deny, devcg's: deny
			 * the exception will be removed
			 */
			dev_exception_rm(devcg, ex);
		}
		revalidate_active_exceptions(devcg);

		rcu_read_lock();
	}

	rcu_read_unlock();
	return rc;
}
584
585/*
586 * Modify the exception list using allow/deny rules.
587 * CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
588 * so we can give a container CAP_MKNOD to let it create devices but not
589 * modify the exception list.
590 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
591 * us to also grant CAP_SYS_ADMIN to containers without giving away the
592 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
593 *
594 * Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting
595 * new access is only allowed if you're in the top-level cgroup, or your
596 * parent cgroup has the access you're asking for.
597 */
598static int devcgroup_update_access(struct dev_cgroup *devcgroup,
599 int filetype, char *buffer)
600{
601 const char *b;
602 char temp[12]; /* 11 + 1 characters needed for a u32 */
603 int count, rc = 0;
604 struct dev_exception_item ex;
605 struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
606
607 if (!capable(CAP_SYS_ADMIN))
608 return -EPERM;
609
610 memset(&ex, 0, sizeof(ex));
611 b = buffer;
612
613 switch (*b) {
614 case 'a':
615 switch (filetype) {
616 case DEVCG_ALLOW:
617 if (css_has_online_children(&devcgroup->css))
618 return -EINVAL;
619
620 if (!may_allow_all(parent))
621 return -EPERM;
622 dev_exception_clean(devcgroup);
623 devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
624 if (!parent)
625 break;
626
627 rc = dev_exceptions_copy(&devcgroup->exceptions,
628 &parent->exceptions);
629 if (rc)
630 return rc;
631 break;
632 case DEVCG_DENY:
633 if (css_has_online_children(&devcgroup->css))
634 return -EINVAL;
635
636 dev_exception_clean(devcgroup);
637 devcgroup->behavior = DEVCG_DEFAULT_DENY;
638 break;
639 default:
640 return -EINVAL;
641 }
642 return 0;
643 case 'b':
644 ex.type = DEVCG_DEV_BLOCK;
645 break;
646 case 'c':
647 ex.type = DEVCG_DEV_CHAR;
648 break;
649 default:
650 return -EINVAL;
651 }
652 b++;
653 if (!isspace(*b))
654 return -EINVAL;
655 b++;
656 if (*b == '*') {
657 ex.major = ~0;
658 b++;
659 } else if (isdigit(*b)) {
660 memset(temp, 0, sizeof(temp));
661 for (count = 0; count < sizeof(temp) - 1; count++) {
662 temp[count] = *b;
663 b++;
664 if (!isdigit(*b))
665 break;
666 }
667 rc = kstrtou32(temp, 10, &ex.major);
668 if (rc)
669 return -EINVAL;
670 } else {
671 return -EINVAL;
672 }
673 if (*b != ':')
674 return -EINVAL;
675 b++;
676
677 /* read minor */
678 if (*b == '*') {
679 ex.minor = ~0;
680 b++;
681 } else if (isdigit(*b)) {
682 memset(temp, 0, sizeof(temp));
683 for (count = 0; count < sizeof(temp) - 1; count++) {
684 temp[count] = *b;
685 b++;
686 if (!isdigit(*b))
687 break;
688 }
689 rc = kstrtou32(temp, 10, &ex.minor);
690 if (rc)
691 return -EINVAL;
692 } else {
693 return -EINVAL;
694 }
695 if (!isspace(*b))
696 return -EINVAL;
697 for (b++, count = 0; count < 3; count++, b++) {
698 switch (*b) {
699 case 'r':
700 ex.access |= DEVCG_ACC_READ;
701 break;
702 case 'w':
703 ex.access |= DEVCG_ACC_WRITE;
704 break;
705 case 'm':
706 ex.access |= DEVCG_ACC_MKNOD;
707 break;
708 case '\n':
709 case '\0':
710 count = 3;
711 break;
712 default:
713 return -EINVAL;
714 }
715 }
716
717 switch (filetype) {
718 case DEVCG_ALLOW:
719 /*
720 * If the default policy is to allow by default, try to remove
721 * an matching exception instead. And be silent about it: we
722 * don't want to break compatibility
723 */
724 if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
725 /* Check if the parent allows removing it first */
726 if (!parent_allows_removal(devcgroup, &ex))
727 return -EPERM;
728 dev_exception_rm(devcgroup, &ex);
729 break;
730 }
731
732 if (!parent_has_perm(devcgroup, &ex))
733 return -EPERM;
734 rc = dev_exception_add(devcgroup, &ex);
735 break;
736 case DEVCG_DENY:
737 /*
738 * If the default policy is to deny by default, try to remove
739 * an matching exception instead. And be silent about it: we
740 * don't want to break compatibility
741 */
742 if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
743 dev_exception_rm(devcgroup, &ex);
744 else
745 rc = dev_exception_add(devcgroup, &ex);
746
747 if (rc)
748 break;
749 /* we only propagate new restrictions */
750 rc = propagate_exception(devcgroup, &ex);
751 break;
752 default:
753 rc = -EINVAL;
754 }
755 return rc;
756}
757
758static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
759 char *buf, size_t nbytes, loff_t off)
760{
761 int retval;
762
763 mutex_lock(&devcgroup_mutex);
764 retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
765 of_cft(of)->private, strstrip(buf));
766 mutex_unlock(&devcgroup_mutex);
767 return retval ?: nbytes;
768}
769
/*
 * Legacy (cgroup v1) control files:
 *   devices.allow - write: add an allow rule / drop a matching deny rule
 *   devices.deny  - write: add a deny rule / drop a matching allow rule
 *   devices.list  - read: dump the current behavior and exception list
 * The DEVCG_* filetype is handed to the handlers through ->private.
 */
static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.seq_show = devcgroup_seq_show,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};
788
/*
 * Device cgroup subsystem registration.  Only legacy_cftypes is set, so
 * the devices.{allow,deny,list} files are exposed on the v1 hierarchy
 * only; on the default hierarchy device control is presumably handled
 * via BPF (see devcgroup_check_permission() below).
 */
struct cgroup_subsys devices_cgrp_subsys = {
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.css_online = devcgroup_online,
	.css_offline = devcgroup_offline,
	.legacy_cftypes = dev_cgroup_files,
};
796
797/**
798 * devcgroup_legacy_check_permission - checks if an inode operation is permitted
799 * @dev_cgroup: the dev cgroup to be tested against
800 * @type: device type
801 * @major: device major number
802 * @minor: device minor number
803 * @access: combination of DEVCG_ACC_WRITE, DEVCG_ACC_READ and DEVCG_ACC_MKNOD
804 *
805 * returns 0 on success, -EPERM case the operation is not permitted
806 */
807static int devcgroup_legacy_check_permission(short type, u32 major, u32 minor,
808 short access)
809{
810 struct dev_cgroup *dev_cgroup;
811 bool rc;
812
813 rcu_read_lock();
814 dev_cgroup = task_devcgroup(current);
815 if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
816 /* Can't match any of the exceptions, even partially */
817 rc = !match_exception_partial(&dev_cgroup->exceptions,
818 type, major, minor, access);
819 else
820 /* Need to match completely one exception to be allowed */
821 rc = match_exception(&dev_cgroup->exceptions, type, major,
822 minor, access);
823 rcu_read_unlock();
824
825 if (!rc)
826 return -EPERM;
827
828 return 0;
829}
830
831#endif /* CONFIG_CGROUP_DEVICE */
832
833#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
834
835int devcgroup_check_permission(short type, u32 major, u32 minor, short access)
836{
837 int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
838
839 if (rc)
840 return -EPERM;
841
842 #ifdef CONFIG_CGROUP_DEVICE
843 return devcgroup_legacy_check_permission(type, major, minor, access);
844
845 #else /* CONFIG_CGROUP_DEVICE */
846 return 0;
847
848 #endif /* CONFIG_CGROUP_DEVICE */
849}
850EXPORT_SYMBOL(devcgroup_check_permission);
851#endif /* defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF) */