Linux Audio

Check our new training course

Loading...
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * device_cgroup.c - device cgroup subsystem
  4 *
  5 * Copyright 2007 IBM Corp
  6 */
  7
  8#include <linux/device_cgroup.h>
  9#include <linux/cgroup.h>
 10#include <linux/ctype.h>
 11#include <linux/list.h>
 12#include <linux/uaccess.h>
 13#include <linux/seq_file.h>
 14#include <linux/slab.h>
 15#include <linux/rcupdate.h>
 16#include <linux/mutex.h>
 17
 18static DEFINE_MUTEX(devcgroup_mutex);
 
 
 
 
 
 
 
 19
/* Default policy applied by a device cgroup before exceptions are consulted. */
enum devcg_behavior {
	DEVCG_DEFAULT_NONE,	/* not initialized yet (set while offline) */
	DEVCG_DEFAULT_ALLOW,	/* allow everything; exceptions deny access */
	DEVCG_DEFAULT_DENY,	/* deny everything; exceptions grant access */
};
 25
 26/*
 27 * exception list locking rules:
 28 * hold devcgroup_mutex for update/read.
 29 * hold rcu_read_lock() for read.
 30 */
 31
/* One entry in a device cgroup's exception list. */
struct dev_exception_item {
	u32 major, minor;	/* device numbers; ~0 is treated as a wildcard */
	short type;		/* DEVCG_DEV_BLOCK / DEVCG_DEV_CHAR bits */
	short access;		/* bitmask of DEVCG_ACC_READ/WRITE/MKNOD */
	struct list_head list;	/* linked into dev_cgroup->exceptions */
	struct rcu_head rcu;	/* for deferred freeing via kfree_rcu() */
};
 39
/*
 * Per-cgroup device-access state: a default behavior plus the exception
 * list that modifies it (see the locking rules above).
 */
struct dev_cgroup {
	struct cgroup_subsys_state css;	/* embedded cgroup core state */
	struct list_head exceptions;	/* list of dev_exception_item */
	enum devcg_behavior behavior;	/* default allow/deny policy */
};
 45
 46static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
 47{
 48	return s ? container_of(s, struct dev_cgroup, css) : NULL;
 
 
 
 
 
 49}
 50
/* Return the dev_cgroup that @task is attached to in the devices hierarchy. */
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_css(task, devices_cgrp_id));
}
 55
/*
 * dev_exceptions_copy - duplicate every exception on @orig onto @dest
 *
 * Returns 0 on success.  On allocation failure any entries already copied
 * to @dest are freed again and -ENOMEM is returned, leaving @dest empty.
 *
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	/* undo the partial copy */
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}
 81
 
/*
 * dev_exception_add - add (or merge) an exception into @dev_cgroup's list
 *
 * If an entry with the same type/major/minor already exists, its access
 * bits are OR-ed with @ex's instead of adding a duplicate entry.
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	/* copied up front; freed again below if a merge target is found */
	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		/* exact device match: merge access bits into existing entry */
		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}
113
/*
 * dev_exception_rm - strip @ex's access bits from matching exceptions
 *
 * Entries whose access mask becomes empty are unlinked and freed after a
 * grace period so concurrent RCU readers stay safe.
 *
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			/* no access bits left: drop the whole entry */
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}
139
/* Unlink and RCU-free every entry on @dev_cgroup's exception list. */
static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}
149
/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * Locked wrapper around __dev_exception_clean().
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	lockdep_assert_held(&devcgroup_mutex);

	__dev_exception_clean(dev_cgroup);
}
162
163static inline bool is_devcg_online(const struct dev_cgroup *devcg)
164{
165	return (devcg->behavior != DEVCG_DEFAULT_NONE);
166}
167
/**
 * devcgroup_online - initializes devcgroup's behavior and exceptions based on
 * 		      parent's
 * @css: css getting online
 * returns 0 in case of success, error code otherwise
 *
 * The root cgroup (no parent) defaults to ALLOW; children inherit both the
 * parent's behavior and a copy of its exception list.
 */
static int devcgroup_online(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
	struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
	int ret = 0;

	mutex_lock(&devcgroup_mutex);

	if (parent_dev_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		if (!ret)
			/* behavior inherited only if the copy succeeded */
			dev_cgroup->behavior = parent_dev_cgroup->behavior;
	}
	mutex_unlock(&devcgroup_mutex);

	return ret;
}
194
/* Mark the cgroup offline by resetting its behavior to NONE. */
static void devcgroup_offline(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	mutex_lock(&devcgroup_mutex);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
	mutex_unlock(&devcgroup_mutex);
}
203
/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 *
 * Allocates a new dev_cgroup with an empty exception list; the behavior
 * stays DEVCG_DEFAULT_NONE until devcgroup_online() runs.
 */
static struct cgroup_subsys_state *
devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;

	return &dev_cgroup->css;
}
220
/* Release a dev_cgroup: drop all exceptions, then the structure itself. */
static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}
228
229#define DEVCG_ALLOW 1
230#define DEVCG_DENY 2
231#define DEVCG_LIST 3
232
233#define MAJMINLEN 13
234#define ACCLEN 4
235
236static void set_access(char *acc, short access)
237{
238	int idx = 0;
239	memset(acc, 0, ACCLEN);
240	if (access & DEVCG_ACC_READ)
241		acc[idx++] = 'r';
242	if (access & DEVCG_ACC_WRITE)
243		acc[idx++] = 'w';
244	if (access & DEVCG_ACC_MKNOD)
245		acc[idx++] = 'm';
246}
247
248static char type_to_char(short type)
249{
250	if (type == DEVCG_DEV_ALL)
251		return 'a';
252	if (type == DEVCG_DEV_CHAR)
253		return 'c';
254	if (type == DEVCG_DEV_BLOCK)
255		return 'b';
256	return 'X';
257}
258
/*
 * set_majmin - format a major/minor number for devices.list
 * @str: destination buffer (MAJMINLEN bytes is always enough for a u32)
 * @m: the number; ~0 is the wildcard and prints as "*"
 */
static void set_majmin(char *str, unsigned m)
{
	if (m != ~0)
		sprintf(str, "%u", m);
	else
		strcpy(str, "*");
}
266
/* seq_file show handler for devices.list ("type major:minor access" lines). */
static int devcgroup_seq_show(struct seq_file *m, void *v)
{
	struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve the compatibility:
	 * - Only show the "all devices" when the default policy is to allow
	 * - List the exceptions in case the default policy is to deny
	 * This way, the file remains as a "whitelist of devices"
	 */
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, DEVCG_ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEVCG_DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}
299
/**
 * match_exception	- iterates the exception list trying to find a complete match
 * @exceptions: list of exceptions
 * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
 * @major: device file major number, ~0 to match all
 * @minor: device file minor number, ~0 to match all
 * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
 *
 * It is considered a complete match if an exception is found that will
 * contain the entire range of provided parameters.
 *
 * Return: true in case it matches an exception completely
 */
static bool match_exception(struct list_head *exceptions, short type,
			    u32 major, u32 minor, short access)
{
	struct dev_exception_item *ex;

	list_for_each_entry_rcu(ex, exceptions, list) {
		if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
			continue;
		if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
			continue;
		/* exception's ~0 covers any major/minor */
		if (ex->major != ~0 && ex->major != major)
			continue;
		if (ex->minor != ~0 && ex->minor != minor)
			continue;
		/* provided access cannot have more than the exception rule */
		if (access & (~ex->access))
			continue;
		return true;
	}
	return false;
}
334
/**
 * match_exception_partial - iterates the exception list trying to find a partial match
 * @exceptions: list of exceptions
 * @type: device type (DEVCG_DEV_BLOCK or DEVCG_DEV_CHAR)
 * @major: device file major number, ~0 to match all
 * @minor: device file minor number, ~0 to match all
 * @access: permission mask (DEVCG_ACC_READ, DEVCG_ACC_WRITE, DEVCG_ACC_MKNOD)
 *
 * It is considered a partial match if an exception's range is found to
 * contain *any* of the devices specified by provided parameters. This is
 * used to make sure no extra access is being granted that is forbidden by
 * any of the exception list.
 *
 * Return: true in case the provided range partially matches an exception
 */
static bool match_exception_partial(struct list_head *exceptions, short type,
				    u32 major, u32 minor, short access)
{
	struct dev_exception_item *ex;

	list_for_each_entry_rcu(ex, exceptions, list) {
		if ((type & DEVCG_DEV_BLOCK) && !(ex->type & DEVCG_DEV_BLOCK))
			continue;
		if ((type & DEVCG_DEV_CHAR) && !(ex->type & DEVCG_DEV_CHAR))
			continue;
		/*
		 * We must be sure that both the exception and the provided
		 * range aren't masking all devices
		 */
		if (ex->major != ~0 && major != ~0 && ex->major != major)
			continue;
		if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
			continue;
		/*
		 * In order to make sure the provided range isn't matching
		 * an exception, all its access bits shouldn't match the
		 * exception's access bits
		 */
		if (!(access & ex->access))
			continue;
		return true;
	}
	return false;
}
379
/**
 * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception's dev_cgroup
 *
 * This is used to make sure a child cgroup won't have more privileges
 * than its parent
 */
static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
		          struct dev_exception_item *refex,
		          enum devcg_behavior behavior)
{
	bool match = false;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&
			 !lockdep_is_held(&devcgroup_mutex),
			 "device_cgroup:verify_new_ex called without proper synchronization");

	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		if (behavior == DEVCG_DEFAULT_ALLOW) {
			/*
			 * new exception in the child doesn't matter, only
			 * adding extra restrictions
			 */
			return true;
		} else {
			/*
			 * new exception in the child will add more devices
			 * that can be accessed, so it can't match any of
			 * parent's exceptions, even slightly
			 */
			match = match_exception_partial(&dev_cgroup->exceptions,
							refex->type,
							refex->major,
							refex->minor,
							refex->access);

			if (match)
				return false;
			return true;
		}
	} else {
		/*
		 * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
		 * the new exception will add access to more devices and must
		 * be contained completely in an parent's exception to be
		 * allowed
		 */
		match = match_exception(&dev_cgroup->exceptions, refex->type,
					refex->major, refex->minor,
					refex->access);

		if (match)
			/* parent has an exception that matches the proposed */
			return true;
		else
			return false;
	}
	return false;
}
441
/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device
 *
 * The root cgroup (no parent) can always add rules.
 */
static int parent_has_perm(struct dev_cgroup *childcg,
				  struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);

	if (!parent)
		return 1;
	return verify_new_ex(parent, ex, childcg->behavior);
}
456
/**
 * parent_allows_removal - verify if it's ok to remove an exception
 * @childcg: child cgroup from where the exception will be removed
 * @ex: exception being removed
 *
 * When removing an exception in cgroups with default ALLOW policy, it must
 * be checked if removing it will give the child cgroup more access than the
 * parent.
 *
 * Return: true if it's ok to remove exception, false otherwise
 */
static bool parent_allows_removal(struct dev_cgroup *childcg,
				  struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);

	if (!parent)
		return true;

	/* It's always allowed to remove access to devices */
	if (childcg->behavior == DEVCG_DEFAULT_DENY)
		return true;

	/*
	 * Make sure you're not removing part or a whole exception existing in
	 * the parent cgroup
	 */
	return !match_exception_partial(&parent->exceptions, ex->type,
					ex->major, ex->minor, ex->access);
}
487
488/**
489 * may_allow_all - checks if it's possible to change the behavior to
490 *		   allow based on parent's rules.
491 * @parent: device cgroup's parent
492 * returns: != 0 in case it's allowed, 0 otherwise
493 */
494static inline int may_allow_all(struct dev_cgroup *parent)
495{
496	if (!parent)
497		return 1;
498	return parent->behavior == DEVCG_DEFAULT_ALLOW;
499}
500
501/**
502 * revalidate_active_exceptions - walks through the active exception list and
503 * 				  revalidates the exceptions based on parent's
504 * 				  behavior and exceptions. The exceptions that
505 * 				  are no longer valid will be removed.
506 * 				  Called with devcgroup_mutex held.
507 * @devcg: cgroup which exceptions will be checked
508 *
509 * This is one of the three key functions for hierarchy implementation.
510 * This function is responsible for re-evaluating all the cgroup's active
511 * exceptions due to a parent's exception change.
512 * Refer to Documentation/admin-guide/cgroup-v1/devices.rst for more details.
513 */
514static void revalidate_active_exceptions(struct dev_cgroup *devcg)
515{
516	struct dev_exception_item *ex;
517	struct list_head *this, *tmp;
518
519	list_for_each_safe(this, tmp, &devcg->exceptions) {
520		ex = container_of(this, struct dev_exception_item, list);
521		if (!parent_has_perm(devcg, ex))
522			dev_exception_rm(devcg, ex);
523	}
524}
525
/**
 * propagate_exception - propagates a new exception to the children
 * @devcg_root: device cgroup that added a new exception
 * @ex: new exception to be propagated
 *
 * Called with devcgroup_mutex held (see devcgroup_access_write).
 *
 * returns: 0 in case of success, != 0 in case of error
 */
static int propagate_exception(struct dev_cgroup *devcg_root,
			       struct dev_exception_item *ex)
{
	struct cgroup_subsys_state *pos;
	int rc = 0;

	rcu_read_lock();

	css_for_each_descendant_pre(pos, &devcg_root->css) {
		struct dev_cgroup *devcg = css_to_devcgroup(pos);

		/*
		 * Because devcgroup_mutex is held, no devcg will become
		 * online or offline during the tree walk (see on/offline
		 * methods), and online ones are safe to access outside RCU
		 * read lock without bumping refcnt.
		 */
		if (pos == &devcg_root->css || !is_devcg_online(devcg))
			continue;

		/* sleepable work below; safe to drop RCU per comment above */
		rcu_read_unlock();

		/*
		 * in case both root's behavior and devcg is allow, a new
		 * restriction means adding to the exception list
		 */
		if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
			rc = dev_exception_add(devcg, ex);
			if (rc)
				return rc;
		} else {
			/*
			 * in the other possible cases:
			 * root's behavior: allow, devcg's: deny
			 * root's behavior: deny, devcg's: deny
			 * the exception will be removed
			 */
			dev_exception_rm(devcg, ex);
		}
		/* the child may now hold exceptions its parent forbids */
		revalidate_active_exceptions(devcg);

		rcu_read_lock();
	}

	rcu_read_unlock();
	return rc;
}
581
/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 *
 * @buffer is either "a" (change default behavior) or a rule of the form
 * "<type> <major>:<minor> <access>", e.g. "b 8:* rwm".
 * Called under devcgroup_mutex (see devcgroup_access_write).
 */
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc = 0;
	struct dev_exception_item ex;
	struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(&ex, 0, sizeof(ex));
	b = buffer;

	switch (*b) {
	case 'a':
		/* "a": switch the default behavior for the whole cgroup */
		switch (filetype) {
		case DEVCG_ALLOW:
			/* behavior changes are refused once children exist */
			if (css_has_online_children(&devcgroup->css))
				return -EINVAL;

			if (!may_allow_all(parent))
				return -EPERM;
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			if (!parent)
				break;

			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc)
				return rc;
			break;
		case DEVCG_DENY:
			if (css_has_online_children(&devcgroup->css))
				return -EINVAL;

			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEVCG_DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEVCG_DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
	/* read major: '*' wildcard or a decimal u32 */
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
	/* read access: up to three of 'r', 'w', 'm' */
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= DEVCG_ACC_READ;
			break;
		case 'w':
			ex.access |= DEVCG_ACC_WRITE;
			break;
		case 'm':
			ex.access |= DEVCG_ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;	/* terminate the loop */
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		/*
		 * If the default policy is to allow by default, try to remove
		 * an matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			/* Check if the parent allows removing it first */
			if (!parent_allows_removal(devcgroup, &ex))
				return -EPERM;
			dev_exception_rm(devcgroup, &ex);
			break;
		}

		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		rc = dev_exception_add(devcgroup, &ex);
		break;
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny by default, try to remove
		 * an matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
			dev_exception_rm(devcgroup, &ex);
		else
			rc = dev_exception_add(devcgroup, &ex);

		if (rc)
			break;
		/* we only propagate new restrictions */
		rc = propagate_exception(devcgroup, &ex);
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}
754
/* Write handler for devices.allow / devices.deny (which one is in ->private). */
static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
					 of_cft(of)->private, strstrip(buf));
	mutex_unlock(&devcgroup_mutex);
	/* on success report the whole buffer as consumed */
	return retval ?: nbytes;
}
766
/* Control files exposed by the devices controller (cgroup v1). */
static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.seq_show = devcgroup_seq_show,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};
785
/* Registration of the devices controller with the cgroup core. */
struct cgroup_subsys devices_cgrp_subsys = {
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.css_online = devcgroup_online,
	.css_offline = devcgroup_offline,
	.legacy_cftypes = dev_cgroup_files,
};
793
/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of DEVCG_ACC_WRITE, DEVCG_ACC_READ and DEVCG_ACC_MKNOD
 *
 * The check is performed against current's device cgroup.
 *
 * returns 0 on success, -EPERM case the operation is not permitted
 */
int __devcgroup_check_permission(short type, u32 major, u32 minor,
				 short access)
{
	struct dev_cgroup *dev_cgroup;
	bool rc;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
		/* Can't match any of the exceptions, even partially */
		rc = !match_exception_partial(&dev_cgroup->exceptions,
					      type, major, minor, access);
	else
		/* Need to match completely one exception to be allowed */
		rc = match_exception(&dev_cgroup->exceptions, type, major,
				     minor, access);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}
v3.5.6
 
  1/*
  2 * device_cgroup.c - device cgroup subsystem
  3 *
  4 * Copyright 2007 IBM Corp
  5 */
  6
  7#include <linux/device_cgroup.h>
  8#include <linux/cgroup.h>
  9#include <linux/ctype.h>
 10#include <linux/list.h>
 11#include <linux/uaccess.h>
 12#include <linux/seq_file.h>
 13#include <linux/slab.h>
 14#include <linux/rcupdate.h>
 15#include <linux/mutex.h>
 16
 17#define ACC_MKNOD 1
 18#define ACC_READ  2
 19#define ACC_WRITE 4
 20#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)
 21
 22#define DEV_BLOCK 1
 23#define DEV_CHAR  2
 24#define DEV_ALL   4  /* this represents all devices */
 25
 26static DEFINE_MUTEX(devcgroup_mutex);
 
 
 
 
 27
 28/*
 29 * whitelist locking rules:
 30 * hold devcgroup_mutex for update/read.
 31 * hold rcu_read_lock() for read.
 32 */
 33
/* One entry in a device cgroup's whitelist (v3.5 model: allow-list only). */
struct dev_whitelist_item {
	u32 major, minor;	/* device numbers; ~0 is a wildcard */
	short type;		/* DEV_BLOCK, DEV_CHAR or DEV_ALL */
	short access;		/* bitmask of ACC_READ/ACC_WRITE/ACC_MKNOD */
	struct list_head list;	/* linked into dev_cgroup->whitelist */
	struct rcu_head rcu;	/* for deferred free via kfree_rcu() */
};
 41
/* Per-cgroup state: just the whitelist (no default behavior in this version). */
struct dev_cgroup {
	struct cgroup_subsys_state css;	/* embedded cgroup core state */
	struct list_head whitelist;	/* list of dev_whitelist_item */
};
 46
/* Map a cgroup_subsys_state back to its enclosing dev_cgroup. */
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return container_of(s, struct dev_cgroup, css);
}
 51
/* Return the dev_cgroup attached to @cgroup via the devices subsystem. */
static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
	return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
}
 56
/* Return the dev_cgroup that @task belongs to in the devices hierarchy. */
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}
 61
 62struct cgroup_subsys devices_subsys;
 63
/*
 * Attach policy: a task may always move itself; moving another task
 * requires CAP_SYS_ADMIN.
 */
static int devcgroup_can_attach(struct cgroup *new_cgrp,
				struct cgroup_taskset *set)
{
	struct task_struct *task = cgroup_taskset_first(set);

	if (current != task && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}
 73
/*
 * dev_whitelist_copy - duplicate every whitelist entry from @orig onto @dest
 *
 * Returns 0 on success; on allocation failure frees the partial copy on
 * @dest and returns -ENOMEM.
 *
 * called under devcgroup_mutex
 */
static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_whitelist_item *wh, *tmp, *new;

	list_for_each_entry(wh, orig, list) {
		new = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	/* undo the partial copy */
	list_for_each_entry_safe(wh, tmp, dest, list) {
		list_del(&wh->list);
		kfree(wh);
	}
	return -ENOMEM;
}
 97
/* Stupid prototype - don't bother combining existing entries */
/*
 * dev_whitelist_add - add (or merge) a whitelist entry
 *
 * If an entry with the same type/major/minor exists, its access bits are
 * OR-ed with @wh's instead of adding a duplicate.  Returns 0 on success,
 * -ENOMEM on allocation failure.
 *
 * called under devcgroup_mutex
 */
static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
			struct dev_whitelist_item *wh)
{
	struct dev_whitelist_item *whcopy, *walk;

	whcopy = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
	if (!whcopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->whitelist, list) {
		if (walk->type != wh->type)
			continue;
		if (walk->major != wh->major)
			continue;
		if (walk->minor != wh->minor)
			continue;

		/* exact device match: merge into the existing entry */
		walk->access |= wh->access;
		kfree(whcopy);
		whcopy = NULL;
	}

	if (whcopy != NULL)
		list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
	return 0;
}
128
/*
 * dev_whitelist_rm - strip @wh's access bits from matching whitelist entries
 *
 * DEV_ALL entries always match.  Entries whose access mask becomes empty
 * are unlinked and freed after an RCU grace period.
 *
 * called under devcgroup_mutex
 */
static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
			struct dev_whitelist_item *wh)
{
	struct dev_whitelist_item *walk, *tmp;

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
		if (walk->type == DEV_ALL)
			goto remove;	/* a catch-all entry always matches */
		if (walk->type != wh->type)
			continue;
		if (walk->major != ~0 && walk->major != wh->major)
			continue;
		if (walk->minor != ~0 && walk->minor != wh->minor)
			continue;

remove:
		walk->access &= ~wh->access;
		if (!walk->access) {
			/* no access bits left: drop the whole entry */
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}
155
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 *
 * The root cgroup starts with a single "allow everything" entry; children
 * inherit a copy of the parent's whitelist.
 */
static struct cgroup_subsys_state *devcgroup_create(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
	struct cgroup *parent_cgroup;
	int ret;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->whitelist);
	parent_cgroup = cgroup->parent;

	if (parent_cgroup == NULL) {
		/* root: seed the whitelist with a catch-all entry */
		struct dev_whitelist_item *wh;
		wh = kmalloc(sizeof(*wh), GFP_KERNEL);
		if (!wh) {
			kfree(dev_cgroup);
			return ERR_PTR(-ENOMEM);
		}
		wh->minor = wh->major = ~0;
		wh->type = DEV_ALL;
		wh->access = ACC_MASK;
		list_add(&wh->list, &dev_cgroup->whitelist);
	} else {
		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
		mutex_lock(&devcgroup_mutex);
		ret = dev_whitelist_copy(&dev_cgroup->whitelist,
				&parent_dev_cgroup->whitelist);
		mutex_unlock(&devcgroup_mutex);
		if (ret) {
			kfree(dev_cgroup);
			return ERR_PTR(ret);
		}
	}

	return &dev_cgroup->css;
}
196
/* Free the whitelist and the dev_cgroup itself when the cgroup goes away. */
static void devcgroup_destroy(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_whitelist_item *wh, *tmp;

	dev_cgroup = cgroup_to_devcgroup(cgroup);
	list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
		list_del(&wh->list);
		kfree(wh);
	}
	kfree(dev_cgroup);
}
209
210#define DEVCG_ALLOW 1
211#define DEVCG_DENY 2
212#define DEVCG_LIST 3
213
214#define MAJMINLEN 13
215#define ACCLEN 4
216
/*
 * set_access - render an access bitmask as its "rwm" characters
 * @acc: output buffer of at least ACCLEN bytes; NUL-padded
 * @access: bitmask of ACC_READ/ACC_WRITE/ACC_MKNOD
 */
static void set_access(char *acc, short access)
{
	int idx = 0;
	memset(acc, 0, ACCLEN);
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}
228
/* Render a device type as the single character used in devices.list. */
static char type_to_char(short type)
{
	if (type == DEV_ALL)
		return 'a';
	if (type == DEV_CHAR)
		return 'c';
	if (type == DEV_BLOCK)
		return 'b';
	return 'X';	/* should not happen for well-formed entries */
}
239
/*
 * set_majmin - format a major/minor number for devices.list
 * @str: destination buffer (MAJMINLEN bytes is always enough for a u32)
 * @m: the number; ~0 is the wildcard and prints as "*"
 */
static void set_majmin(char *str, unsigned m)
{
	if (m != ~0)
		sprintf(str, "%u", m);
	else
		strcpy(str, "*");
}
247
/* seq_file read handler for devices.list ("type major:minor access" lines). */
static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
				struct seq_file *m)
{
	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
	struct dev_whitelist_item *wh;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	list_for_each_entry_rcu(wh, &devcgroup->whitelist, list) {
		set_access(acc, wh->access);
		set_majmin(maj, wh->major);
		set_majmin(min, wh->minor);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
			   maj, min, acc);
	}
	rcu_read_unlock();

	return 0;
}
267
/*
 * may_access_whitelist:
 * does the access granted to dev_cgroup c contain the access
 * requested in whitelist item refwh.
 * return 1 if yes, 0 if no.
 * call with devcgroup_mutex held
 */
static int may_access_whitelist(struct dev_cgroup *c,
				       struct dev_whitelist_item *refwh)
{
	struct dev_whitelist_item *whitem;

	list_for_each_entry(whitem, &c->whitelist, list) {
		if (whitem->type & DEV_ALL)
			return 1;	/* catch-all entry grants everything */
		if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
			continue;
		if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR))
			continue;
		/* entry's ~0 covers any major/minor */
		if (whitem->major != ~0 && whitem->major != refwh->major)
			continue;
		if (whitem->minor != ~0 && whitem->minor != refwh->minor)
			continue;
		/* requested access must not exceed the entry's access */
		if (refwh->access & (~whitem->access))
			continue;
		return 1;
	}
	return 0;
}
297
/*
 * parent_has_perm:
 * when adding a new allow rule to a device whitelist, the rule
 * must be allowed in the parent device
 *
 * The root cgroup (no parent) can always add rules.
 */
static int parent_has_perm(struct dev_cgroup *childcg,
				  struct dev_whitelist_item *wh)
{
	struct cgroup *pcg = childcg->css.cgroup->parent;
	struct dev_cgroup *parent;

	if (!pcg)
		return 1;
	parent = cgroup_to_devcgroup(pcg);
	return may_access_whitelist(parent, wh);
}
314
315/*
316 * Modify the whitelist using allow/deny rules.
317 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
318 * so we can give a container CAP_MKNOD to let it create devices but not
319 * modify the whitelist.
320 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
321 * us to also grant CAP_SYS_ADMIN to containers without giving away the
322 * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN
323 *
324 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
325 * new access is only allowed if you're in the top-level cgroup, or your
326 * parent cgroup has the access you're asking for.
327 */
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, const char *buffer)
{
	const char *b;
	char *endp;
	int count;
	struct dev_whitelist_item wh;

	/* whitelist manipulation requires CAP_SYS_ADMIN, deliberately
	 * separate from CAP_MKNOD (see the comment above) */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(&wh, 0, sizeof(wh));
	b = buffer;

	/* first char selects the device type: 'a' = all, 'b' = block,
	 * 'c' = char; anything else is rejected */
	switch (*b) {
	case 'a':
		/* "a" matches everything: wildcard major/minor, all access
		 * bits; no further parsing needed */
		wh.type = DEV_ALL;
		wh.access = ACC_MASK;
		wh.major = ~0;
		wh.minor = ~0;
		goto handle;
	case 'b':
		wh.type = DEV_BLOCK;
		break;
	case 'c':
		wh.type = DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	/* exactly one whitespace char between type and major:minor */
	if (!isspace(*b))
		return -EINVAL;
	b++;
	/* read major: '*' is a wildcard (stored as ~0), else a decimal */
	if (*b == '*') {
		wh.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		wh.major = simple_strtoul(b, &endp, 10);
		b = endp;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		wh.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		wh.minor = simple_strtoul(b, &endp, 10);
		b = endp;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
	/* read up to three access chars ('r', 'w', 'm'); newline or NUL
	 * terminates early by forcing count to 3.  NOTE(review): a fourth
	 * and later character is silently ignored rather than rejected --
	 * longstanding behavior userspace may rely on */
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			wh.access |= ACC_READ;
			break;
		case 'w':
			wh.access |= ACC_WRITE;
			break;
		case 'm':
			wh.access |= ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

handle:
	/* apply the parsed rule according to which file was written */
	switch (filetype) {
	case DEVCG_ALLOW:
		/* granting access requires the parent cgroup to already
		 * allow it; removing access (deny) never does */
		if (!parent_has_perm(devcgroup, &wh))
			return -EPERM;
		return dev_whitelist_add(devcgroup, &wh);
	case DEVCG_DENY:
		dev_whitelist_rm(devcgroup, &wh);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
421
422static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
423				  const char *buffer)
424{
425	int retval;
426
427	mutex_lock(&devcgroup_mutex);
428	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
429					 cft->private, buffer);
430	mutex_unlock(&devcgroup_mutex);
431	return retval;
432}
433
/*
 * Control files exposed in every devices cgroup directory:
 * "allow"/"deny" accept rule strings (parsed by devcgroup_update_access),
 * "list" dumps the current whitelist.
 */
static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write_string  = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write_string = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.read_seq_string = devcgroup_seq_read,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};
452
/* Registration of the "devices" cgroup subsystem with the cgroup core. */
struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.can_attach = devcgroup_can_attach,
	.create = devcgroup_create,
	.destroy = devcgroup_destroy,
	.subsys_id = devices_subsys_id,
	.base_cftypes = dev_cgroup_files,
};
461
462int __devcgroup_inode_permission(struct inode *inode, int mask)
 
 
 
 
 
 
 
 
 
 
 
463{
464	struct dev_cgroup *dev_cgroup;
465	struct dev_whitelist_item *wh;
466
467	rcu_read_lock();
468
469	dev_cgroup = task_devcgroup(current);
470
471	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
472		if (wh->type & DEV_ALL)
473			goto found;
474		if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
475			continue;
476		if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
477			continue;
478		if (wh->major != ~0 && wh->major != imajor(inode))
479			continue;
480		if (wh->minor != ~0 && wh->minor != iminor(inode))
481			continue;
482
483		if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
484			continue;
485		if ((mask & MAY_READ) && !(wh->access & ACC_READ))
486			continue;
487found:
488		rcu_read_unlock();
489		return 0;
490	}
491
492	rcu_read_unlock();
493
494	return -EPERM;
495}
496
497int devcgroup_inode_mknod(int mode, dev_t dev)
498{
499	struct dev_cgroup *dev_cgroup;
500	struct dev_whitelist_item *wh;
501
502	if (!S_ISBLK(mode) && !S_ISCHR(mode))
503		return 0;
504
505	rcu_read_lock();
506
507	dev_cgroup = task_devcgroup(current);
508
509	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
510		if (wh->type & DEV_ALL)
511			goto found;
512		if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
513			continue;
514		if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
515			continue;
516		if (wh->major != ~0 && wh->major != MAJOR(dev))
517			continue;
518		if (wh->minor != ~0 && wh->minor != MINOR(dev))
519			continue;
520
521		if (!(wh->access & ACC_MKNOD))
522			continue;
523found:
524		rcu_read_unlock();
525		return 0;
526	}
527
528	rcu_read_unlock();
529
530	return -EPERM;
531}