xref: /openbmc/linux/security/device_cgroup.c (revision 79d71974)
1 /*
2  * device_cgroup.c - device cgroup subsystem
3  *
4  * Copyright 2007 IBM Corp
5  */
6 
7 #include <linux/device_cgroup.h>
8 #include <linux/cgroup.h>
9 #include <linux/ctype.h>
10 #include <linux/list.h>
11 #include <linux/uaccess.h>
12 #include <linux/seq_file.h>
13 #include <linux/slab.h>
14 #include <linux/rcupdate.h>
15 #include <linux/mutex.h>
16 
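/*
 * Access bits and device type bits; used both in exception entries and
 * in the permission checks at the bottom of this file.
 */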
17 #define ACC_MKNOD 1
18 #define ACC_READ  2
19 #define ACC_WRITE 4
20 #define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)
21 
22 #define DEV_BLOCK 1
23 #define DEV_CHAR  2
24 #define DEV_ALL   4  /* this represents all devices */
25 
26 static DEFINE_MUTEX(devcgroup_mutex);
27 
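/*
 * Default behavior of a device cgroup: DEVCG_DEFAULT_NONE means the css
 * is not online (nothing inherited yet), DEVCG_DEFAULT_ALLOW permits all
 * devices except the listed exceptions, DEVCG_DEFAULT_DENY denies all
 * devices except the listed exceptions.
 */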
28 enum devcg_behavior {
29 	DEVCG_DEFAULT_NONE,
30 	DEVCG_DEFAULT_ALLOW,
31 	DEVCG_DEFAULT_DENY,
32 };
33 
34 /*
35  * exception list locking rules:
36  * hold devcgroup_mutex for update/read.
37  * hold rcu_read_lock() for read.
38  */
39 
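/*
 * A single exception to the default behavior: one device, or a wildcard
 * range of devices (major/minor == ~0), of a given type together with
 * the access bits being excepted.
 */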
40 struct dev_exception_item {
41 	u32 major, minor;
42 	short type;
43 	short access;
44 	struct list_head list;
45 	struct rcu_head rcu;
46 };
47 
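/* per-cgroup state: the default behavior plus its list of exceptions */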
48 struct dev_cgroup {
49 	struct cgroup_subsys_state css;
50 	struct list_head exceptions;
51 	enum devcg_behavior behavior;
52 };
53 
54 static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
55 {
56 	return s ? container_of(s, struct dev_cgroup, css) : NULL;
57 }
58 
59 static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
60 {
61 	return css_to_devcgroup(task_css(task, devices_cgrp_id));
62 }
63 
64 /*
65  * called under devcgroup_mutex
66  */
67 static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
68 {
69 	struct dev_exception_item *ex, *tmp, *new;
70 
71 	lockdep_assert_held(&devcgroup_mutex);
72 
73 	list_for_each_entry(ex, orig, list) {
74 		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
75 		if (!new)
76 			goto free_and_exit;
77 		list_add_tail(&new->list, dest);
78 	}
79 
80 	return 0;
81 
82 free_and_exit:
83 	list_for_each_entry_safe(ex, tmp, dest, list) {
84 		list_del(&ex->list);
85 		kfree(ex);
86 	}
87 	return -ENOMEM;
88 }
89 
90 /*
91  * called under devcgroup_mutex
92  */
93 static int dev_exception_add(struct dev_cgroup *dev_cgroup,
94 			     struct dev_exception_item *ex)
95 {
96 	struct dev_exception_item *excopy, *walk;
97 
98 	lockdep_assert_held(&devcgroup_mutex);
99 
100 	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
101 	if (!excopy)
102 		return -ENOMEM;
103 
104 	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
105 		if (walk->type != ex->type)
106 			continue;
107 		if (walk->major != ex->major)
108 			continue;
109 		if (walk->minor != ex->minor)
110 			continue;
111 
112 		walk->access |= ex->access;
113 		kfree(excopy);
114 		excopy = NULL;
115 	}
116 
117 	if (excopy != NULL)
118 		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
119 	return 0;
120 }
121 
122 /*
123  * called under devcgroup_mutex
124  */
125 static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
126 			     struct dev_exception_item *ex)
127 {
128 	struct dev_exception_item *walk, *tmp;
129 
130 	lockdep_assert_held(&devcgroup_mutex);
131 
132 	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
133 		if (walk->type != ex->type)
134 			continue;
135 		if (walk->major != ex->major)
136 			continue;
137 		if (walk->minor != ex->minor)
138 			continue;
139 
140 		walk->access &= ~ex->access;
141 		if (!walk->access) {
142 			list_del_rcu(&walk->list);
143 			kfree_rcu(walk, rcu);
144 		}
145 	}
146 }
147 
148 static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
149 {
150 	struct dev_exception_item *ex, *tmp;
151 
152 	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
153 		list_del_rcu(&ex->list);
154 		kfree_rcu(ex, rcu);
155 	}
156 }
157 
158 /**
159  * dev_exception_clean - frees all entries of the exception list
160  * @dev_cgroup: dev_cgroup with the exception list to be cleaned
161  *
162  * called under devcgroup_mutex
163  */
164 static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
165 {
166 	lockdep_assert_held(&devcgroup_mutex);
167 
168 	__dev_exception_clean(dev_cgroup);
169 }
170 
171 static inline bool is_devcg_online(const struct dev_cgroup *devcg)
172 {
173 	return (devcg->behavior != DEVCG_DEFAULT_NONE);
174 }
175 
176 /**
177  * devcgroup_online - initializes devcgroup's behavior and exceptions based on
178  * 		      parent's
179  * @css: css getting online
180  * returns 0 in case of success, error code otherwise
181  */
182 static int devcgroup_online(struct cgroup_subsys_state *css)
183 {
184 	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
185 	struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css_parent(css));
186 	int ret = 0;
187 
188 	mutex_lock(&devcgroup_mutex);
189 
190 	if (parent_dev_cgroup == NULL)
191 		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
192 	else {
193 		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
194 					  &parent_dev_cgroup->exceptions);
195 		if (!ret)
196 			dev_cgroup->behavior = parent_dev_cgroup->behavior;
197 	}
198 	mutex_unlock(&devcgroup_mutex);
199 
200 	return ret;
201 }
202 
203 static void devcgroup_offline(struct cgroup_subsys_state *css)
204 {
205 	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
206 
207 	mutex_lock(&devcgroup_mutex);
208 	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
209 	mutex_unlock(&devcgroup_mutex);
210 }
211 
212 /*
213  * called from kernel/cgroup.c with cgroup_lock() held.
214  */
215 static struct cgroup_subsys_state *
216 devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
217 {
218 	struct dev_cgroup *dev_cgroup;
219 
220 	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
221 	if (!dev_cgroup)
222 		return ERR_PTR(-ENOMEM);
223 	INIT_LIST_HEAD(&dev_cgroup->exceptions);
224 	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
225 
226 	return &dev_cgroup->css;
227 }
228 
229 static void devcgroup_css_free(struct cgroup_subsys_state *css)
230 {
231 	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
232 
233 	__dev_exception_clean(dev_cgroup);
234 	kfree(dev_cgroup);
235 }
236 
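/* identifiers for the devices.allow, devices.deny and devices.list files */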
237 #define DEVCG_ALLOW 1
238 #define DEVCG_DENY 2
239 #define DEVCG_LIST 3
240 
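/* buffer sizes for a "major"/"minor" string (u32 or '*') and for "rwm" */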
241 #define MAJMINLEN 13
242 #define ACCLEN 4
243 
244 static void set_access(char *acc, short access)
245 {
246 	int idx = 0;
247 	memset(acc, 0, ACCLEN);
248 	if (access & ACC_READ)
249 		acc[idx++] = 'r';
250 	if (access & ACC_WRITE)
251 		acc[idx++] = 'w';
252 	if (access & ACC_MKNOD)
253 		acc[idx++] = 'm';
254 }
255 
256 static char type_to_char(short type)
257 {
258 	if (type == DEV_ALL)
259 		return 'a';
260 	if (type == DEV_CHAR)
261 		return 'c';
262 	if (type == DEV_BLOCK)
263 		return 'b';
264 	return 'X';
265 }
266 
267 static void set_majmin(char *str, unsigned m)
268 {
269 	if (m == ~0)
270 		strcpy(str, "*");
271 	else
272 		sprintf(str, "%u", m);
273 }
274 
275 static int devcgroup_seq_show(struct seq_file *m, void *v)
276 {
277 	struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
278 	struct dev_exception_item *ex;
279 	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
280 
281 	rcu_read_lock();
282 	/*
283 	 * To preserve compatibility:
284 	 * - Only show the "all devices" entry when the default policy is to allow
285 	 * - List the exceptions in case the default policy is to deny
286 	 * This way, the file remains as a "whitelist of devices"
287 	 */
288 	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
289 		set_access(acc, ACC_MASK);
290 		set_majmin(maj, ~0);
291 		set_majmin(min, ~0);
292 		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
293 			   maj, min, acc);
294 	} else {
295 		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
296 			set_access(acc, ex->access);
297 			set_majmin(maj, ex->major);
298 			set_majmin(min, ex->minor);
299 			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
300 				   maj, min, acc);
301 		}
302 	}
303 	rcu_read_unlock();
304 
305 	return 0;
306 }
307 
308 /**
309  * match_exception	- iterates the exception list trying to match a rule
310  * 			  based on type, major, minor and access type. It is
311  * 			  considered a match if an exception is found that
312  * 			  contains the entire range of provided parameters.
313  * @exceptions: list of exceptions
314  * @type: device type (DEV_BLOCK or DEV_CHAR)
315  * @major: device file major number, ~0 to match all
316  * @minor: device file minor number, ~0 to match all
317  * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
318  *
319  * returns: true in case it matches an exception completely
320  */
321 static bool match_exception(struct list_head *exceptions, short type,
322 			    u32 major, u32 minor, short access)
323 {
324 	struct dev_exception_item *ex;
325 
326 	list_for_each_entry_rcu(ex, exceptions, list) {
327 		if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
328 			continue;
329 		if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
330 			continue;
331 		if (ex->major != ~0 && ex->major != major)
332 			continue;
333 		if (ex->minor != ~0 && ex->minor != minor)
334 			continue;
335 		/* provided access cannot have more bits set than the exception rule */
336 		if (access & (~ex->access))
337 			continue;
338 		return true;
339 	}
340 	return false;
341 }
342 
343 /**
344  * match_exception_partial - iterates the exception list trying to match a rule
345  * 			     based on type, major, minor and access type. It is
346  * 			     considered a match if an exception's range is
347  * 			     found to contain *any* of the devices specified by
348  * 			     provided parameters. This is used to make sure no
349  * 			     extra access is being granted that is forbidden by
350  * 			     any of the exceptions in the list.
351  * @exceptions: list of exceptions
352  * @type: device type (DEV_BLOCK or DEV_CHAR)
353  * @major: device file major number, ~0 to match all
354  * @minor: device file minor number, ~0 to match all
355  * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
356  *
357  * returns: true in case the provided range overlaps with an exception
358  */
359 static bool match_exception_partial(struct list_head *exceptions, short type,
360 				    u32 major, u32 minor, short access)
361 {
362 	struct dev_exception_item *ex;
363 
364 	list_for_each_entry_rcu(ex, exceptions, list) {
365 		if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
366 			continue;
367 		if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
368 			continue;
369 		/*
370 		 * We must be sure that both the exception and the provided
371 		 * range aren't masking all devices
372 		 */
373 		if (ex->major != ~0 && major != ~0 && ex->major != major)
374 			continue;
375 		if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
376 			continue;
377 		/*
378 		 * For the provided range to overlap this exception, at least
379 		 * one of the requested access bits must also be set in the
380 		 * exception's access bits
381 		 */
382 		if (!(access & ex->access))
383 			continue;
384 		return true;
385 	}
386 	return false;
387 }
388 
389 /**
390  * verify_new_ex - verifies if a new exception is part of what is allowed
391  *		   by a dev cgroup based on the default policy +
392  *		   exceptions. This is used to make sure a child cgroup
393  *		   won't have more privileges than its parent
394  * @dev_cgroup: dev cgroup to be tested against
395  * @refex: new exception
396  * @behavior: behavior of the exception's dev_cgroup
397  */
398 static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
399 		          struct dev_exception_item *refex,
400 		          enum devcg_behavior behavior)
401 {
402 	bool match = false;
403 
404 	rcu_lockdep_assert(rcu_read_lock_held() ||
405 			   lockdep_is_held(&devcgroup_mutex),
406 			   "device_cgroup:verify_new_ex called without proper synchronization");
407 
408 	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
409 		if (behavior == DEVCG_DEFAULT_ALLOW) {
410 			/*
411 			 * a new exception in the child doesn't matter, it only
412 			 * adds extra restrictions
413 			 */
414 			return true;
415 		} else {
416 			/*
417 			 * new exception in the child will add more devices
418 			 * that can be acessed, so it can't match any of
419 			 * that can be accessed, so it can't match any of
420 			 */
421 			match = match_exception_partial(&dev_cgroup->exceptions,
422 							refex->type,
423 							refex->major,
424 							refex->minor,
425 							refex->access);
426 
427 			if (match)
428 				return false;
429 			return true;
430 		}
431 	} else {
432 		/*
433 		 * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
434 		 * the new exception will add access to more devices and must
435 		 * be contained completely in a parent's exception to be
436 		 * allowed
437 		 */
438 		match = match_exception(&dev_cgroup->exceptions, refex->type,
439 					refex->major, refex->minor,
440 					refex->access);
441 
442 		if (match)
443 			/* parent has an exception that matches the proposed */
444 			return true;
445 		else
446 			return false;
447 	}
448 	return false;
449 }
450 
451 /*
452  * parent_has_perm:
453  * when adding a new allow rule to a device exception list, the rule
454  * must be allowed in the parent device cgroup
455  */
456 static int parent_has_perm(struct dev_cgroup *childcg,
457 				  struct dev_exception_item *ex)
458 {
459 	struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
460 
461 	if (!parent)
462 		return 1;
463 	return verify_new_ex(parent, ex, childcg->behavior);
464 }
465 
466 /**
467  * may_allow_all - checks if it's possible to change the behavior to
468  *		   allow based on parent's rules.
469  * @parent: device cgroup's parent
470  * returns: != 0 in case it's allowed, 0 otherwise
471  */
472 static inline int may_allow_all(struct dev_cgroup *parent)
473 {
474 	if (!parent)
475 		return 1;
476 	return parent->behavior == DEVCG_DEFAULT_ALLOW;
477 }
478 
479 /**
480  * revalidate_active_exceptions - walks through the active exception list and
481  * 				  revalidates the exceptions based on parent's
482  * 				  behavior and exceptions. The exceptions that
483  * 				  are no longer valid will be removed.
484  * 				  Called with devcgroup_mutex held.
485  * @devcg: cgroup which exceptions will be checked
486  *
487  * This is one of the three key functions for hierarchy implementation.
488  * This function is responsible for re-evaluating all the cgroup's active
489  * exceptions due to a parent's exception change.
490  * Refer to Documentation/cgroups/devices.txt for more details.
491  */
492 static void revalidate_active_exceptions(struct dev_cgroup *devcg)
493 {
494 	struct dev_exception_item *ex;
495 	struct list_head *this, *tmp;
496 
497 	list_for_each_safe(this, tmp, &devcg->exceptions) {
498 		ex = container_of(this, struct dev_exception_item, list);
499 		if (!parent_has_perm(devcg, ex))
500 			dev_exception_rm(devcg, ex);
501 	}
502 }
503 
504 /**
505  * propagate_exception - propagates a new exception to the children
506  * @devcg_root: device cgroup that added a new exception
507  * @ex: new exception to be propagated
508  *
509  * returns: 0 in case of success, != 0 in case of error
510  */
511 static int propagate_exception(struct dev_cgroup *devcg_root,
512 			       struct dev_exception_item *ex)
513 {
514 	struct cgroup_subsys_state *pos;
515 	int rc = 0;
516 
517 	rcu_read_lock();
518 
519 	css_for_each_descendant_pre(pos, &devcg_root->css) {
520 		struct dev_cgroup *devcg = css_to_devcgroup(pos);
521 
522 		/*
523 		 * Because devcgroup_mutex is held, no devcg will become
524 		 * online or offline during the tree walk (see on/offline
525 		 * methods), and online ones are safe to access outside RCU
526 		 * read lock without bumping refcnt.
527 		 */
528 		if (pos == &devcg_root->css || !is_devcg_online(devcg))
529 			continue;
530 
531 		rcu_read_unlock();
532 
533 		/*
534 		 * in case both root's and devcg's behavior is allow, a new
535 		 * restriction means adding to the exception list
536 		 */
537 		if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
538 		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
539 			rc = dev_exception_add(devcg, ex);
540 			if (rc)
541 				break;
542 		} else {
543 			/*
544 			 * in the other possible cases:
545 			 * root's behavior: allow, devcg's: deny
546 			 * root's behavior: deny, devcg's: deny
547 			 * the exception will be removed
548 			 */
549 			dev_exception_rm(devcg, ex);
550 		}
551 		revalidate_active_exceptions(devcg);
552 
553 		rcu_read_lock();
554 	}
555 
556 	rcu_read_unlock();
557 	return rc;
558 }
559 
560 static inline bool has_children(struct dev_cgroup *devcgroup)
561 {
562 	struct cgroup *cgrp = devcgroup->css.cgroup;
563 
564 	return !list_empty(&cgrp->children);
565 }
566 
567 /*
568  * Modify the exception list using allow/deny rules.
569  * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
570  * so we can give a container CAP_MKNOD to let it create devices but not
571  * modify the exception list.
572  * It seems likely we'll want to add a CAP_CONTAINER capability to allow
573  * us to also grant CAP_SYS_ADMIN to containers without giving away the
574  * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
575  * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN.
576  * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
577  * new access is only allowed if you're in the top-level cgroup, or your
578  * parent cgroup has the access you're asking for.
579  * parent cgroup has the access you're asking for.
 */
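/*
 * For illustration, the strings accepted here follow the format parsed
 * below: "<type> <major>:<minor> <access>", where <type> is 'a', 'b' or
 * 'c', <major>/<minor> are decimal numbers or '*', and <access> is any
 * combination of 'r', 'w' and 'm'.  Assuming the v1 hierarchy is mounted
 * at /sys/fs/cgroup/devices, a sketch of typical usage is:
 *
 *	echo a > devices.deny			# deny everything by default
 *	echo "c 1:3 mr" > devices.allow		# allow read/mknod of /dev/null
 *
 * See Documentation/cgroups/devices.txt for the full interface.
 */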
580 static int devcgroup_update_access(struct dev_cgroup *devcgroup,
581 				   int filetype, char *buffer)
582 {
583 	const char *b;
584 	char temp[12];		/* 11 + 1 characters needed for a u32 */
585 	int count, rc = 0;
586 	struct dev_exception_item ex;
587 	struct dev_cgroup *parent = css_to_devcgroup(css_parent(&devcgroup->css));
588 
589 	if (!capable(CAP_SYS_ADMIN))
590 		return -EPERM;
591 
592 	memset(&ex, 0, sizeof(ex));
593 	b = buffer;
594 
595 	switch (*b) {
596 	case 'a':
597 		switch (filetype) {
598 		case DEVCG_ALLOW:
599 			if (has_children(devcgroup))
600 				return -EINVAL;
601 
602 			if (!may_allow_all(parent))
603 				return -EPERM;
604 			dev_exception_clean(devcgroup);
605 			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
606 			if (!parent)
607 				break;
608 
609 			rc = dev_exceptions_copy(&devcgroup->exceptions,
610 						 &parent->exceptions);
611 			if (rc)
612 				return rc;
613 			break;
614 		case DEVCG_DENY:
615 			if (has_children(devcgroup))
616 				return -EINVAL;
617 
618 			dev_exception_clean(devcgroup);
619 			devcgroup->behavior = DEVCG_DEFAULT_DENY;
620 			break;
621 		default:
622 			return -EINVAL;
623 		}
624 		return 0;
625 	case 'b':
626 		ex.type = DEV_BLOCK;
627 		break;
628 	case 'c':
629 		ex.type = DEV_CHAR;
630 		break;
631 	default:
632 		return -EINVAL;
633 	}
634 	b++;
635 	if (!isspace(*b))
636 		return -EINVAL;
637 	b++;
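	/* read the major number, '*' meaning any major */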
638 	if (*b == '*') {
639 		ex.major = ~0;
640 		b++;
641 	} else if (isdigit(*b)) {
642 		memset(temp, 0, sizeof(temp));
643 		for (count = 0; count < sizeof(temp) - 1; count++) {
644 			temp[count] = *b;
645 			b++;
646 			if (!isdigit(*b))
647 				break;
648 		}
649 		rc = kstrtou32(temp, 10, &ex.major);
650 		if (rc)
651 			return -EINVAL;
652 	} else {
653 		return -EINVAL;
654 	}
655 	if (*b != ':')
656 		return -EINVAL;
657 	b++;
658 
659 	/* read minor */
660 	if (*b == '*') {
661 		ex.minor = ~0;
662 		b++;
663 	} else if (isdigit(*b)) {
664 		memset(temp, 0, sizeof(temp));
665 		for (count = 0; count < sizeof(temp) - 1; count++) {
666 			temp[count] = *b;
667 			b++;
668 			if (!isdigit(*b))
669 				break;
670 		}
671 		rc = kstrtou32(temp, 10, &ex.minor);
672 		if (rc)
673 			return -EINVAL;
674 	} else {
675 		return -EINVAL;
676 	}
677 	if (!isspace(*b))
678 		return -EINVAL;
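	/* read up to three access characters: any combination of 'r', 'w', 'm' */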
679 	for (b++, count = 0; count < 3; count++, b++) {
680 		switch (*b) {
681 		case 'r':
682 			ex.access |= ACC_READ;
683 			break;
684 		case 'w':
685 			ex.access |= ACC_WRITE;
686 			break;
687 		case 'm':
688 			ex.access |= ACC_MKNOD;
689 			break;
690 		case '\n':
691 		case '\0':
692 			count = 3;
693 			break;
694 		default:
695 			return -EINVAL;
696 		}
697 	}
698 
699 	switch (filetype) {
700 	case DEVCG_ALLOW:
701 		if (!parent_has_perm(devcgroup, &ex))
702 			return -EPERM;
703 		/*
704 		 * If the default policy is to allow, try to remove a
705 		 * matching exception instead. And be silent about it: we
706 		 * don't want to break compatibility
707 		 */
708 		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
709 			dev_exception_rm(devcgroup, &ex);
710 			return 0;
711 		}
712 		rc = dev_exception_add(devcgroup, &ex);
713 		break;
714 	case DEVCG_DENY:
715 		/*
716 		 * If the default policy is to deny, try to remove a
717 		 * matching exception instead. And be silent about it: we
718 		 * don't want to break compatibility
719 		 */
720 		if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
721 			dev_exception_rm(devcgroup, &ex);
722 		else
723 			rc = dev_exception_add(devcgroup, &ex);
724 
725 		if (rc)
726 			break;
727 		/* we only propagate new restrictions */
728 		rc = propagate_exception(devcgroup, &ex);
729 		break;
730 	default:
731 		rc = -EINVAL;
732 	}
733 	return rc;
734 }
735 
736 static int devcgroup_access_write(struct cgroup_subsys_state *css,
737 				  struct cftype *cft, char *buffer)
738 {
739 	int retval;
740 
741 	mutex_lock(&devcgroup_mutex);
742 	retval = devcgroup_update_access(css_to_devcgroup(css),
743 					 cft->private, buffer);
744 	mutex_unlock(&devcgroup_mutex);
745 	return retval;
746 }
747 
748 static struct cftype dev_cgroup_files[] = {
749 	{
750 		.name = "allow",
751 		.write_string  = devcgroup_access_write,
752 		.private = DEVCG_ALLOW,
753 	},
754 	{
755 		.name = "deny",
756 		.write_string = devcgroup_access_write,
757 		.private = DEVCG_DENY,
758 	},
759 	{
760 		.name = "list",
761 		.seq_show = devcgroup_seq_show,
762 		.private = DEVCG_LIST,
763 	},
764 	{ }	/* terminate */
765 };
766 
767 struct cgroup_subsys devices_cgrp_subsys = {
768 	.css_alloc = devcgroup_css_alloc,
769 	.css_free = devcgroup_css_free,
770 	.css_online = devcgroup_online,
771 	.css_offline = devcgroup_offline,
772 	.base_cftypes = dev_cgroup_files,
773 };
774 
775 /**
776  * __devcgroup_check_permission - checks if a device operation is permitted
777  *				    for the current task's device cgroup
778  * @type: device type
779  * @major: device major number
780  * @minor: device minor number
781  * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
782  *
783  * returns 0 on success, -EPERM in case the operation is not permitted
784  */
785 static int __devcgroup_check_permission(short type, u32 major, u32 minor,
786 				        short access)
787 {
788 	struct dev_cgroup *dev_cgroup;
789 	bool rc;
790 
791 	rcu_read_lock();
792 	dev_cgroup = task_devcgroup(current);
793 	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
794 		/* Can't match any of the exceptions, even partially */
795 		rc = !match_exception_partial(&dev_cgroup->exceptions,
796 					      type, major, minor, access);
797 	else
798 		/* Need to match completely one exception to be allowed */
799 		rc = match_exception(&dev_cgroup->exceptions, type, major,
800 				     minor, access);
801 	rcu_read_unlock();
802 
803 	if (!rc)
804 		return -EPERM;
805 
806 	return 0;
807 }
808 
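/*
 * __devcgroup_inode_permission - translates a device inode and access mask
 * into type/major/minor and ACC_* bits and defers the decision to
 * __devcgroup_check_permission() for the current task's device cgroup.
 */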
809 int __devcgroup_inode_permission(struct inode *inode, int mask)
810 {
811 	short type, access = 0;
812 
813 	if (S_ISBLK(inode->i_mode))
814 		type = DEV_BLOCK;
815 	if (S_ISCHR(inode->i_mode))
816 		type = DEV_CHAR;
817 	if (mask & MAY_WRITE)
818 		access |= ACC_WRITE;
819 	if (mask & MAY_READ)
820 		access |= ACC_READ;
821 
822 	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
823 			access);
824 }
825 
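/*
 * devcgroup_inode_mknod - checks a mknod of a block or char device node
 * against the current task's device cgroup; other modes are always allowed.
 */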
826 int devcgroup_inode_mknod(int mode, dev_t dev)
827 {
828 	short type;
829 
830 	if (!S_ISBLK(mode) && !S_ISCHR(mode))
831 		return 0;
832 
833 	if (S_ISBLK(mode))
834 		type = DEV_BLOCK;
835 	else
836 		type = DEV_CHAR;
837 
838 	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
839 			ACC_MKNOD);
840 
841 }
842