/* xref: /openbmc/linux/kernel/cgroup/cgroup-v1.c (revision 1fa0a7dc) */
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>
#include <linux/fs_parser.h>

#include <trace/events/cgroup.h>

#define cg_invalf(fc, fmt, ...) invalf(fc, fmt, ## __VA_ARGS__)

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem, not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* disable named v1 mounts */
static bool cgroup_no_v1_named;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: the task whose cgroup memberships @tsk will be attached to
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
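
/*
 * Usage sketch (illustrative, not a caller in this file): an in-kernel
 * user spawning a helper thread on behalf of a task can mirror that
 * task's v1 cgroup memberships.  'owner' and 'helper' are hypothetical.
 *
 *	int err = cgroup_attach_task_all(owner, helper);
 *	if (err)
 *		pr_warn("cgroup attach failed: %d\n", err);
 */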

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that, if a task is forking while being migrated, the new
 * child is either visible in the source cgroup after the parent's
 * migration is complete or put into the target cgroup.  No task can
 * slip out of migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	ret = cgroup_migrate_vet_dst(to);
	if (ret)
		return ret;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, 0, &it);

		do {
			task = css_task_iter_next(&it);
		} while (task && (task->flags & PF_EXITING));

		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				TRACE_CGROUP_PATH(transfer_tasks, to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
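
/*
 * Caller sketch (hedged; see cpuset for a real user): a v1 controller can
 * evacuate a cgroup by pushing every task to its parent.
 *
 *	if (cgroup_transfer_tasks(cgroup_parent(cgrp), cgrp))
 *		pr_warn("failed to transfer tasks out of %s\n", cgrp->kn->name);
 */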

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted. doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of xids */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};
/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/kvfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(array_size(count, sizeof(pid_t)));
	else
		return kmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}

/*
 * Used to destroy all pidlists lingering while waiting for their destroy
 * timer to expire.  None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
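
/*
 * Worked example: given the sorted input {3, 3, 5, 7, 7, 7} with length 6,
 * pidlist_uniq() compacts the array in place to {3, 5, 7, ...} and returns
 * 3; the stale tail entries are ignored via the returned length.
 */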

/*
 * The two pid files - tasks and cgroup.procs - guarantee that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member
 * tasks sorted by task pointer.  As pidlists can be fairly large,
 * allocating one per open file is dangerous, so cgroup had to implement a
 * shared pool of pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary.  The caller must already hold
 * cgrp->pidlist_mutex; returns NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
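
/*
 * Worked example: with pidlist {5, 10, 20} and a resumed position of 11
 * (one more than the last pid shown, 10), the binary search above settles
 * on index 2, so iteration resumes at pid 20 and *pos becomes 20.
 */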

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}
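
/*
 * Together, start/next/stop/show implement the read side of the v1
 * "tasks" and "cgroup.procs" files, e.g. (paths and pids illustrative):
 *
 *	$ cat /sys/fs/cgroup/cpu/mygroup/tasks
 *	1042
 *	1043
 */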

static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
				     char *buf, size_t nbytes, loff_t off,
				     bool threadgroup)
{
	struct cgroup *cgrp;
	struct task_struct *task;
	const struct cred *cred, *tcred;
	ssize_t ret;

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;

	task = cgroup_procs_write_start(buf, threadgroup);
	ret = PTR_ERR_OR_ZERO(task);
	if (ret)
		goto out_unlock;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	cred = current_cred();
	tcred = get_task_cred(task);
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid))
		ret = -EACCES;
	put_cred(tcred);
	if (ret)
		goto out_finish;

	ret = cgroup_attach_task(cgrp, task, threadgroup);

out_finish:
	cgroup_procs_write_finish(task);
out_unlock:
	cgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static ssize_t cgroup1_procs_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, true);
}

static ssize_t cgroup1_tasks_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	return __cgroup1_procs_write(of, buf, nbytes, off, false);
}
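
/*
 * Usage sketch (paths and pids illustrative): writing to "cgroup.procs"
 * moves the whole thread group, while "tasks" moves a single thread.
 *
 *	$ echo 1042 > /sys/fs/cgroup/cpu/mygroup/cgroup.procs
 *	$ echo 1043 > /sys/fs/cgroup/cpu/mygroup/tasks
 */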

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}
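
/*
 * Usage sketch (mount point and agent binary are assumptions): the
 * release agent can only be set on the hierarchy root.
 *
 *	$ echo /usr/local/sbin/cgroup-cleanup > \
 *		/sys/fs/cgroup/memory/release_agent
 */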

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup1_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup1_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};
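
/*
 * Resulting layout (illustrative): every directory on a v1 hierarchy gets
 * cgroup.procs, cgroup.clone_children, tasks and notify_on_release, while
 * cgroup.sane_behavior and release_agent appear only at the root because
 * of CFTYPE_ONLY_ON_ROOT.
 */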

/* Display information about each subsystem and each hierarchy */
int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}
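
/*
 * Example /proc/cgroups output (hierarchy ids and counts illustrative):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset	2	1	1
 *	cpu	3	4	1
 *	memory	4	12	1
 */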

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* the kernfs_node should belong to cgroupfs and be a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, 0, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL;
	char *argv[3], *envp[3];
	int ret;

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	spin_lock_irq(&css_set_lock);
	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_irq(&css_set_lock);
	if (ret < 0 || ret >= PATH_MAX)
		goto out;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
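
/*
 * A minimal release agent sketch (hypothetical userspace script; the
 * mount point is an assumption).  $1 is the released cgroup's path
 * relative to the hierarchy root.
 *
 *	#!/bin/sh
 *	rmdir "/sys/fs/cgroup/memory$1"
 */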

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're gonna grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		TRACE_CGROUP_PATH(rename, cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");
	if (root->flags & CGRP_ROOT_CPUSET_V2_MODE)
		seq_puts(seq, ",cpuset_v2_mode");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

enum cgroup1_param {
	Opt_all,
	Opt_clone_children,
	Opt_cpuset_v2_mode,
	Opt_name,
	Opt_none,
	Opt_noprefix,
	Opt_release_agent,
	Opt_xattr,
};

static const struct fs_parameter_spec cgroup1_param_specs[] = {
	fsparam_flag  ("all",		Opt_all),
	fsparam_flag  ("clone_children", Opt_clone_children),
	fsparam_flag  ("cpuset_v2_mode", Opt_cpuset_v2_mode),
	fsparam_string("name",		Opt_name),
	fsparam_flag  ("none",		Opt_none),
	fsparam_flag  ("noprefix",	Opt_noprefix),
	fsparam_string("release_agent",	Opt_release_agent),
	fsparam_flag  ("xattr",		Opt_xattr),
	{}
};

const struct fs_parameter_description cgroup1_fs_parameters = {
	.name		= "cgroup1",
	.specs		= cgroup1_param_specs,
};
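
/*
 * These options surface on the v1 mount command line, e.g. (mount points
 * and names illustrative):
 *
 *	$ mount -t cgroup -o cpu,cpuacct,name=mygrp cgroup /mnt/cg
 *	$ mount -t cgroup -o none,name=systemd cgroup /sys/fs/cgroup/systemd
 */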

int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_subsys *ss;
	struct fs_parse_result result;
	int opt, i;

	opt = fs_parse(fc, &cgroup1_fs_parameters, param, &result);
	if (opt == -ENOPARAM) {
		if (strcmp(param->key, "source") == 0) {
			fc->source = param->string;
			param->string = NULL;
			return 0;
		}
		for_each_subsys(ss, i) {
			if (strcmp(param->key, ss->legacy_name))
				continue;
			ctx->subsys_mask |= (1 << i);
			return 0;
		}
		return cg_invalf(fc, "cgroup1: Unknown subsys name '%s'", param->key);
	}
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_none:
		/* Explicitly have no subsystems */
		ctx->none = true;
		break;
	case Opt_all:
		ctx->all_ss = true;
		break;
	case Opt_noprefix:
		ctx->flags |= CGRP_ROOT_NOPREFIX;
		break;
	case Opt_clone_children:
		ctx->cpuset_clone_children = true;
		break;
	case Opt_cpuset_v2_mode:
		ctx->flags |= CGRP_ROOT_CPUSET_V2_MODE;
		break;
	case Opt_xattr:
		ctx->flags |= CGRP_ROOT_XATTR;
		break;
	case Opt_release_agent:
		/* Specifying two release agents is forbidden */
		if (ctx->release_agent)
			return cg_invalf(fc, "cgroup1: release_agent respecified");
		ctx->release_agent = param->string;
		param->string = NULL;
		break;
	case Opt_name:
		/* blocked by boot param? */
		if (cgroup_no_v1_named)
			return -ENOENT;
		/* Can't specify an empty name */
		if (!param->size)
			return cg_invalf(fc, "cgroup1: Empty name");
		if (param->size > MAX_CGROUP_ROOT_NAMELEN - 1)
			return cg_invalf(fc, "cgroup1: Name too long");
		/* Must match [\w.-]+ */
		for (i = 0; i < param->size; i++) {
			char c = param->string[i];
			if (isalnum(c))
				continue;
			if ((c == '.') || (c == '-') || (c == '_'))
				continue;
			return cg_invalf(fc, "cgroup1: Invalid name");
		}
		/* Specifying two names is forbidden */
		if (ctx->name)
			return cg_invalf(fc, "cgroup1: name respecified");
		ctx->name = param->string;
		param->string = NULL;
		break;
	}
	return 0;
}

static int check_cgroupfs_options(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	u16 mask = U16_MAX;
	u16 enabled = 0;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif
	for_each_subsys(ss, i)
		if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
			enabled |= 1 << i;

	ctx->subsys_mask &= enabled;

	/*
	 * In the absence of 'none', 'name=' or subsystem name options,
	 * let's default to 'all'.
	 */
	if (!ctx->subsys_mask && !ctx->none && !ctx->name)
		ctx->all_ss = true;

	if (ctx->all_ss) {
		/* Mutually exclusive option 'all' + subsystem name */
		if (ctx->subsys_mask)
			return cg_invalf(fc, "cgroup1: subsys name conflicts with all");
		/* 'all' => select all the subsystems */
		ctx->subsys_mask = enabled;
	}

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!ctx->subsys_mask && !ctx->name)
		return cg_invalf(fc, "cgroup1: Need name or subsystem set");

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((ctx->flags & CGRP_ROOT_NOPREFIX) && (ctx->subsys_mask & mask))
		return cg_invalf(fc, "cgroup1: noprefix used incorrectly");

	/* Can't specify "none" and some subsystems */
	if (ctx->subsys_mask && ctx->none)
		return cg_invalf(fc, "cgroup1: none used incorrectly");

	return 0;
}

int cgroup1_reconfigure(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct kernfs_root *kf_root = kernfs_root_from_sb(fc->root->d_sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	int ret = 0;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = check_cgroupfs_options(fc);
	if (ret)
		goto out_unlock;

	if (ctx->subsys_mask != root->subsys_mask || ctx->release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = ctx->subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~ctx->subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((ctx->flags ^ root->flags) ||
	    (ctx->name && strcmp(ctx->name, root->name))) {
		cg_invalf(fc, "option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"",
		       ctx->flags, ctx->name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (ctx->release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, ctx->release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

 out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}
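
/*
 * Remount sketch (illustrative): flag and name changes are rejected, and
 * subsystem changes are deprecated; updating the release agent still
 * works but triggers the pr_warn() above.
 *
 *	$ mount -o remount,release_agent=/sbin/agent cgroup /mnt/cg
 */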

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

/*
 * The guts of cgroup1 mount - find or create cgroup_root to use.
 * Called with cgroup_mutex held; returns 0 on success, a negative errno
 * on error, and a positive value when the candidate root is busy dying.
 * On success it stashes a reference to cgroup_root into the given
 * cgroup_fs_context; that reference does *NOT* count towards the
 * cgroup_root refcount.
 */
static int cgroup1_root_to_use(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	int i, ret;

	/* First find the desired set of subsystems */
	ret = check_cgroupfs_options(fc);
	if (ret)
		return ret;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(ctx->subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt))
			return 1;	/* restart */
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (ctx->name) {
			if (strcmp(ctx->name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((ctx->subsys_mask || ctx->none) &&
		    (ctx->subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			return -EBUSY;
		}

		if (root->flags ^ ctx->flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		ctx->root = root;
		return 0;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!ctx->subsys_mask && !ctx->none)
		return cg_invalf(fc, "cgroup1: No subsys list or none specified");

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ctx->ns != &init_cgroup_ns)
		return -EPERM;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return -ENOMEM;

	ctx->root = root;
	init_cgroup_root(ctx);

	ret = cgroup_setup_root(root, ctx->subsys_mask);
	if (ret)
		cgroup_free_root(root);
	return ret;
}

int cgroup1_get_tree(struct fs_context *fc)
{
	struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
	int ret;

	/* Check if the caller has permission to mount. */
	if (!ns_capable(ctx->ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	ret = cgroup1_root_to_use(fc);
	if (!ret && !percpu_ref_tryget_live(&ctx->root->cgrp.self.refcnt))
		ret = 1;	/* restart */

	mutex_unlock(&cgroup_mutex);

	if (!ret)
		ret = cgroup_do_get_tree(fc);

	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
		struct super_block *sb = fc->root->d_sb;
		dput(fc->root);
		deactivate_locked_super(sb);
		ret = 1;
	}

	if (unlikely(ret > 0)) {
		msleep(10);
		return restart_syscall();
	}
	return ret;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists; kept separate to serve as the flush
	 * domain.  Cap @max_active at 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			continue;
		}

		if (!strcmp(token, "named")) {
			cgroup_no_v1_named = true;
			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
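
/*
 * Boot-time example (illustrative): passing
 *
 *	cgroup_no_v1=memory,named
 *
 * keeps the memory controller off all v1 hierarchies and makes named
 * (name=) v1 mounts fail with -ENOENT.
 */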
1297