xref: /openbmc/linux/kernel/cgroup/cgroup-v1.c (revision e5c86679)
1 #include "cgroup-internal.h"
2 
3 #include <linux/ctype.h>
4 #include <linux/kmod.h>
5 #include <linux/sort.h>
6 #include <linux/delay.h>
7 #include <linux/mm.h>
8 #include <linux/sched/signal.h>
9 #include <linux/sched/task.h>
10 #include <linux/magic.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13 #include <linux/delayacct.h>
14 #include <linux/pid_namespace.h>
15 #include <linux/cgroupstats.h>
16 
17 #include <trace/events/cgroup.h>
18 
19 /*
20  * pidlists linger the following amount before being destroyed.  The goal
21  * is to avoid frequent destruction in the middle of consecutive read calls.
22  * Expiring in the middle is a performance problem, not a correctness one.
23  * 1 sec should be enough.
24  */
25 #define CGROUP_PIDLIST_DESTROY_DELAY	HZ
26 
27 /* Controllers blocked by the command line in v1 */
28 static u16 cgroup_no_v1_mask;
29 
30 /*
31  * pidlist destructions need to be flushed on cgroup destruction.  Use a
32  * separate workqueue as flush domain.
33  */
34 static struct workqueue_struct *cgroup_pidlist_destroy_wq;
35 
36 /*
37  * Protects cgroup_root->release_agent_path.  Modifying it also requires
38  * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
39  */
40 static DEFINE_SPINLOCK(release_agent_path_lock);
41 
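/* %true iff v1 use of the subsystem identified by @ssid is blocked via cgroup_no_v1= */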
42 bool cgroup1_ssid_disabled(int ssid)
43 {
44 	return cgroup_no_v1_mask & (1 << ssid);
45 }
46 
47 /**
48  * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
49  * @from: attach to all cgroups of a given task
50  * @tsk: the task to be attached
51  */
52 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
53 {
54 	struct cgroup_root *root;
55 	int retval = 0;
56 
57 	mutex_lock(&cgroup_mutex);
58 	percpu_down_write(&cgroup_threadgroup_rwsem);
59 	for_each_root(root) {
60 		struct cgroup *from_cgrp;
61 
62 		if (root == &cgrp_dfl_root)
63 			continue;
64 
65 		spin_lock_irq(&css_set_lock);
66 		from_cgrp = task_cgroup_from_root(from, root);
67 		spin_unlock_irq(&css_set_lock);
68 
69 		retval = cgroup_attach_task(from_cgrp, tsk, false);
70 		if (retval)
71 			break;
72 	}
73 	percpu_up_write(&cgroup_threadgroup_rwsem);
74 	mutex_unlock(&cgroup_mutex);
75 
76 	return retval;
77 }
78 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
79 
80 /**
81  * cgroup_transfer_tasks - move tasks from one cgroup to another
82  * @to: cgroup to which the tasks will be moved
83  * @from: cgroup in which the tasks currently reside
84  *
85  * Locking rules between cgroup_post_fork() and the migration path
86  * guarantee that, if a task is forking while being migrated, the new child
87  * ends up either visible in the source cgroup after the
88  * parent's migration is complete or put into the target cgroup.  No task
89  * can slip out of migration through forking.
90  */
91 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
92 {
93 	DEFINE_CGROUP_MGCTX(mgctx);
94 	struct cgrp_cset_link *link;
95 	struct css_task_iter it;
96 	struct task_struct *task;
97 	int ret;
98 
99 	if (cgroup_on_dfl(to))
100 		return -EINVAL;
101 
102 	if (!cgroup_may_migrate_to(to))
103 		return -EBUSY;
104 
105 	mutex_lock(&cgroup_mutex);
106 
107 	percpu_down_write(&cgroup_threadgroup_rwsem);
108 
109 	/* all tasks in @from are being moved, all csets are source */
110 	spin_lock_irq(&css_set_lock);
111 	list_for_each_entry(link, &from->cset_links, cset_link)
112 		cgroup_migrate_add_src(link->cset, to, &mgctx);
113 	spin_unlock_irq(&css_set_lock);
114 
115 	ret = cgroup_migrate_prepare_dst(&mgctx);
116 	if (ret)
117 		goto out_err;
118 
119 	/*
120 	 * Migrate tasks one-by-one until @from is empty.  This fails iff
121 	 * ->can_attach() fails.
122 	 */
123 	do {
124 		css_task_iter_start(&from->self, &it);
125 		task = css_task_iter_next(&it);
126 		if (task)
127 			get_task_struct(task);
128 		css_task_iter_end(&it);
129 
130 		if (task) {
131 			ret = cgroup_migrate(task, false, &mgctx);
132 			if (!ret)
133 				trace_cgroup_transfer_tasks(to, task, false);
134 			put_task_struct(task);
135 		}
136 	} while (task && !ret);
137 out_err:
138 	cgroup_migrate_finish(&mgctx);
139 	percpu_up_write(&cgroup_threadgroup_rwsem);
140 	mutex_unlock(&cgroup_mutex);
141 	return ret;
142 }
143 
144 /*
145  * Stuff for reading the 'tasks'/'procs' files.
146  *
147  * Reading these files can return large amounts of data if a cgroup has
148  * *lots* of attached tasks. So it may need several calls to read(),
149  * but we cannot guarantee that the information we produce is correct
150  * unless we produce it entirely atomically.
151  *
152  */
153 
154 /* which pidlist file are we talking about? */
155 enum cgroup_filetype {
156 	CGROUP_FILE_PROCS,
157 	CGROUP_FILE_TASKS,
158 };
159 
160 /*
161  * A pidlist is a list of pids that virtually represents the contents of one
162  * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
163  * a pair (one each for procs, tasks) for each pid namespace that's relevant
164  * to the cgroup.
165  */
166 struct cgroup_pidlist {
167 	/*
168 	 * used to find which pidlist is wanted. doesn't change as long as
169 	 * this particular list stays in the list.
170 	 */
171 	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
172 	/* array of xids */
173 	pid_t *list;
174 	/* how many elements the above list has */
175 	int length;
176 	/* each of these stored in a list by its cgroup */
177 	struct list_head links;
178 	/* pointer to the cgroup we belong to, for list removal purposes */
179 	struct cgroup *owner;
180 	/* for delayed destruction */
181 	struct delayed_work destroy_dwork;
182 };
183 
184 /*
185  * The following two functions "fix" the issue where there are more pids
186  * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
187  * TODO: replace with a kernel-wide solution to this problem
188  */
189 #define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
190 static void *pidlist_allocate(int count)
191 {
192 	if (PIDLIST_TOO_LARGE(count))
193 		return vmalloc(count * sizeof(pid_t));
194 	else
195 		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
196 }
197 
198 static void pidlist_free(void *p)
199 {
200 	kvfree(p);
201 }
202 
203 /*
204  * Used to destroy all pidlists still lingering on the destroy timer.  None
205  * should be left afterwards.
206  */
207 void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
208 {
209 	struct cgroup_pidlist *l, *tmp_l;
210 
211 	mutex_lock(&cgrp->pidlist_mutex);
212 	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
213 		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
214 	mutex_unlock(&cgrp->pidlist_mutex);
215 
216 	flush_workqueue(cgroup_pidlist_destroy_wq);
217 	BUG_ON(!list_empty(&cgrp->pidlists));
218 }
219 
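/*
 * Delayed-work callback that frees a lingering pidlist, unless it has been
 * re-queued (i.e. reused) in the meantime.
 */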
220 static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
221 {
222 	struct delayed_work *dwork = to_delayed_work(work);
223 	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
224 						destroy_dwork);
225 	struct cgroup_pidlist *tofree = NULL;
226 
227 	mutex_lock(&l->owner->pidlist_mutex);
228 
229 	/*
230 	 * Destroy iff we didn't get queued again.  The state won't change
231 	 * as destroy_dwork can only be queued while locked.
232 	 */
233 	if (!delayed_work_pending(dwork)) {
234 		list_del(&l->links);
235 		pidlist_free(l->list);
236 		put_pid_ns(l->key.ns);
237 		tofree = l;
238 	}
239 
240 	mutex_unlock(&l->owner->pidlist_mutex);
241 	kfree(tofree);
242 }
243 
244 /*
245  * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
246  * Returns the number of unique elements.
247  */
248 static int pidlist_uniq(pid_t *list, int length)
249 {
250 	int src, dest = 1;
251 
252 	/*
253 	 * we presume the 0th element is unique, so src starts at 1. trivial
254 	 * edge cases first; no work needs to be done for either
255 	 */
256 	if (length == 0 || length == 1)
257 		return length;
258 	/* src and dest walk down the list; dest counts unique elements */
259 	for (src = 1; src < length; src++) {
260 		/* find next unique element */
261 		while (list[src] == list[src-1]) {
262 			src++;
263 			if (src == length)
264 				goto after;
265 		}
266 		/* dest always points to where the next unique element goes */
267 		list[dest] = list[src];
268 		dest++;
269 	}
270 after:
271 	return dest;
272 }
273 
274 /*
275  * The two pid files - tasks and cgroup.procs - guarantee that the result
276  * is sorted, which forced this whole pidlist fiasco.  As pid order is
277  * different per namespace, each namespace needs a differently sorted list,
278  * making it impossible to use, for example, a single rbtree of member tasks
279  * sorted by task pointer.  As pidlists can be fairly large, allocating one
280  * per open file is dangerous, so cgroup had to implement a shared pool of
281  * pidlists keyed by cgroup and namespace.
282  */
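/* pid comparator for sort() */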
283 static int cmppid(const void *a, const void *b)
284 {
285 	return *(pid_t *)a - *(pid_t *)b;
286 }
287 
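/*
 * Look up the pidlist matching @type and the caller's pid namespace.
 * The caller must hold cgrp->pidlist_mutex.
 */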
288 static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
289 						  enum cgroup_filetype type)
290 {
291 	struct cgroup_pidlist *l;
292 	/* don't need task_nsproxy() if we're looking at ourself */
293 	struct pid_namespace *ns = task_active_pid_ns(current);
294 
295 	lockdep_assert_held(&cgrp->pidlist_mutex);
296 
297 	list_for_each_entry(l, &cgrp->pidlists, links)
298 		if (l->key.type == type && l->key.ns == ns)
299 			return l;
300 	return NULL;
301 }
302 
303 /*
304  * find the appropriate pidlist for our purpose (given procs vs tasks);
305  * the caller must already hold cgrp->pidlist_mutex.  Returns the matching
306  * pidlist, creating and linking a new one if necessary, or NULL if we're
307  * out of memory.
308  */
309 static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
310 						enum cgroup_filetype type)
311 {
312 	struct cgroup_pidlist *l;
313 
314 	lockdep_assert_held(&cgrp->pidlist_mutex);
315 
316 	l = cgroup_pidlist_find(cgrp, type);
317 	if (l)
318 		return l;
319 
320 	/* entry not found; create a new one */
321 	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
322 	if (!l)
323 		return l;
324 
325 	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
326 	l->key.type = type;
327 	/* don't need task_nsproxy() if we're looking at ourself */
328 	l->key.ns = get_pid_ns(task_active_pid_ns(current));
329 	l->owner = cgrp;
330 	list_add(&l->links, &cgrp->pidlists);
331 	return l;
332 }
333 
334 /**
335  * cgroup_task_count - count the number of tasks in a cgroup.
336  * @cgrp: the cgroup in question
337  *
338  * Return the number of tasks in the cgroup.  The returned number can be
339  * higher than the actual number of tasks due to css_set references from
340  * namespace roots and temporary usages.
341  */
342 static int cgroup_task_count(const struct cgroup *cgrp)
343 {
344 	int count = 0;
345 	struct cgrp_cset_link *link;
346 
347 	spin_lock_irq(&css_set_lock);
348 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
349 		count += atomic_read(&link->cset->refcount);
350 	spin_unlock_irq(&css_set_lock);
351 	return count;
352 }
353 
354 /*
355  * Load a cgroup's pidarray with either procs' tgids or tasks' pids
356  */
357 static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
358 			      struct cgroup_pidlist **lp)
359 {
360 	pid_t *array;
361 	int length;
362 	int pid, n = 0; /* used for populating the array */
363 	struct css_task_iter it;
364 	struct task_struct *tsk;
365 	struct cgroup_pidlist *l;
366 
367 	lockdep_assert_held(&cgrp->pidlist_mutex);
368 
369 	/*
370 	 * If cgroup gets more users after we read count, we won't have
371 	 * enough space - tough.  This race is indistinguishable to the
372 	 * caller from the case that the additional cgroup users didn't
373 	 * show up until sometime later on.
374 	 */
375 	length = cgroup_task_count(cgrp);
376 	array = pidlist_allocate(length);
377 	if (!array)
378 		return -ENOMEM;
379 	/* now, populate the array */
380 	css_task_iter_start(&cgrp->self, &it);
381 	while ((tsk = css_task_iter_next(&it))) {
382 		if (unlikely(n == length))
383 			break;
384 		/* get tgid or pid for procs or tasks file respectively */
385 		if (type == CGROUP_FILE_PROCS)
386 			pid = task_tgid_vnr(tsk);
387 		else
388 			pid = task_pid_vnr(tsk);
389 		if (pid > 0) /* make sure to only use valid results */
390 			array[n++] = pid;
391 	}
392 	css_task_iter_end(&it);
393 	length = n;
394 	/* now sort & (if procs) strip out duplicates */
395 	sort(array, length, sizeof(pid_t), cmppid, NULL);
396 	if (type == CGROUP_FILE_PROCS)
397 		length = pidlist_uniq(array, length);
398 
399 	l = cgroup_pidlist_find_create(cgrp, type);
400 	if (!l) {
401 		pidlist_free(array);
402 		return -ENOMEM;
403 	}
404 
405 	/* store array, freeing old if necessary */
406 	pidlist_free(l->list);
407 	l->list = array;
408 	l->length = length;
409 	*lp = l;
410 	return 0;
411 }
412 
413 /*
414  * seq_file methods for the tasks/procs files. The seq_file position is the
415  * next pid to display; the seq_file iterator is a pointer to the pid
416  * in the pidlist's ->list array.
417  */
418 
419 static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
420 {
421 	/*
422 	 * Initially we receive a position value that corresponds to
423 	 * one more than the last pid shown (or 0 on the first call or
424 	 * after a seek to the start). Use a binary search to find the
425 	 * next pid to display, if any.
426 	 */
427 	struct kernfs_open_file *of = s->private;
428 	struct cgroup *cgrp = seq_css(s)->cgroup;
429 	struct cgroup_pidlist *l;
430 	enum cgroup_filetype type = seq_cft(s)->private;
431 	int index = 0, pid = *pos;
432 	int *iter, ret;
433 
434 	mutex_lock(&cgrp->pidlist_mutex);
435 
436 	/*
437 	 * !NULL @of->priv indicates that this isn't the first start()
438 	 * after open.  If the matching pidlist is around, we can use that.
439 	 * Look for it.  Note that @of->priv can't be used directly.  It
440 	 * could already have been destroyed.
441 	 */
442 	if (of->priv)
443 		of->priv = cgroup_pidlist_find(cgrp, type);
444 
445 	/*
446 	 * Either this is the first start() after open or the matching
447 	 * pidlist has been destroyed in between.  Create a new one.
448 	 */
449 	if (!of->priv) {
450 		ret = pidlist_array_load(cgrp, type,
451 					 (struct cgroup_pidlist **)&of->priv);
452 		if (ret)
453 			return ERR_PTR(ret);
454 	}
455 	l = of->priv;
456 
457 	if (pid) {
458 		int end = l->length;
459 
460 		while (index < end) {
461 			int mid = (index + end) / 2;
462 			if (l->list[mid] == pid) {
463 				index = mid;
464 				break;
465 			} else if (l->list[mid] <= pid)
466 				index = mid + 1;
467 			else
468 				end = mid;
469 		}
470 	}
471 	/* If we're off the end of the array, we're done */
472 	if (index >= l->length)
473 		return NULL;
474 	/* Update the abstract position to be the actual pid that we found */
475 	iter = l->list + index;
476 	*pos = *iter;
477 	return iter;
478 }
479 
480 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
481 {
482 	struct kernfs_open_file *of = s->private;
483 	struct cgroup_pidlist *l = of->priv;
484 
485 	if (l)
486 		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
487 				 CGROUP_PIDLIST_DESTROY_DELAY);
488 	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
489 }
490 
491 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
492 {
493 	struct kernfs_open_file *of = s->private;
494 	struct cgroup_pidlist *l = of->priv;
495 	pid_t *p = v;
496 	pid_t *end = l->list + l->length;
497 	/*
498 	 * Advance to the next pid in the array. If this goes off the
499 	 * end, we're done
500 	 */
501 	p++;
502 	if (p >= end) {
503 		return NULL;
504 	} else {
505 		*pos = *p;
506 		return p;
507 	}
508 }
509 
510 static int cgroup_pidlist_show(struct seq_file *s, void *v)
511 {
512 	seq_printf(s, "%d\n", *(int *)v);
513 
514 	return 0;
515 }
516 
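/* write handler for the v1 "tasks" file - moves a single thread, not the whole thread group */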
517 static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
518 				  char *buf, size_t nbytes, loff_t off)
519 {
520 	return __cgroup_procs_write(of, buf, nbytes, off, false);
521 }
522 
523 static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
524 					  char *buf, size_t nbytes, loff_t off)
525 {
526 	struct cgroup *cgrp;
527 
528 	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
529 
530 	cgrp = cgroup_kn_lock_live(of->kn, false);
531 	if (!cgrp)
532 		return -ENODEV;
533 	spin_lock(&release_agent_path_lock);
534 	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
535 		sizeof(cgrp->root->release_agent_path));
536 	spin_unlock(&release_agent_path_lock);
537 	cgroup_kn_unlock(of->kn);
538 	return nbytes;
539 }
540 
541 static int cgroup_release_agent_show(struct seq_file *seq, void *v)
542 {
543 	struct cgroup *cgrp = seq_css(seq)->cgroup;
544 
545 	spin_lock(&release_agent_path_lock);
546 	seq_puts(seq, cgrp->root->release_agent_path);
547 	spin_unlock(&release_agent_path_lock);
548 	seq_putc(seq, '\n');
549 	return 0;
550 }
551 
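/* "cgroup.sane_behavior" always reads 0 on v1 hierarchies */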
552 static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
553 {
554 	seq_puts(seq, "0\n");
555 	return 0;
556 }
557 
558 static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
559 					 struct cftype *cft)
560 {
561 	return notify_on_release(css->cgroup);
562 }
563 
564 static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
565 					  struct cftype *cft, u64 val)
566 {
567 	if (val)
568 		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
569 	else
570 		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
571 	return 0;
572 }
573 
574 static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
575 				      struct cftype *cft)
576 {
577 	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
578 }
579 
580 static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
581 				       struct cftype *cft, u64 val)
582 {
583 	if (val)
584 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
585 	else
586 		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
587 	return 0;
588 }
589 
590 /* cgroup core interface files for the legacy hierarchies */
591 struct cftype cgroup1_base_files[] = {
592 	{
593 		.name = "cgroup.procs",
594 		.seq_start = cgroup_pidlist_start,
595 		.seq_next = cgroup_pidlist_next,
596 		.seq_stop = cgroup_pidlist_stop,
597 		.seq_show = cgroup_pidlist_show,
598 		.private = CGROUP_FILE_PROCS,
599 		.write = cgroup_procs_write,
600 	},
601 	{
602 		.name = "cgroup.clone_children",
603 		.read_u64 = cgroup_clone_children_read,
604 		.write_u64 = cgroup_clone_children_write,
605 	},
606 	{
607 		.name = "cgroup.sane_behavior",
608 		.flags = CFTYPE_ONLY_ON_ROOT,
609 		.seq_show = cgroup_sane_behavior_show,
610 	},
611 	{
612 		.name = "tasks",
613 		.seq_start = cgroup_pidlist_start,
614 		.seq_next = cgroup_pidlist_next,
615 		.seq_stop = cgroup_pidlist_stop,
616 		.seq_show = cgroup_pidlist_show,
617 		.private = CGROUP_FILE_TASKS,
618 		.write = cgroup_tasks_write,
619 	},
620 	{
621 		.name = "notify_on_release",
622 		.read_u64 = cgroup_read_notify_on_release,
623 		.write_u64 = cgroup_write_notify_on_release,
624 	},
625 	{
626 		.name = "release_agent",
627 		.flags = CFTYPE_ONLY_ON_ROOT,
628 		.seq_show = cgroup_release_agent_show,
629 		.write = cgroup_release_agent_write,
630 		.max_write_len = PATH_MAX - 1,
631 	},
632 	{ }	/* terminate */
633 };
634 
635 /* Display information about each subsystem and each hierarchy */
636 static int proc_cgroupstats_show(struct seq_file *m, void *v)
637 {
638 	struct cgroup_subsys *ss;
639 	int i;
640 
641 	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
642 	/*
643 	 * ideally we don't want subsystems moving around while we do this.
644 	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
645 	 * subsys/hierarchy state.
646 	 */
647 	mutex_lock(&cgroup_mutex);
648 
649 	for_each_subsys(ss, i)
650 		seq_printf(m, "%s\t%d\t%d\t%d\n",
651 			   ss->legacy_name, ss->root->hierarchy_id,
652 			   atomic_read(&ss->root->nr_cgrps),
653 			   cgroup_ssid_enabled(i));
654 
655 	mutex_unlock(&cgroup_mutex);
656 	return 0;
657 }
658 
659 static int cgroupstats_open(struct inode *inode, struct file *file)
660 {
661 	return single_open(file, proc_cgroupstats_show, NULL);
662 }
663 
664 const struct file_operations proc_cgroupstats_operations = {
665 	.open = cgroupstats_open,
666 	.read = seq_read,
667 	.llseek = seq_lseek,
668 	.release = single_release,
669 };
670 
671 /**
672  * cgroupstats_build - build and fill cgroupstats
673  * @stats: cgroupstats to fill information into
674  * @dentry: A dentry entry belonging to the cgroup for which stats have
675  * been requested.
676  *
677  * Build and fill cgroupstats so that taskstats can export it to user
678  * space.
679  */
680 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
681 {
682 	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
683 	struct cgroup *cgrp;
684 	struct css_task_iter it;
685 	struct task_struct *tsk;
686 
687 	/* it should be a kernfs_node belonging to cgroupfs and must be a directory */
688 	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
689 	    kernfs_type(kn) != KERNFS_DIR)
690 		return -EINVAL;
691 
692 	mutex_lock(&cgroup_mutex);
693 
694 	/*
695 	 * We aren't being called from kernfs and there's no guarantee on
696 	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
697 	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
698 	 */
699 	rcu_read_lock();
700 	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
701 	if (!cgrp || cgroup_is_dead(cgrp)) {
702 		rcu_read_unlock();
703 		mutex_unlock(&cgroup_mutex);
704 		return -ENOENT;
705 	}
706 	rcu_read_unlock();
707 
708 	css_task_iter_start(&cgrp->self, &it);
709 	while ((tsk = css_task_iter_next(&it))) {
710 		switch (tsk->state) {
711 		case TASK_RUNNING:
712 			stats->nr_running++;
713 			break;
714 		case TASK_INTERRUPTIBLE:
715 			stats->nr_sleeping++;
716 			break;
717 		case TASK_UNINTERRUPTIBLE:
718 			stats->nr_uninterruptible++;
719 			break;
720 		case TASK_STOPPED:
721 			stats->nr_stopped++;
722 			break;
723 		default:
724 			if (delayacct_is_task_waiting_on_io(tsk))
725 				stats->nr_io_wait++;
726 			break;
727 		}
728 	}
729 	css_task_iter_end(&it);
730 
731 	mutex_unlock(&cgroup_mutex);
732 	return 0;
733 }
734 
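/*
 * Schedule the release agent if @cgrp has notify_on_release set, is empty,
 * has no online children and isn't already dead.
 */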
735 void cgroup1_check_for_release(struct cgroup *cgrp)
736 {
737 	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
738 	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
739 		schedule_work(&cgrp->release_agent_work);
740 }
741 
742 /*
743  * Notify userspace when a cgroup is released, by running the
744  * configured release agent with the name of the cgroup (path
745  * relative to the root of cgroup file system) as the argument.
746  *
747  * Most likely, this user command will try to rmdir this cgroup.
748  *
749  * This races with the possibility that some other task will be
750  * attached to this cgroup before it is removed, or that some other
751  * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
752  * The presumed 'rmdir' will fail quietly if this cgroup is no longer
753  * unused, and this cgroup will be reprieved from its death sentence,
754  * to continue to serve a useful existence.  Next time it's released,
755  * we will get notified again, if it still has 'notify_on_release' set.
756  *
757  * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
758  * means only wait until the task is successfully execve()'d.  The
759  * separate release agent task is forked by call_usermodehelper(),
760  * then control in this thread returns here, without waiting for the
761  * release agent task.  We don't bother to wait because the caller of
762  * this routine has no use for the exit status of the release agent
763  * task, so no sense holding our caller up for that.
764  */
765 void cgroup1_release_agent(struct work_struct *work)
766 {
767 	struct cgroup *cgrp =
768 		container_of(work, struct cgroup, release_agent_work);
769 	char *pathbuf = NULL, *agentbuf = NULL;
770 	char *argv[3], *envp[3];
771 	int ret;
772 
773 	mutex_lock(&cgroup_mutex);
774 
775 	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
776 	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
777 	if (!pathbuf || !agentbuf)
778 		goto out;
779 
780 	spin_lock_irq(&css_set_lock);
781 	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
782 	spin_unlock_irq(&css_set_lock);
783 	if (ret < 0 || ret >= PATH_MAX)
784 		goto out;
785 
786 	argv[0] = agentbuf;
787 	argv[1] = pathbuf;
788 	argv[2] = NULL;
789 
790 	/* minimal command environment */
791 	envp[0] = "HOME=/";
792 	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
793 	envp[2] = NULL;
794 
795 	mutex_unlock(&cgroup_mutex);
796 	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
797 	goto out_free;
798 out:
799 	mutex_unlock(&cgroup_mutex);
800 out_free:
801 	kfree(agentbuf);
802 	kfree(pathbuf);
803 }
804 
805 /*
806  * cgroup1_rename - Only allow simple rename of directories in place.
807  */
808 static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
809 			  const char *new_name_str)
810 {
811 	struct cgroup *cgrp = kn->priv;
812 	int ret;
813 
814 	if (kernfs_type(kn) != KERNFS_DIR)
815 		return -ENOTDIR;
816 	if (kn->parent != new_parent)
817 		return -EIO;
818 
819 	/*
820 	 * We're gonna grab cgroup_mutex which nests outside kernfs
821 	 * active_ref.  kernfs_rename() doesn't require active_ref
822 	 * protection.  Break them before grabbing cgroup_mutex.
823 	 */
824 	kernfs_break_active_protection(new_parent);
825 	kernfs_break_active_protection(kn);
826 
827 	mutex_lock(&cgroup_mutex);
828 
829 	ret = kernfs_rename(kn, new_parent, new_name_str);
830 	if (!ret)
831 		trace_cgroup_rename(cgrp);
832 
833 	mutex_unlock(&cgroup_mutex);
834 
835 	kernfs_unbreak_active_protection(kn);
836 	kernfs_unbreak_active_protection(new_parent);
837 	return ret;
838 }
839 
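/* kernfs callback that shows a v1 hierarchy's mount options, e.g. in /proc/mounts */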
840 static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
841 {
842 	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
843 	struct cgroup_subsys *ss;
844 	int ssid;
845 
846 	for_each_subsys(ss, ssid)
847 		if (root->subsys_mask & (1 << ssid))
848 			seq_show_option(seq, ss->legacy_name, NULL);
849 	if (root->flags & CGRP_ROOT_NOPREFIX)
850 		seq_puts(seq, ",noprefix");
851 	if (root->flags & CGRP_ROOT_XATTR)
852 		seq_puts(seq, ",xattr");
853 
854 	spin_lock(&release_agent_path_lock);
855 	if (strlen(root->release_agent_path))
856 		seq_show_option(seq, "release_agent",
857 				root->release_agent_path);
858 	spin_unlock(&release_agent_path_lock);
859 
860 	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
861 		seq_puts(seq, ",clone_children");
862 	if (strlen(root->name))
863 		seq_show_option(seq, "name", root->name);
864 	return 0;
865 }
866 
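/* parse a comma-separated v1 mount option string in @data into @opts */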
867 static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
868 {
869 	char *token, *o = data;
870 	bool all_ss = false, one_ss = false;
871 	u16 mask = U16_MAX;
872 	struct cgroup_subsys *ss;
873 	int nr_opts = 0;
874 	int i;
875 
876 #ifdef CONFIG_CPUSETS
877 	mask = ~((u16)1 << cpuset_cgrp_id);
878 #endif
879 
880 	memset(opts, 0, sizeof(*opts));
881 
882 	while ((token = strsep(&o, ",")) != NULL) {
883 		nr_opts++;
884 
885 		if (!*token)
886 			return -EINVAL;
887 		if (!strcmp(token, "none")) {
888 			/* Explicitly have no subsystems */
889 			opts->none = true;
890 			continue;
891 		}
892 		if (!strcmp(token, "all")) {
893 			/* Mutually exclusive option 'all' + subsystem name */
894 			if (one_ss)
895 				return -EINVAL;
896 			all_ss = true;
897 			continue;
898 		}
899 		if (!strcmp(token, "noprefix")) {
900 			opts->flags |= CGRP_ROOT_NOPREFIX;
901 			continue;
902 		}
903 		if (!strcmp(token, "clone_children")) {
904 			opts->cpuset_clone_children = true;
905 			continue;
906 		}
907 		if (!strcmp(token, "xattr")) {
908 			opts->flags |= CGRP_ROOT_XATTR;
909 			continue;
910 		}
911 		if (!strncmp(token, "release_agent=", 14)) {
912 			/* Specifying two release agents is forbidden */
913 			if (opts->release_agent)
914 				return -EINVAL;
915 			opts->release_agent =
916 				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
917 			if (!opts->release_agent)
918 				return -ENOMEM;
919 			continue;
920 		}
921 		if (!strncmp(token, "name=", 5)) {
922 			const char *name = token + 5;
923 			/* Can't specify an empty name */
924 			if (!strlen(name))
925 				return -EINVAL;
926 			/* Must match [\w.-]+ */
927 			for (i = 0; i < strlen(name); i++) {
928 				char c = name[i];
929 				if (isalnum(c))
930 					continue;
931 				if ((c == '.') || (c == '-') || (c == '_'))
932 					continue;
933 				return -EINVAL;
934 			}
935 			/* Specifying two names is forbidden */
936 			if (opts->name)
937 				return -EINVAL;
938 			opts->name = kstrndup(name,
939 					      MAX_CGROUP_ROOT_NAMELEN - 1,
940 					      GFP_KERNEL);
941 			if (!opts->name)
942 				return -ENOMEM;
943 
944 			continue;
945 		}
946 
947 		for_each_subsys(ss, i) {
948 			if (strcmp(token, ss->legacy_name))
949 				continue;
950 			if (!cgroup_ssid_enabled(i))
951 				continue;
952 			if (cgroup1_ssid_disabled(i))
953 				continue;
954 
955 			/* Mutually exclusive option 'all' + subsystem name */
956 			if (all_ss)
957 				return -EINVAL;
958 			opts->subsys_mask |= (1 << i);
959 			one_ss = true;
960 
961 			break;
962 		}
963 		if (i == CGROUP_SUBSYS_COUNT)
964 			return -ENOENT;
965 	}
966 
967 	/*
968 	 * If the 'all' option was specified, select all the subsystems;
969 	 * otherwise, if none of the 'none', 'name=' or subsystem name
970 	 * options was specified, default to 'all'.
971 	 */
972 	if (all_ss || (!one_ss && !opts->none && !opts->name))
973 		for_each_subsys(ss, i)
974 			if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
975 				opts->subsys_mask |= (1 << i);
976 
977 	/*
978 	 * We either have to specify by name or by subsystems. (So all
979 	 * empty hierarchies must have a name).
980 	 */
981 	if (!opts->subsys_mask && !opts->name)
982 		return -EINVAL;
983 
984 	/*
985 	 * Option noprefix was introduced just for backward compatibility
986 	 * with the old cpuset, so we allow noprefix only if mounting just
987 	 * the cpuset subsystem.
988 	 */
989 	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
990 		return -EINVAL;
991 
992 	/* Can't specify "none" and some subsystems */
993 	if (opts->subsys_mask && opts->none)
994 		return -EINVAL;
995 
996 	return 0;
997 }
998 
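/*
 * kernfs remount callback for v1 hierarchies.  Subsystems may be rebound and
 * the release agent updated, but only while the hierarchy has no child
 * cgroups; flags and name must stay the same.
 */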
999 static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
1000 {
1001 	int ret = 0;
1002 	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1003 	struct cgroup_sb_opts opts;
1004 	u16 added_mask, removed_mask;
1005 
1006 	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1007 
1008 	/* See what subsystems are wanted */
1009 	ret = parse_cgroupfs_options(data, &opts);
1010 	if (ret)
1011 		goto out_unlock;
1012 
1013 	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
1014 		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
1015 			task_tgid_nr(current), current->comm);
1016 
1017 	added_mask = opts.subsys_mask & ~root->subsys_mask;
1018 	removed_mask = root->subsys_mask & ~opts.subsys_mask;
1019 
1020 	/* Don't allow flags or name to change at remount */
1021 	if ((opts.flags ^ root->flags) ||
1022 	    (opts.name && strcmp(opts.name, root->name))) {
1023 		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
1024 		       opts.flags, opts.name ?: "", root->flags, root->name);
1025 		ret = -EINVAL;
1026 		goto out_unlock;
1027 	}
1028 
1029 	/* remounting is not allowed for populated hierarchies */
1030 	if (!list_empty(&root->cgrp.self.children)) {
1031 		ret = -EBUSY;
1032 		goto out_unlock;
1033 	}
1034 
1035 	ret = rebind_subsystems(root, added_mask);
1036 	if (ret)
1037 		goto out_unlock;
1038 
1039 	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));
1040 
1041 	if (opts.release_agent) {
1042 		spin_lock(&release_agent_path_lock);
1043 		strcpy(root->release_agent_path, opts.release_agent);
1044 		spin_unlock(&release_agent_path_lock);
1045 	}
1046 
1047 	trace_cgroup_remount(root);
1048 
1049 out_unlock:
1050 	kfree(opts.release_agent);
1051 	kfree(opts.name);
1052 	mutex_unlock(&cgroup_mutex);
1053 	return ret;
1054 }
1055 
1056 struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
1057 	.rename			= cgroup1_rename,
1058 	.show_options		= cgroup1_show_options,
1059 	.remount_fs		= cgroup1_remount,
1060 	.mkdir			= cgroup_mkdir,
1061 	.rmdir			= cgroup_rmdir,
1062 	.show_path		= cgroup_show_path,
1063 };
1064 
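/*
 * Mount a v1 hierarchy: reuse an existing live root whose name and subsystem
 * mask match the requested options, or set up a new root otherwise.
 */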
1065 struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
1066 			     void *data, unsigned long magic,
1067 			     struct cgroup_namespace *ns)
1068 {
1069 	struct super_block *pinned_sb = NULL;
1070 	struct cgroup_sb_opts opts;
1071 	struct cgroup_root *root;
1072 	struct cgroup_subsys *ss;
1073 	struct dentry *dentry;
1074 	int i, ret;
1075 
1076 	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1077 
1078 	/* First find the desired set of subsystems */
1079 	ret = parse_cgroupfs_options(data, &opts);
1080 	if (ret)
1081 		goto out_unlock;
1082 
1083 	/*
1084 	 * Destruction of cgroup root is asynchronous, so subsystems may
1085 	 * still be dying after the previous unmount.  Let's drain the
1086 	 * dying subsystems.  We just need to ensure that the ones
1087 	 * unmounted previously finish dying and don't care about new ones
1088 	 * starting.  Testing ref liveness is good enough.
1089 	 */
1090 	for_each_subsys(ss, i) {
1091 		if (!(opts.subsys_mask & (1 << i)) ||
1092 		    ss->root == &cgrp_dfl_root)
1093 			continue;
1094 
1095 		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
1096 			mutex_unlock(&cgroup_mutex);
1097 			msleep(10);
1098 			ret = restart_syscall();
1099 			goto out_free;
1100 		}
1101 		cgroup_put(&ss->root->cgrp);
1102 	}
1103 
1104 	for_each_root(root) {
1105 		bool name_match = false;
1106 
1107 		if (root == &cgrp_dfl_root)
1108 			continue;
1109 
1110 		/*
1111 		 * If we asked for a name then it must match.  Also, if
1112 		 * name matches but subsys_mask doesn't, we should fail.
1113 		 * Remember whether name matched.
1114 		 */
1115 		if (opts.name) {
1116 			if (strcmp(opts.name, root->name))
1117 				continue;
1118 			name_match = true;
1119 		}
1120 
1121 		/*
1122 		 * If we asked for subsystems (or explicitly for no
1123 		 * subsystems) then they must match.
1124 		 */
1125 		if ((opts.subsys_mask || opts.none) &&
1126 		    (opts.subsys_mask != root->subsys_mask)) {
1127 			if (!name_match)
1128 				continue;
1129 			ret = -EBUSY;
1130 			goto out_unlock;
1131 		}
1132 
1133 		if (root->flags ^ opts.flags)
1134 			pr_warn("new mount options do not match the existing superblock, will be ignored\n");
1135 
1136 		/*
1137 		 * We want to reuse @root whose lifetime is governed by its
1138 		 * ->cgrp.  Let's check whether @root is alive and keep it
1139 		 * that way.  As cgroup_kill_sb() can happen anytime, we
1140 		 * want to block it by pinning the sb so that @root doesn't
1141 		 * get killed before mount is complete.
1142 		 *
1143 		 * With the sb pinned, tryget_live can reliably indicate
1144 		 * whether @root can be reused.  If it's being killed,
1145 		 * drain it.  We could use a wait_queue for the wait but this
1146 		 * path is super cold.  Let's just sleep a bit and retry.
1147 		 */
1148 		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
1149 		if (IS_ERR(pinned_sb) ||
1150 		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
1151 			mutex_unlock(&cgroup_mutex);
1152 			if (!IS_ERR_OR_NULL(pinned_sb))
1153 				deactivate_super(pinned_sb);
1154 			msleep(10);
1155 			ret = restart_syscall();
1156 			goto out_free;
1157 		}
1158 
1159 		ret = 0;
1160 		goto out_unlock;
1161 	}
1162 
1163 	/*
1164 	 * No such thing, create a new one.  name= matching without subsys
1165 	 * specification is allowed for already existing hierarchies but we
1166 	 * can't create a new one without subsys specification.
1167 	 */
1168 	if (!opts.subsys_mask && !opts.none) {
1169 		ret = -EINVAL;
1170 		goto out_unlock;
1171 	}
1172 
1173 	/* Hierarchies may only be created in the initial cgroup namespace. */
1174 	if (ns != &init_cgroup_ns) {
1175 		ret = -EPERM;
1176 		goto out_unlock;
1177 	}
1178 
1179 	root = kzalloc(sizeof(*root), GFP_KERNEL);
1180 	if (!root) {
1181 		ret = -ENOMEM;
1182 		goto out_unlock;
1183 	}
1184 
1185 	init_cgroup_root(root, &opts);
1186 
1187 	ret = cgroup_setup_root(root, opts.subsys_mask);
1188 	if (ret)
1189 		cgroup_free_root(root);
1190 
1191 out_unlock:
1192 	mutex_unlock(&cgroup_mutex);
1193 out_free:
1194 	kfree(opts.release_agent);
1195 	kfree(opts.name);
1196 
1197 	if (ret)
1198 		return ERR_PTR(ret);
1199 
1200 	dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
1201 				 CGROUP_SUPER_MAGIC, ns);
1202 
1203 	/*
1204 	 * If @pinned_sb, we're reusing an existing root and holding an
1205 	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
1206 	 */
1207 	if (pinned_sb)
1208 		deactivate_super(pinned_sb);
1209 
1210 	return dentry;
1211 }
1212 
1213 static int __init cgroup1_wq_init(void)
1214 {
1215 	/*
1216 	 * Used to destroy pidlists and kept separate to serve as the flush domain.
1217 	 * Cap @max_active at 1 too.
1218 	 */
1219 	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
1220 						    0, 1);
1221 	BUG_ON(!cgroup_pidlist_destroy_wq);
1222 	return 0;
1223 }
1224 core_initcall(cgroup1_wq_init);
1225 
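/* parse the cgroup_no_v1= boot parameter: "all" or a comma-separated list of controller names */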
1226 static int __init cgroup_no_v1(char *str)
1227 {
1228 	struct cgroup_subsys *ss;
1229 	char *token;
1230 	int i;
1231 
1232 	while ((token = strsep(&str, ",")) != NULL) {
1233 		if (!*token)
1234 			continue;
1235 
1236 		if (!strcmp(token, "all")) {
1237 			cgroup_no_v1_mask = U16_MAX;
1238 			break;
1239 		}
1240 
1241 		for_each_subsys(ss, i) {
1242 			if (strcmp(token, ss->name) &&
1243 			    strcmp(token, ss->legacy_name))
1244 				continue;
1245 
1246 			cgroup_no_v1_mask |= 1 << i;
1247 		}
1248 	}
1249 	return 1;
1250 }
1251 __setup("cgroup_no_v1=", cgroup_no_v1);
1252 
1253 
1254 #ifdef CONFIG_CGROUP_DEBUG
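/*
 * The "debug" controller exposes cgroup internals (css_set pointers,
 * refcounts and per-css_set task lists) through read-only control files.
 */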
1255 static struct cgroup_subsys_state *
1256 debug_css_alloc(struct cgroup_subsys_state *parent_css)
1257 {
1258 	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
1259 
1260 	if (!css)
1261 		return ERR_PTR(-ENOMEM);
1262 
1263 	return css;
1264 }
1265 
1266 static void debug_css_free(struct cgroup_subsys_state *css)
1267 {
1268 	kfree(css);
1269 }
1270 
1271 static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
1272 				struct cftype *cft)
1273 {
1274 	return cgroup_task_count(css->cgroup);
1275 }
1276 
1277 static u64 current_css_set_read(struct cgroup_subsys_state *css,
1278 				struct cftype *cft)
1279 {
1280 	return (u64)(unsigned long)current->cgroups;
1281 }
1282 
1283 static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
1284 					 struct cftype *cft)
1285 {
1286 	u64 count;
1287 
1288 	rcu_read_lock();
1289 	count = atomic_read(&task_css_set(current)->refcount);
1290 	rcu_read_unlock();
1291 	return count;
1292 }
1293 
1294 static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
1295 {
1296 	struct cgrp_cset_link *link;
1297 	struct css_set *cset;
1298 	char *name_buf;
1299 
1300 	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
1301 	if (!name_buf)
1302 		return -ENOMEM;
1303 
1304 	spin_lock_irq(&css_set_lock);
1305 	rcu_read_lock();
1306 	cset = rcu_dereference(current->cgroups);
1307 	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
1308 		struct cgroup *c = link->cgrp;
1309 
1310 		cgroup_name(c, name_buf, NAME_MAX + 1);
1311 		seq_printf(seq, "Root %d group %s\n",
1312 			   c->root->hierarchy_id, name_buf);
1313 	}
1314 	rcu_read_unlock();
1315 	spin_unlock_irq(&css_set_lock);
1316 	kfree(name_buf);
1317 	return 0;
1318 }
1319 
1320 #define MAX_TASKS_SHOWN_PER_CSS 25
1321 static int cgroup_css_links_read(struct seq_file *seq, void *v)
1322 {
1323 	struct cgroup_subsys_state *css = seq_css(seq);
1324 	struct cgrp_cset_link *link;
1325 
1326 	spin_lock_irq(&css_set_lock);
1327 	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
1328 		struct css_set *cset = link->cset;
1329 		struct task_struct *task;
1330 		int count = 0;
1331 
1332 		seq_printf(seq, "css_set %pK\n", cset);
1333 
1334 		list_for_each_entry(task, &cset->tasks, cg_list) {
1335 			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
1336 				goto overflow;
1337 			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
1338 		}
1339 
1340 		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
1341 			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
1342 				goto overflow;
1343 			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
1344 		}
1345 		continue;
1346 	overflow:
1347 		seq_puts(seq, "  ...\n");
1348 	}
1349 	spin_unlock_irq(&css_set_lock);
1350 	return 0;
1351 }
1352 
1353 static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
1354 {
1355 	return (!cgroup_is_populated(css->cgroup) &&
1356 		!css_has_online_children(&css->cgroup->self));
1357 }
1358 
1359 static struct cftype debug_files[] =  {
1360 	{
1361 		.name = "taskcount",
1362 		.read_u64 = debug_taskcount_read,
1363 	},
1364 
1365 	{
1366 		.name = "current_css_set",
1367 		.read_u64 = current_css_set_read,
1368 	},
1369 
1370 	{
1371 		.name = "current_css_set_refcount",
1372 		.read_u64 = current_css_set_refcount_read,
1373 	},
1374 
1375 	{
1376 		.name = "current_css_set_cg_links",
1377 		.seq_show = current_css_set_cg_links_read,
1378 	},
1379 
1380 	{
1381 		.name = "cgroup_css_links",
1382 		.seq_show = cgroup_css_links_read,
1383 	},
1384 
1385 	{
1386 		.name = "releasable",
1387 		.read_u64 = releasable_read,
1388 	},
1389 
1390 	{ }	/* terminate */
1391 };
1392 
1393 struct cgroup_subsys debug_cgrp_subsys = {
1394 	.css_alloc = debug_css_alloc,
1395 	.css_free = debug_css_free,
1396 	.legacy_cftypes = debug_files,
1397 };
1398 #endif /* CONFIG_CGROUP_DEBUG */
1399