xref: /openbmc/linux/kernel/cgroup/cgroup-v1.c (revision 174cd4b1)
#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the command line in v1 */
static u16 cgroup_no_v1_mask;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		if (root == &cgrp_dfl_root)
			continue;

		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
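
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * driver that spawns a kernel worker on behalf of a userspace task can
 * mirror that task's cgroup membership on every v1 hierarchy, roughly
 * as the vhost driver does:
 *
 *	static int my_attach_worker(struct task_struct *owner,
 *				    struct task_struct *worker)
 *	{
 *		return cgroup_attach_task_all(owner, worker);
 *	}
 *
 * my_attach_worker() is a hypothetical name; only the
 * cgroup_attach_task_all() call is real.
 */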

/**
 * cgroup_transfer_tasks - move tasks from one cgroup to another
 * @to: cgroup to which the tasks will be moved
 * @from: cgroup in which the tasks currently reside
 *
 * Locking rules between cgroup_post_fork() and the migration path
 * guarantee that a task forking while being migrated either becomes
 * visible in the source cgroup after the parent's migration is
 * complete or is put into the target cgroup.  No task can slip out of
 * migration through forking.
 */
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
	DEFINE_CGROUP_MGCTX(mgctx);
	struct cgrp_cset_link *link;
	struct css_task_iter it;
	struct task_struct *task;
	int ret;

	if (cgroup_on_dfl(to))
		return -EINVAL;

	if (!cgroup_may_migrate_to(to))
		return -EBUSY;

	mutex_lock(&cgroup_mutex);

	percpu_down_write(&cgroup_threadgroup_rwsem);

	/* all tasks in @from are being moved, all csets are source */
	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &from->cset_links, cset_link)
		cgroup_migrate_add_src(link->cset, to, &mgctx);
	spin_unlock_irq(&css_set_lock);

	ret = cgroup_migrate_prepare_dst(&mgctx);
	if (ret)
		goto out_err;

	/*
	 * Migrate tasks one-by-one until @from is empty.  This fails iff
	 * ->can_attach() fails.
	 */
	do {
		css_task_iter_start(&from->self, &it);
		task = css_task_iter_next(&it);
		if (task)
			get_task_struct(task);
		css_task_iter_end(&it);

		if (task) {
			ret = cgroup_migrate(task, false, &mgctx);
			if (!ret)
				trace_cgroup_transfer_tasks(to, task, false);
			put_task_struct(task);
		}
	} while (task && !ret);
out_err:
	cgroup_migrate_finish(&mgctx);
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
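
/*
 * Example usage (an illustrative sketch): the v1 cpuset controller
 * empties a cpuset whose CPUs or memory nodes went offline by
 * transferring the remaining tasks to an ancestor, along the lines of:
 *
 *	static void my_evacuate(struct cgroup *parent, struct cgroup *cs)
 *	{
 *		if (cgroup_transfer_tasks(parent, cs))
 *			pr_err("cgroup: failed to transfer tasks\n");
 *	}
 *
 * my_evacuate() is a hypothetical wrapper; see
 * remove_tasks_in_empty_cpuset() in kernel/cgroup/cpuset.c for the
 * real caller.
 */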

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 */

/* which pidlist file are we talking about? */
enum cgroup_filetype {
	CGROUP_FILE_PROCS,
	CGROUP_FILE_TASKS,
};

/*
 * A pidlist is a list of pids that virtually represents the contents of one
 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
 * a pair (one each for procs, tasks) for each pid namespace that's relevant
 * to the cgroup.
 */
struct cgroup_pidlist {
	/*
	 * used to find which pidlist is wanted; doesn't change as long as
	 * this particular list stays in the list.
	 */
	struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
	/* array of pids (tgids for the procs file, pids for tasks) */
	pid_t *list;
	/* how many elements the above list has */
	int length;
	/* each of these stored in a list by its cgroup */
	struct list_head links;
	/* pointer to the cgroup we belong to, for list removal purposes */
	struct cgroup *owner;
	/* for delayed destruction */
	struct delayed_work destroy_dwork;
};

/*
 * The following two functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}

static void pidlist_free(void *p)
{
	kvfree(p);
}
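
/*
 * A minimal sketch of the kernel-wide solution the TODO above asks for,
 * assuming kvmalloc_array() is available (it is in later kernels; shown
 * for illustration only, not what this revision compiles):
 *
 *	static void *pidlist_allocate(int count)
 *	{
 *		return kvmalloc_array(count, sizeof(pid_t), GFP_KERNEL);
 *	}
 *
 * kvfree() already copes with both kmalloc()ed and vmalloc()ed memory,
 * which is why pidlist_free() needs no size or origin information.
 */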

/*
 * Used to destroy all pidlists lingering while waiting for the destroy
 * timer.  None should be left afterwards.
 */
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp)
{
	struct cgroup_pidlist *l, *tmp_l;

	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
	mutex_unlock(&cgrp->pidlist_mutex);

	flush_workqueue(cgroup_pidlist_destroy_wq);
	BUG_ON(!list_empty(&cgrp->pidlists));
}

static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
						destroy_dwork);
	struct cgroup_pidlist *tofree = NULL;

	mutex_lock(&l->owner->pidlist_mutex);

	/*
	 * Destroy iff we didn't get queued again.  The state won't change
	 * as destroy_dwork can only be queued while locked.
	 */
	if (!delayed_work_pending(dwork)) {
		list_del(&l->links);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		tofree = l;
	}

	mutex_unlock(&l->owner->pidlist_mutex);
	kfree(tofree);
}

/*
 * pidlist_uniq - given a sorted pid list, strip out all duplicate entries
 * Returns the number of unique elements.
 */
static int pidlist_uniq(pid_t *list, int length)
{
	int src, dest = 1;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	return dest;
}
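
/*
 * Worked example (illustrative): for the sorted input {3, 3, 5, 5, 8}
 * with length 5, pidlist_uniq() compacts the array in place to
 * {3, 5, 8, 5, 8} and returns 3; only the first three entries are
 * meaningful afterwards.  The caller simply shrinks the logical length
 * rather than reallocating.
 */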

/*
 * The two pid files - tasks and cgroup.procs - guaranteed that the result
 * is sorted, which forced this whole pidlist fiasco.  As pid order is
 * different per namespace, each namespace needs a differently sorted list,
 * making it impossible to use, for example, a single rbtree of member tasks
 * sorted by task pointer.  As pidlists can be fairly large, allocating one
 * per open file is dangerous, so cgroup had to implement a shared pool of
 * pidlists keyed by cgroup and namespace.
 */
static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}
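
/*
 * Plain subtraction is a safe comparator here only because valid pids
 * are non-negative and far below INT_MAX, so the difference cannot
 * overflow.  A generic int comparator would instead be written as (an
 * illustrative sketch):
 *
 *	static int cmp_int(const void *a, const void *b)
 *	{
 *		int x = *(const int *)a, y = *(const int *)b;
 *
 *		return (x > y) - (x < y);
 *	}
 */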

static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = task_active_pid_ns(current);

	lockdep_assert_held(&cgrp->pidlist_mutex);

	list_for_each_entry(l, &cgrp->pidlists, links)
		if (l->key.type == type && l->key.ns == ns)
			return l;
	return NULL;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks),
 * creating a new one if necessary.  Must be called with
 * cgrp->pidlist_mutex held.  Returns NULL if we're out of memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
						enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	l = cgroup_pidlist_find(cgrp, type);
	if (l)
		return l;

	/* entry not found; create a new one */
	l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l)
		return l;

	INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
	l->key.type = type;
	/* don't need task_nsproxy() if we're looking at ourself */
	l->key.ns = get_pid_ns(task_active_pid_ns(current));
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	return l;
}

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.  The returned number can be
 * higher than the actual number of tasks due to css_set references from
 * namespace roots and temporary usages.
 */
static int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cgrp_cset_link *link;

	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &cgrp->cset_links, cset_link)
		count += atomic_read(&link->cset->refcount);
	spin_unlock_irq(&css_set_lock);
	return count;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct css_task_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	lockdep_assert_held(&cgrp->pidlist_mutex);

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough.  This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	css_task_iter_end(&it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(array, length);

	l = cgroup_pidlist_find_create(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}

	/* store array, freeing old if necessary */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	*lp = l;
	return 0;
}
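
/*
 * Worked example (illustrative): for a cgroup holding one process with
 * tgid 100 and threads 100, 101 and 102, the "tasks" load produces
 * {100, 101, 102}, while the "procs" load first collects {100, 100,
 * 100} and then sorts and uniquifies it down to {100}.
 */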

/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * within the pidlist's ->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary search to find the
	 * next pid to display, if any.
	 */
	struct kernfs_open_file *of = s->private;
	struct cgroup *cgrp = seq_css(s)->cgroup;
	struct cgroup_pidlist *l;
	enum cgroup_filetype type = seq_cft(s)->private;
	int index = 0, pid = *pos;
	int *iter, ret;

	mutex_lock(&cgrp->pidlist_mutex);

	/*
	 * !NULL @of->priv indicates that this isn't the first start()
	 * after open.  If the matching pidlist is around, we can use that.
	 * Look for it.  Note that @of->priv can't be used directly.  It
	 * could already have been destroyed.
	 */
	if (of->priv)
		of->priv = cgroup_pidlist_find(cgrp, type);

	/*
	 * Either this is the first start() after open or the matching
	 * pidlist has been destroyed in between.  Create a new one.
	 */
	if (!of->priv) {
		ret = pidlist_array_load(cgrp, type,
					 (struct cgroup_pidlist **)&of->priv);
		if (ret)
			return ERR_PTR(ret);
	}
	l = of->priv;

	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}
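
/*
 * Worked example (illustrative): with l->list = {5, 9, 12} and a resume
 * position of *pos == 10 (one more than the last pid shown, 9), the
 * binary search above settles on index 2, so pid 12 is shown next and
 * *pos is updated to 12.  Had the list been rebuilt as {5, 9} in the
 * meantime, the search would fall off the end and return NULL.
 */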

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;

	if (l)
		mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
				 CGROUP_PIDLIST_DESTROY_DELAY);
	mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kernfs_open_file *of = s->private;
	struct cgroup_pidlist *l = of->priv;
	pid_t *p = v;
	pid_t *end = l->list + l->length;

	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *(int *)v);

	return 0;
}

static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
				  char *buf, size_t nbytes, loff_t off)
{
	return __cgroup_procs_write(of, buf, nbytes, off, false);
}

static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
					  char *buf, size_t nbytes, loff_t off)
{
	struct cgroup *cgrp;

	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);

	cgrp = cgroup_kn_lock_live(of->kn, false);
	if (!cgrp)
		return -ENODEV;
	spin_lock(&release_agent_path_lock);
	strlcpy(cgrp->root->release_agent_path, strstrip(buf),
		sizeof(cgrp->root->release_agent_path));
	spin_unlock(&release_agent_path_lock);
	cgroup_kn_unlock(of->kn);
	return nbytes;
}

static int cgroup_release_agent_show(struct seq_file *seq, void *v)
{
	struct cgroup *cgrp = seq_css(seq)->cgroup;

	spin_lock(&release_agent_path_lock);
	seq_puts(seq, cgrp->root->release_agent_path);
	spin_unlock(&release_agent_path_lock);
	seq_putc(seq, '\n');
	return 0;
}

static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "0\n");
	return 0;
}

static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	return notify_on_release(css->cgroup);
}

static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
					  struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
	return 0;
}

static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
				      struct cftype *cft)
{
	return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}

static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
				       struct cftype *cft, u64 val)
{
	if (val)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	else
		clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
	return 0;
}

/* cgroup core interface files for the legacy hierarchies */
struct cftype cgroup1_base_files[] = {
	{
		.name = "cgroup.procs",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_PROCS,
		.write = cgroup_procs_write,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
	{
		.name = "cgroup.sane_behavior",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_sane_behavior_show,
	},
	{
		.name = "tasks",
		.seq_start = cgroup_pidlist_start,
		.seq_next = cgroup_pidlist_next,
		.seq_stop = cgroup_pidlist_stop,
		.seq_show = cgroup_pidlist_show,
		.private = CGROUP_FILE_TASKS,
		.write = cgroup_tasks_write,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = "release_agent",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = cgroup_release_agent_show,
		.write = cgroup_release_agent_write,
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->legacy_name, ss->root->hierarchy_id,
			   atomic_read(&ss->root->nr_cgrps),
			   cgroup_ssid_enabled(i));

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup *cgrp;
	struct css_task_iter it;
	struct task_struct *tsk;

	/* it should be a kernfs_node belonging to cgroupfs and be a directory */
	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return -EINVAL;

	mutex_lock(&cgroup_mutex);

	/*
	 * We aren't being called from kernfs and there's no guarantee on
	 * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
	 * @kn->priv is RCU safe.  Let's do the RCU dancing.
	 */
	rcu_read_lock();
	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
	if (!cgrp || cgroup_is_dead(cgrp)) {
		rcu_read_unlock();
		mutex_unlock(&cgroup_mutex);
		return -ENOENT;
	}
	rcu_read_unlock();

	css_task_iter_start(&cgrp->self, &it);
	while ((tsk = css_task_iter_next(&it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	css_task_iter_end(&it);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

void cgroup1_check_for_release(struct cgroup *cgrp)
{
	if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
	    !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
		schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
void cgroup1_release_agent(struct work_struct *work)
{
	struct cgroup *cgrp =
		container_of(work, struct cgroup, release_agent_work);
	char *pathbuf = NULL, *agentbuf = NULL;
	char *argv[3], *envp[3];
	int ret;

	mutex_lock(&cgroup_mutex);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
	if (!pathbuf || !agentbuf)
		goto out;

	spin_lock_irq(&css_set_lock);
	ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
	spin_unlock_irq(&css_set_lock);
	if (ret < 0 || ret >= PATH_MAX)
		goto out;

	argv[0] = agentbuf;
	argv[1] = pathbuf;
	argv[2] = NULL;

	/* minimal command environment */
	envp[0] = "HOME=/";
	envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[2] = NULL;

	mutex_unlock(&cgroup_mutex);
	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
	goto out_free;
out:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(agentbuf);
	kfree(pathbuf);
}
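
/*
 * Illustrative userspace usage (paths are hypothetical): with
 *
 *	# echo /sbin/my_agent > /sys/fs/cgroup/memory/release_agent
 *	# echo 1 > /sys/fs/cgroup/memory/mygrp/notify_on_release
 *
 * emptying mygrp results in roughly "/sbin/my_agent /mygrp", i.e. the
 * agent receives the cgroup path relative to the hierarchy root as its
 * single argument, with only the minimal HOME/PATH environment built
 * above.
 */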

/*
 * cgroup1_rename - Only allow simple rename of directories in place.
 */
static int cgroup1_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
			  const char *new_name_str)
{
	struct cgroup *cgrp = kn->priv;
	int ret;

	if (kernfs_type(kn) != KERNFS_DIR)
		return -ENOTDIR;
	if (kn->parent != new_parent)
		return -EIO;

	/*
	 * We're going to grab cgroup_mutex which nests outside kernfs
	 * active_ref.  kernfs_rename() doesn't require active_ref
	 * protection.  Break them before grabbing cgroup_mutex.
	 */
	kernfs_break_active_protection(new_parent);
	kernfs_break_active_protection(kn);

	mutex_lock(&cgroup_mutex);

	ret = kernfs_rename(kn, new_parent, new_name_str);
	if (!ret)
		trace_cgroup_rename(cgrp);

	mutex_unlock(&cgroup_mutex);

	kernfs_unbreak_active_protection(kn);
	kernfs_unbreak_active_protection(new_parent);
	return ret;
}

static int cgroup1_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_show_option(seq, ss->legacy_name, NULL);
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_show_option(seq, "release_agent",
				root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_show_option(seq, "name", root->name);
	return 0;
}

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	u16 mask = U16_MAX;
	struct cgroup_subsys *ss;
	int nr_opts = 0;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~((u16)1 << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		nr_opts++;

		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
			continue;
		}
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->legacy_name))
				continue;
			if (!cgroup_ssid_enabled(i))
				continue;
			if (cgroup1_ssid_disabled(i))
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified, select all the subsystems.
	 * Otherwise, if none of 'none', 'name=' or a subsystem name was
	 * specified, default to 'all' as well.
	 */
	if (all_ss || (!one_ss && !opts->none && !opts->name))
		for_each_subsys(ss, i)
			if (cgroup_ssid_enabled(i) && !cgroup1_ssid_disabled(i))
				opts->subsys_mask |= (1 << i);

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_mask && !opts->name)
		return -EINVAL;

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
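
/*
 * Example option strings accepted by the parser above (illustrative):
 *
 *	"cpu,cpuacct"		- two explicitly named subsystems
 *	"all,clone_children"	- every enabled v1 subsystem
 *	"none,name=systemd"	- a named hierarchy with no controllers
 *	"cpuset,noprefix"	- legacy cpuset-style file names
 *
 * e.g. from userspace:
 *
 *	# mount -t cgroup -o cpu,cpuacct cgroup /sys/fs/cgroup/cpu,cpuacct
 */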

static int cgroup1_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	u16 added_mask, removed_mask;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);

	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;

	/* Don't allow flags or name to change at remount */
	if ((opts.flags ^ root->flags) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags, opts.name ?: "", root->flags, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.self.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}

	trace_cgroup_remount(root);

out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	return ret;
}

struct kernfs_syscall_ops cgroup1_kf_syscall_ops = {
	.rename			= cgroup1_rename,
	.show_options		= cgroup1_show_options,
	.remount_fs		= cgroup1_remount,
	.mkdir			= cgroup_mkdir,
	.rmdir			= cgroup_rmdir,
	.show_path		= cgroup_show_path,
};

struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
			     void *data, unsigned long magic,
			     struct cgroup_namespace *ns)
{
	struct super_block *pinned_sb = NULL;
	struct cgroup_sb_opts opts;
	struct cgroup_root *root;
	struct cgroup_subsys *ss;
	struct dentry *dentry;
	int i, ret;

	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/*
	 * Destruction of cgroup root is asynchronous, so subsystems may
	 * still be dying after the previous unmount.  Let's drain the
	 * dying subsystems.  We just need to ensure that the ones
	 * unmounted previously finish dying and don't care about new ones
	 * starting.  Testing ref liveliness is good enough.
	 */
	for_each_subsys(ss, i) {
		if (!(opts.subsys_mask & (1 << i)) ||
		    ss->root == &cgrp_dfl_root)
			continue;

		if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}
		cgroup_put(&ss->root->cgrp);
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if (root->flags ^ opts.flags)
			pr_warn("new mount options do not match the existing superblock, will be ignored\n");

		/*
		 * We want to reuse @root whose lifetime is governed by its
		 * ->cgrp.  Let's check whether @root is alive and keep it
		 * that way.  As cgroup_kill_sb() can happen anytime, we
		 * want to block it by pinning the sb so that @root doesn't
		 * get killed before mount is complete.
		 *
		 * With the sb pinned, tryget_live can reliably indicate
		 * whether @root can be reused.  If it's being killed,
		 * drain it.  We could use a wait_queue for the wait but
		 * this path is super cold.  Let's just sleep a bit and
		 * retry.
		 */
		pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
		if (IS_ERR(pinned_sb) ||
		    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			if (!IS_ERR_OR_NULL(pinned_sb))
				deactivate_super(pinned_sb);
			msleep(10);
			ret = restart_syscall();
			goto out_free;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create a new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Hierarchies may only be created in the initial cgroup namespace. */
	if (ns != &init_cgroup_ns) {
		ret = -EPERM;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
out_free:
	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = cgroup_do_mount(&cgroup_fs_type, flags, root,
				 CGROUP_SUPER_MAGIC, ns);

	/*
	 * If @pinned_sb, we're reusing an existing root and holding an
	 * extra ref on its sb.  Mount is complete.  Put the extra ref.
	 */
	if (pinned_sb)
		deactivate_super(pinned_sb);

	return dentry;
}

static int __init cgroup1_wq_init(void)
{
	/*
	 * Used to destroy pidlists; kept separate so that it can serve as
	 * the flush domain.  Cap @max_active at 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);
	return 0;
}
core_initcall(cgroup1_wq_init);

static int __init cgroup_no_v1(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		if (!strcmp(token, "all")) {
			cgroup_no_v1_mask = U16_MAX;
			break;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name) &&
			    strcmp(token, ss->legacy_name))
				continue;

			cgroup_no_v1_mask |= 1 << i;
		}
	}
	return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);
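
/*
 * Example (illustrative): booting with
 *
 *	cgroup_no_v1=memory,cpu
 *
 * sets the corresponding bits in cgroup_no_v1_mask so that
 * cgroup1_ssid_disabled() reports those subsystems as unavailable on
 * v1 hierarchies, while "cgroup_no_v1=all" blocks every subsystem.
 */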


#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	spin_lock_irq(&css_set_lock);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	spin_unlock_irq(&css_set_lock);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return (!cgroup_is_populated(css->cgroup) &&
		!css_has_online_children(&css->cgroup->self));
}

static struct cftype debug_files[] = {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.legacy_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */
1396