xref: /openbmc/linux/kernel/cgroup/cgroup.c (revision 4da722ca19f30f7db250db808d1ab1703607a932)
1 /*
2  *  Generic process-grouping system.
3  *
4  *  Based originally on the cpuset system, extracted by Paul Menage
5  *  Copyright (C) 2006 Google, Inc
6  *
7  *  Notifications support
8  *  Copyright (C) 2009 Nokia Corporation
9  *  Author: Kirill A. Shutemov
10  *
11  *  Copyright notices from the original cpuset code:
12  *  --------------------------------------------------
13  *  Copyright (C) 2003 BULL SA.
14  *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
15  *
16  *  Portions derived from Patrick Mochel's sysfs code.
17  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
18  *
19  *  2003-10-10 Written by Simon Derr.
20  *  2003-10-22 Updates by Stephen Hemminger.
21  *  2004 May-July Rework by Paul Jackson.
22  *  ---------------------------------------------------
23  *
24  *  This file is subject to the terms and conditions of the GNU General Public
25  *  License.  See the file COPYING in the main directory of the Linux
26  *  distribution for more details.
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include "cgroup-internal.h"
32 
33 #include <linux/cred.h>
34 #include <linux/errno.h>
35 #include <linux/init_task.h>
36 #include <linux/kernel.h>
37 #include <linux/magic.h>
38 #include <linux/mutex.h>
39 #include <linux/mount.h>
40 #include <linux/pagemap.h>
41 #include <linux/proc_fs.h>
42 #include <linux/rcupdate.h>
43 #include <linux/sched.h>
44 #include <linux/sched/task.h>
45 #include <linux/slab.h>
46 #include <linux/spinlock.h>
47 #include <linux/percpu-rwsem.h>
48 #include <linux/string.h>
49 #include <linux/hashtable.h>
50 #include <linux/idr.h>
51 #include <linux/kthread.h>
52 #include <linux/atomic.h>
53 #include <linux/cpuset.h>
54 #include <linux/proc_ns.h>
55 #include <linux/nsproxy.h>
56 #include <linux/file.h>
57 #include <net/sock.h>
58 
59 #define CREATE_TRACE_POINTS
60 #include <trace/events/cgroup.h>
61 
62 #define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
63 					 MAX_CFTYPE_NAME + 2)
64 
65 /*
66  * cgroup_mutex is the master lock.  Any modification to cgroup or its
67  * hierarchy must be performed while holding it.
68  *
69  * css_set_lock protects task->cgroups pointer, the list of css_set
70  * objects, and the chain of tasks off each css_set.
71  *
72  * These locks are exported if CONFIG_PROVE_RCU so that accessors in
73  * cgroup.h can use them for lockdep annotations.
74  */
75 DEFINE_MUTEX(cgroup_mutex);
76 DEFINE_SPINLOCK(css_set_lock);
77 
78 #ifdef CONFIG_PROVE_RCU
79 EXPORT_SYMBOL_GPL(cgroup_mutex);
80 EXPORT_SYMBOL_GPL(css_set_lock);
81 #endif
82 
83 /*
84  * Protects cgroup_idr and css_idr so that IDs can be released without
85  * grabbing cgroup_mutex.
86  */
87 static DEFINE_SPINLOCK(cgroup_idr_lock);
88 
89 /*
90  * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
91  * against file removal/re-creation across css hiding.
92  */
93 static DEFINE_SPINLOCK(cgroup_file_kn_lock);
94 
95 struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
96 
97 #define cgroup_assert_mutex_or_rcu_locked()				\
98 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
99 			   !lockdep_is_held(&cgroup_mutex),		\
100 			   "cgroup_mutex or RCU read lock required");
101 
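/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * accessor relying on the assertion above.  Holding either cgroup_mutex
 * or the RCU read lock satisfies the check; cgroup_css() below follows
 * the same pattern.
 *
 *	static struct cgroup_subsys_state *
 *	example_peek_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 *	{
 *		cgroup_assert_mutex_or_rcu_locked();
 *		return rcu_dereference_check(cgrp->subsys[ss->id],
 *					     lockdep_is_held(&cgroup_mutex));
 *	}
 */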
102 /*
103  * cgroup destruction makes heavy use of work items and there can be a lot
104  * of concurrent destructions.  Use a separate workqueue so that cgroup
105  * destruction work items don't end up filling up max_active of system_wq
106  * which may lead to deadlock.
107  */
108 static struct workqueue_struct *cgroup_destroy_wq;
109 
110 /* generate an array of cgroup subsystem pointers */
111 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
112 struct cgroup_subsys *cgroup_subsys[] = {
113 #include <linux/cgroup_subsys.h>
114 };
115 #undef SUBSYS
116 
117 /* array of cgroup subsystem names */
118 #define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
119 static const char *cgroup_subsys_name[] = {
120 #include <linux/cgroup_subsys.h>
121 };
122 #undef SUBSYS
123 
124 /* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
125 #define SUBSYS(_x)								\
126 	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);			\
127 	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);			\
128 	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);			\
129 	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
130 #include <linux/cgroup_subsys.h>
131 #undef SUBSYS
132 
133 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
134 static struct static_key_true *cgroup_subsys_enabled_key[] = {
135 #include <linux/cgroup_subsys.h>
136 };
137 #undef SUBSYS
138 
139 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
140 static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
141 #include <linux/cgroup_subsys.h>
142 };
143 #undef SUBSYS
144 
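/*
 * For illustration only (hypothetical expansion, assuming the cpuset
 * and cpu controllers are configured in): including
 * <linux/cgroup_subsys.h> under the SUBSYS() definitions above expands
 * roughly to
 *
 *	struct cgroup_subsys *cgroup_subsys[] = {
 *		[cpuset_cgrp_id] = &cpuset_cgrp_subsys,
 *		[cpu_cgrp_id]    = &cpu_cgrp_subsys,
 *	};
 *	static const char *cgroup_subsys_name[] = {
 *		[cpuset_cgrp_id] = "cpuset",
 *		[cpu_cgrp_id]    = "cpu",
 *	};
 *
 * The actual entries depend on the kernel config.
 */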
145 /*
146  * The default hierarchy, reserved for the subsystems that are otherwise
147  * unattached - it never has more than a single cgroup, and all tasks are
148  * part of that cgroup.
149  */
150 struct cgroup_root cgrp_dfl_root;
151 EXPORT_SYMBOL_GPL(cgrp_dfl_root);
152 
153 /*
154  * The default hierarchy always exists but is hidden until mounted for the
155  * first time.  This is for backward compatibility.
156  */
157 static bool cgrp_dfl_visible;
158 
159 /* some controllers are not supported in the default hierarchy */
160 static u16 cgrp_dfl_inhibit_ss_mask;
161 
162 /* some controllers are implicitly enabled on the default hierarchy */
163 static u16 cgrp_dfl_implicit_ss_mask;
164 
165 /* The list of hierarchy roots */
166 LIST_HEAD(cgroup_roots);
167 static int cgroup_root_count;
168 
169 /* hierarchy ID allocation and mapping, protected by cgroup_mutex */
170 static DEFINE_IDR(cgroup_hierarchy_idr);
171 
172 /*
173  * Assign a monotonically increasing serial number to csses.  It guarantees
174  * that csses with higher numbers are newer than those with lower numbers.
175  * Also, as csses are always appended to the parent's ->children list, it
176  * guarantees that sibling csses are always sorted in the ascending serial
177  * number order on the list.  Protected by cgroup_mutex.
178  */
179 static u64 css_serial_nr_next = 1;
180 
181 /*
182  * These bitmasks identify subsystems with specific features to avoid
183  * having to do iterative checks repeatedly.
184  */
185 static u16 have_fork_callback __read_mostly;
186 static u16 have_exit_callback __read_mostly;
187 static u16 have_free_callback __read_mostly;
188 static u16 have_canfork_callback __read_mostly;
189 
190 /* cgroup namespace for init task */
191 struct cgroup_namespace init_cgroup_ns = {
192 	.count		= REFCOUNT_INIT(2),
193 	.user_ns	= &init_user_ns,
194 	.ns.ops		= &cgroupns_operations,
195 	.ns.inum	= PROC_CGROUP_INIT_INO,
196 	.root_cset	= &init_css_set,
197 };
198 
199 static struct file_system_type cgroup2_fs_type;
200 static struct cftype cgroup_base_files[];
201 
202 static int cgroup_apply_control(struct cgroup *cgrp);
203 static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
204 static void css_task_iter_advance(struct css_task_iter *it);
205 static int cgroup_destroy_locked(struct cgroup *cgrp);
206 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
207 					      struct cgroup_subsys *ss);
208 static void css_release(struct percpu_ref *ref);
209 static void kill_css(struct cgroup_subsys_state *css);
210 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
211 			      struct cgroup *cgrp, struct cftype cfts[],
212 			      bool is_add);
213 
214 /**
215  * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
216  * @ssid: subsys ID of interest
217  *
218  * cgroup_subsys_enabled() can only be used with literal subsys names, which
219  * is fine for individual subsystems but unsuitable for cgroup core.  This
220  * is a slower static_key_enabled()-based test indexed by @ssid.
221  */
222 bool cgroup_ssid_enabled(int ssid)
223 {
224 	if (CGROUP_SUBSYS_COUNT == 0)
225 		return false;
226 
227 	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
228 }
229 
230 /**
231  * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
232  * @cgrp: the cgroup of interest
233  *
234  * The default hierarchy is the v2 interface of cgroup and this function
235  * can be used to test whether a cgroup is on the default hierarchy for
236  * cases where a subsystem should behave differently depending on the
237  * interface version.
238  *
239  * The set of behaviors which change on the default hierarchy are still
240  * being determined and the mount option is prefixed with __DEVEL__.
241  *
242  * List of changed behaviors:
243  *
244  * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
245  *   and "name" are disallowed.
246  *
247  * - When mounting an existing superblock, mount options should match.
248  *
249  * - Remount is disallowed.
250  *
251  * - rename(2) is disallowed.
252  *
253  * - "tasks" is removed.  Everything should be at process granularity.  Use
254  *   "cgroup.procs" instead.
255  *
256  * - "cgroup.procs" is not sorted.  pids will be unique unless they got
257  *   recycled in between reads.
258  *
259  * - "release_agent" and "notify_on_release" are removed.  Replacement
260  *   notification mechanism will be implemented.
261  *
262  * - "cgroup.clone_children" is removed.
263  *
264  * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
265  *   and its descendants contain no task; otherwise, 1.  The file also
266  *   generates kernfs notification which can be monitored through poll and
267  *   [di]notify when the value of the file changes.
268  *
269  * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
270  *   take masks of ancestors with non-empty cpus/mems, instead of being
271  *   moved to an ancestor.
272  *
273  * - cpuset: a task can be moved into an empty cpuset, and again it takes
274  *   masks of ancestors.
275  *
276  * - memcg: use_hierarchy is on by default and the cgroup file for the flag
277  *   is not created.
278  *
279  * - blkcg: blk-throttle becomes properly hierarchical.
280  *
281  * - debug: disallowed on the default hierarchy.
282  */
283 bool cgroup_on_dfl(const struct cgroup *cgrp)
284 {
285 	return cgrp->root == &cgrp_dfl_root;
286 }
287 
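/*
 * Illustrative use (sketch; use_v2_semantics() and use_v1_semantics()
 * are hypothetical helpers, not from this file): controllers branch on
 * the interface version like
 *
 *	if (cgroup_on_dfl(cgrp))
 *		use_v2_semantics();
 *	else
 *		use_v1_semantics();
 */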
288 /* IDR wrappers which synchronize using cgroup_idr_lock */
289 static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
290 			    gfp_t gfp_mask)
291 {
292 	int ret;
293 
294 	idr_preload(gfp_mask);
295 	spin_lock_bh(&cgroup_idr_lock);
296 	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
297 	spin_unlock_bh(&cgroup_idr_lock);
298 	idr_preload_end();
299 	return ret;
300 }
301 
302 static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
303 {
304 	void *ret;
305 
306 	spin_lock_bh(&cgroup_idr_lock);
307 	ret = idr_replace(idr, ptr, id);
308 	spin_unlock_bh(&cgroup_idr_lock);
309 	return ret;
310 }
311 
312 static void cgroup_idr_remove(struct idr *idr, int id)
313 {
314 	spin_lock_bh(&cgroup_idr_lock);
315 	idr_remove(idr, id);
316 	spin_unlock_bh(&cgroup_idr_lock);
317 }
318 
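/*
 * Illustrative usage (a sketch of the real call in cgroup_setup_root()
 * below): idr_preload() runs outside the spinlock, so a GFP_KERNEL
 * mask never triggers reclaim under cgroup_idr_lock.
 *
 *	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2,
 *			       GFP_KERNEL);
 *	if (ret < 0)
 *		goto out;
 */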
319 static struct cgroup *cgroup_parent(struct cgroup *cgrp)
320 {
321 	struct cgroup_subsys_state *parent_css = cgrp->self.parent;
322 
323 	if (parent_css)
324 		return container_of(parent_css, struct cgroup, self);
325 	return NULL;
326 }
327 
328 /* subsystems visibly enabled on a cgroup */
329 static u16 cgroup_control(struct cgroup *cgrp)
330 {
331 	struct cgroup *parent = cgroup_parent(cgrp);
332 	u16 root_ss_mask = cgrp->root->subsys_mask;
333 
334 	if (parent)
335 		return parent->subtree_control;
336 
337 	if (cgroup_on_dfl(cgrp))
338 		root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
339 				  cgrp_dfl_implicit_ss_mask);
340 	return root_ss_mask;
341 }
342 
343 /* subsystems enabled on a cgroup */
344 static u16 cgroup_ss_mask(struct cgroup *cgrp)
345 {
346 	struct cgroup *parent = cgroup_parent(cgrp);
347 
348 	if (parent)
349 		return parent->subtree_ss_mask;
350 
351 	return cgrp->root->subsys_mask;
352 }
353 
354 /**
355  * cgroup_css - obtain a cgroup's css for the specified subsystem
356  * @cgrp: the cgroup of interest
357  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
358  *
359  * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
360  * function must be called either under cgroup_mutex or rcu_read_lock() and
361  * the caller is responsible for pinning the returned css if it wants to
362  * keep accessing it outside the said locks.  This function may return
363  * %NULL if @cgrp doesn't have @ss enabled.
364  */
365 static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
366 					      struct cgroup_subsys *ss)
367 {
368 	if (ss)
369 		return rcu_dereference_check(cgrp->subsys[ss->id],
370 					lockdep_is_held(&cgroup_mutex));
371 	else
372 		return &cgrp->self;
373 }
374 
375 /**
376  * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
377  * @cgrp: the cgroup of interest
378  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
379  *
380  * Similar to cgroup_css() but returns the effective css, which is defined
381  * as the matching css of the nearest ancestor including self which has @ss
382  * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
383  * function is guaranteed to return non-NULL css.
384  */
385 static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
386 						struct cgroup_subsys *ss)
387 {
388 	lockdep_assert_held(&cgroup_mutex);
389 
390 	if (!ss)
391 		return &cgrp->self;
392 
393 	/*
394 	 * This function is used while updating css associations and thus
395 	 * can't test the csses directly.  Test ss_mask.
396 	 */
397 	while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
398 		cgrp = cgroup_parent(cgrp);
399 		if (!cgrp)
400 			return NULL;
401 	}
402 
403 	return cgroup_css(cgrp, ss);
404 }
405 
406 /**
407  * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
408  * @cgrp: the cgroup of interest
409  * @ss: the subsystem of interest
410  *
411  * Find and get the effective css of @cgrp for @ss.  The effective css is
412  * defined as the matching css of the nearest ancestor including self which
413  * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
414  * the root css is returned, so this function always returns a valid css.
415  * The returned css must be put using css_put().
416  */
417 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
418 					     struct cgroup_subsys *ss)
419 {
420 	struct cgroup_subsys_state *css;
421 
422 	rcu_read_lock();
423 
424 	do {
425 		css = cgroup_css(cgrp, ss);
426 
427 		if (css && css_tryget_online(css))
428 			goto out_unlock;
429 		cgrp = cgroup_parent(cgrp);
430 	} while (cgrp);
431 
432 	css = init_css_set.subsys[ss->id];
433 	css_get(css);
434 out_unlock:
435 	rcu_read_unlock();
436 	return css;
437 }
438 
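/*
 * Illustrative pairing (sketch; the controller choice is arbitrary):
 * the css returned by cgroup_get_e_css() is pinned and must be released
 * with css_put() when the caller is done.
 *
 *	css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
 *	... use css ...
 *	css_put(css);
 */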
439 static void __maybe_unused cgroup_get(struct cgroup *cgrp)
440 {
441 	css_get(&cgrp->self);
442 }
443 
444 static void cgroup_get_live(struct cgroup *cgrp)
445 {
446 	WARN_ON_ONCE(cgroup_is_dead(cgrp));
447 	css_get(&cgrp->self);
448 }
449 
450 static bool cgroup_tryget(struct cgroup *cgrp)
451 {
452 	return css_tryget(&cgrp->self);
453 }
454 
455 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
456 {
457 	struct cgroup *cgrp = of->kn->parent->priv;
458 	struct cftype *cft = of_cft(of);
459 
460 	/*
461 	 * This is an open and unprotected implementation of cgroup_css().
462 	 * seq_css() is only called from a kernfs file operation which has
463 	 * an active reference on the file.  Because all the subsystem
464 	 * files are drained before a css is disassociated from a cgroup,
465 	 * the matching css from the cgroup's subsys table is guaranteed to
466 	 * be and stay valid until the enclosing operation is complete.
467 	 */
468 	if (cft->ss)
469 		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
470 	else
471 		return &cgrp->self;
472 }
473 EXPORT_SYMBOL_GPL(of_css);
474 
475 /**
476  * for_each_css - iterate all css's of a cgroup
477  * @css: the iteration cursor
478  * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
479  * @cgrp: the target cgroup to iterate css's of
480  *
481  * Should be called under cgroup_[tree_]mutex.
482  */
483 #define for_each_css(css, ssid, cgrp)					\
484 	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
485 		if (!((css) = rcu_dereference_check(			\
486 				(cgrp)->subsys[(ssid)],			\
487 				lockdep_is_held(&cgroup_mutex)))) { }	\
488 		else
489 
490 /**
491  * for_each_e_css - iterate all effective css's of a cgroup
492  * @css: the iteration cursor
493  * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
494  * @cgrp: the target cgroup to iterate css's of
495  *
496  * Should be called under cgroup_[tree_]mutex.
497  */
498 #define for_each_e_css(css, ssid, cgrp)					\
499 	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
500 		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
501 			;						\
502 		else
503 
504 /**
505  * do_each_subsys_mask - filter for_each_subsys with a bitmask
506  * @ss: the iteration cursor
507  * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
508  * @ss_mask: the bitmask
509  *
510  * The block will only run for cases where the ssid-th bit (1 << ssid) of
511  * @ss_mask is set.
512  */
513 #define do_each_subsys_mask(ss, ssid, ss_mask) do {			\
514 	unsigned long __ss_mask = (ss_mask);				\
515 	if (!CGROUP_SUBSYS_COUNT) { /* to avoid spurious gcc warning */	\
516 		(ssid) = 0;						\
517 		break;							\
518 	}								\
519 	for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) {	\
520 		(ss) = cgroup_subsys[ssid];				\
521 		{
522 
523 #define while_each_subsys_mask()					\
524 		}							\
525 	}								\
526 } while (false)
527 
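/*
 * Illustrative usage (sketch): visit only the subsystems whose bits are
 * set in a mask.
 *
 *	do_each_subsys_mask(ss, ssid, ss_mask) {
 *		pr_debug("subsys %s (id %d) is in the mask\n",
 *			 ss->name, ssid);
 *	} while_each_subsys_mask();
 */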
528 /* iterate over child cgrps, lock should be held throughout iteration */
529 #define cgroup_for_each_live_child(child, cgrp)				\
530 	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
531 		if (({ lockdep_assert_held(&cgroup_mutex);		\
532 		       cgroup_is_dead(child); }))			\
533 			;						\
534 		else
535 
536 /* walk live descendants in preorder */
537 #define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)		\
538 	css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))	\
539 		if (({ lockdep_assert_held(&cgroup_mutex);		\
540 		       (dsct) = (d_css)->cgroup;			\
541 		       cgroup_is_dead(dsct); }))			\
542 			;						\
543 		else
544 
545 /* walk live descendants in postorder */
546 #define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp)		\
547 	css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL))	\
548 		if (({ lockdep_assert_held(&cgroup_mutex);		\
549 		       (dsct) = (d_css)->cgroup;			\
550 		       cgroup_is_dead(dsct); }))			\
551 			;						\
552 		else
553 
554 /*
555  * The default css_set - used by init and its children prior to any
556  * hierarchies being mounted. It contains a pointer to the root state
557  * for each subsystem. Also used to anchor the list of css_sets. Not
558  * reference-counted, to improve performance when child cgroups
559  * haven't been created.
560  */
561 struct css_set init_css_set = {
562 	.refcount		= REFCOUNT_INIT(1),
563 	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
564 	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
565 	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
566 	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
567 	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
568 	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
569 };
570 
571 static int css_set_count	= 1;	/* 1 for init_css_set */
572 
573 /**
574  * css_set_populated - does a css_set contain any tasks?
575  * @cset: target css_set
576  *
577  * css_set_populated() should be the same as !!cset->nr_tasks at steady
578  * state. However, css_set_populated() can be called while a task is being
579  * added to or removed from the linked list before the nr_tasks is
580  * properly updated. Hence, we can't just look at ->nr_tasks here.
581  */
582 static bool css_set_populated(struct css_set *cset)
583 {
584 	lockdep_assert_held(&css_set_lock);
585 
586 	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
587 }
588 
589 /**
590  * cgroup_update_populated - update the populated count of a cgroup
591  * @cgrp: the target cgroup
592  * @populated: inc or dec populated count
593  *
594  * One of the css_sets associated with @cgrp is either getting its first
595  * task or losing the last.  Update @cgrp->populated_cnt accordingly.  The
596  * count is propagated towards root so that a given cgroup's populated_cnt
597  * is zero iff the cgroup and all its descendants don't contain any tasks.
598  *
599  * @cgrp's interface file "cgroup.populated" is zero if
600  * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
601  * changes from or to zero, userland is notified that the content of the
602  * interface file has changed.  This can be used to detect when @cgrp and
603  * its descendants become populated or empty.
604  */
605 static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
606 {
607 	lockdep_assert_held(&css_set_lock);
608 
609 	do {
610 		bool trigger;
611 
612 		if (populated)
613 			trigger = !cgrp->populated_cnt++;
614 		else
615 			trigger = !--cgrp->populated_cnt;
616 
617 		if (!trigger)
618 			break;
619 
620 		cgroup1_check_for_release(cgrp);
621 		cgroup_file_notify(&cgrp->events_file);
622 
623 		cgrp = cgroup_parent(cgrp);
624 	} while (cgrp);
625 }
626 
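/*
 * From userspace (illustrative sketch, not kernel code): the kernfs
 * notification generated above surfaces as an exceptional condition on
 * the interface file, so a monitor might do
 *
 *	int fd = open("/sys/fs/cgroup/grp/cgroup.populated", O_RDONLY);
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *	poll(&pfd, 1, -1);
 *
 * The exact path and file name depend on the mount point and the
 * interface version.
 */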
627 /**
628  * css_set_update_populated - update populated state of a css_set
629  * @cset: target css_set
630  * @populated: whether @cset is populated or depopulated
631  *
632  * @cset is either getting the first task or losing the last.  Update the
633  * ->populated_cnt of all associated cgroups accordingly.
634  */
635 static void css_set_update_populated(struct css_set *cset, bool populated)
636 {
637 	struct cgrp_cset_link *link;
638 
639 	lockdep_assert_held(&css_set_lock);
640 
641 	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
642 		cgroup_update_populated(link->cgrp, populated);
643 }
644 
645 /**
646  * css_set_move_task - move a task from one css_set to another
647  * @task: task being moved
648  * @from_cset: css_set @task currently belongs to (may be NULL)
649  * @to_cset: new css_set @task is being moved to (may be NULL)
650  * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
651  *
652  * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
653  * css_set, @from_cset can be NULL.  If @task is being disassociated
654  * instead of moved, @to_cset can be NULL.
655  *
656  * This function automatically handles populated_cnt updates and
657  * css_task_iter adjustments but the caller is responsible for managing
658  * @from_cset and @to_cset's reference counts.
659  */
660 static void css_set_move_task(struct task_struct *task,
661 			      struct css_set *from_cset, struct css_set *to_cset,
662 			      bool use_mg_tasks)
663 {
664 	lockdep_assert_held(&css_set_lock);
665 
666 	if (to_cset && !css_set_populated(to_cset))
667 		css_set_update_populated(to_cset, true);
668 
669 	if (from_cset) {
670 		struct css_task_iter *it, *pos;
671 
672 		WARN_ON_ONCE(list_empty(&task->cg_list));
673 
674 		/*
675 		 * @task is leaving, advance task iterators which are
676 		 * pointing to it so that they can resume at the next
677 		 * position.  Advancing an iterator might remove it from
678 		 * the list, use safe walk.  See css_task_iter_advance*()
679 		 * for details.
680 		 */
681 		list_for_each_entry_safe(it, pos, &from_cset->task_iters,
682 					 iters_node)
683 			if (it->task_pos == &task->cg_list)
684 				css_task_iter_advance(it);
685 
686 		list_del_init(&task->cg_list);
687 		if (!css_set_populated(from_cset))
688 			css_set_update_populated(from_cset, false);
689 	} else {
690 		WARN_ON_ONCE(!list_empty(&task->cg_list));
691 	}
692 
693 	if (to_cset) {
694 		/*
695 		 * We are synchronized through cgroup_threadgroup_rwsem
696 		 * against PF_EXITING setting such that we can't race
697 		 * against cgroup_exit() changing the css_set to
698 		 * init_css_set and dropping the old one.
699 		 */
700 		WARN_ON_ONCE(task->flags & PF_EXITING);
701 
702 		rcu_assign_pointer(task->cgroups, to_cset);
703 		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
704 							     &to_cset->tasks);
705 	}
706 }
707 
708 /*
709  * hash table for css_sets. This improves the performance of finding
710  * an existing css_set. This hash doesn't (currently) take into
711  * account cgroups in empty hierarchies.
712  */
713 #define CSS_SET_HASH_BITS	7
714 static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
715 
716 static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
717 {
718 	unsigned long key = 0UL;
719 	struct cgroup_subsys *ss;
720 	int i;
721 
722 	for_each_subsys(ss, i)
723 		key += (unsigned long)css[i];
724 	key = (key >> 16) ^ key;
725 
726 	return key;
727 }
728 
729 void put_css_set_locked(struct css_set *cset)
730 {
731 	struct cgrp_cset_link *link, *tmp_link;
732 	struct cgroup_subsys *ss;
733 	int ssid;
734 
735 	lockdep_assert_held(&css_set_lock);
736 
737 	if (!refcount_dec_and_test(&cset->refcount))
738 		return;
739 
740 	/* This css_set is dead. unlink it and release cgroup and css refs */
741 	for_each_subsys(ss, ssid) {
742 		list_del(&cset->e_cset_node[ssid]);
743 		css_put(cset->subsys[ssid]);
744 	}
745 	hash_del(&cset->hlist);
746 	css_set_count--;
747 
748 	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
749 		list_del(&link->cset_link);
750 		list_del(&link->cgrp_link);
751 		if (cgroup_parent(link->cgrp))
752 			cgroup_put(link->cgrp);
753 		kfree(link);
754 	}
755 
756 	kfree_rcu(cset, rcu_head);
757 }
758 
759 /**
760  * compare_css_sets - helper function for find_existing_css_set().
761  * @cset: candidate css_set being tested
762  * @old_cset: existing css_set for a task
763  * @new_cgrp: cgroup that's being entered by the task
764  * @template: desired set of css pointers in css_set (pre-calculated)
765  *
766  * Returns true if "cset" matches "old_cset" except for the hierarchy
767  * which "new_cgrp" belongs to, for which it should match "new_cgrp".
768  */
769 static bool compare_css_sets(struct css_set *cset,
770 			     struct css_set *old_cset,
771 			     struct cgroup *new_cgrp,
772 			     struct cgroup_subsys_state *template[])
773 {
774 	struct list_head *l1, *l2;
775 
776 	/*
777 	 * On the default hierarchy, there can be csets which are
778 	 * associated with the same set of cgroups but different csses.
779 	 * Let's first ensure that csses match.
780 	 */
781 	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
782 		return false;
783 
784 	/*
785 	 * Compare cgroup pointers in order to distinguish between
786 	 * different cgroups in hierarchies.  As different cgroups may
787 	 * share the same effective css, this comparison is always
788 	 * necessary.
789 	 */
790 	l1 = &cset->cgrp_links;
791 	l2 = &old_cset->cgrp_links;
792 	while (1) {
793 		struct cgrp_cset_link *link1, *link2;
794 		struct cgroup *cgrp1, *cgrp2;
795 
796 		l1 = l1->next;
797 		l2 = l2->next;
798 		/* See if we reached the end - both lists are of equal length. */
799 		if (l1 == &cset->cgrp_links) {
800 			BUG_ON(l2 != &old_cset->cgrp_links);
801 			break;
802 		} else {
803 			BUG_ON(l2 == &old_cset->cgrp_links);
804 		}
805 		/* Locate the cgroups associated with these links. */
806 		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
807 		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
808 		cgrp1 = link1->cgrp;
809 		cgrp2 = link2->cgrp;
810 		/* Hierarchies should be linked in the same order. */
811 		BUG_ON(cgrp1->root != cgrp2->root);
812 
813 		/*
814 		 * If this hierarchy is the hierarchy of the cgroup
815 		 * that's changing, then we need to check that this
816 		 * css_set points to the new cgroup; if it's any other
817 		 * hierarchy, then this css_set should point to the
818 		 * same cgroup as the old css_set.
819 		 */
820 		if (cgrp1->root == new_cgrp->root) {
821 			if (cgrp1 != new_cgrp)
822 				return false;
823 		} else {
824 			if (cgrp1 != cgrp2)
825 				return false;
826 		}
827 	}
828 	return true;
829 }
830 
831 /**
832  * find_existing_css_set - init css array and find the matching css_set
833  * @old_cset: the css_set that we're using before the cgroup transition
834  * @cgrp: the cgroup that we're moving into
835  * @template: out param for the new set of csses, should be clear on entry
836  */
837 static struct css_set *find_existing_css_set(struct css_set *old_cset,
838 					struct cgroup *cgrp,
839 					struct cgroup_subsys_state *template[])
840 {
841 	struct cgroup_root *root = cgrp->root;
842 	struct cgroup_subsys *ss;
843 	struct css_set *cset;
844 	unsigned long key;
845 	int i;
846 
847 	/*
848 	 * Build the set of subsystem state objects that we want to see in the
849 	 * new css_set. While subsystems can change globally, the entries here
850 	 * won't change, so no need for locking.
851 	 */
852 	for_each_subsys(ss, i) {
853 		if (root->subsys_mask & (1UL << i)) {
854 			/*
855 			 * @ss is in this hierarchy, so we want the
856 			 * effective css from @cgrp.
857 			 */
858 			template[i] = cgroup_e_css(cgrp, ss);
859 		} else {
860 			/*
861 			 * @ss is not in this hierarchy, so we don't want
862 			 * to change the css.
863 			 */
864 			template[i] = old_cset->subsys[i];
865 		}
866 	}
867 
868 	key = css_set_hash(template);
869 	hash_for_each_possible(css_set_table, cset, hlist, key) {
870 		if (!compare_css_sets(cset, old_cset, cgrp, template))
871 			continue;
872 
873 		/* This css_set matches what we need */
874 		return cset;
875 	}
876 
877 	/* No existing css_set matched */
878 	return NULL;
879 }
880 
881 static void free_cgrp_cset_links(struct list_head *links_to_free)
882 {
883 	struct cgrp_cset_link *link, *tmp_link;
884 
885 	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
886 		list_del(&link->cset_link);
887 		kfree(link);
888 	}
889 }
890 
891 /**
892  * allocate_cgrp_cset_links - allocate cgrp_cset_links
893  * @count: the number of links to allocate
894  * @tmp_links: list_head the allocated links are put on
895  *
896  * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
897  * through ->cset_link.  Returns 0 on success or -errno.
898  */
899 static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
900 {
901 	struct cgrp_cset_link *link;
902 	int i;
903 
904 	INIT_LIST_HEAD(tmp_links);
905 
906 	for (i = 0; i < count; i++) {
907 		link = kzalloc(sizeof(*link), GFP_KERNEL);
908 		if (!link) {
909 			free_cgrp_cset_links(tmp_links);
910 			return -ENOMEM;
911 		}
912 		list_add(&link->cset_link, tmp_links);
913 	}
914 	return 0;
915 }
916 
917 /**
918  * link_css_set - a helper function to link a css_set to a cgroup
919  * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
920  * @cset: the css_set to be linked
921  * @cgrp: the destination cgroup
922  */
923 static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
924 			 struct cgroup *cgrp)
925 {
926 	struct cgrp_cset_link *link;
927 
928 	BUG_ON(list_empty(tmp_links));
929 
930 	if (cgroup_on_dfl(cgrp))
931 		cset->dfl_cgrp = cgrp;
932 
933 	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
934 	link->cset = cset;
935 	link->cgrp = cgrp;
936 
937 	/*
938 	 * Always add links to the tail of the lists so that the lists are
939 	 * in chronological order.
940 	 */
941 	list_move_tail(&link->cset_link, &cgrp->cset_links);
942 	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
943 
944 	if (cgroup_parent(cgrp))
945 		cgroup_get_live(cgrp);
946 }
947 
948 /**
949  * find_css_set - return a new css_set with one cgroup updated
950  * @old_cset: the baseline css_set
951  * @cgrp: the cgroup to be updated
952  *
953  * Return a new css_set that's equivalent to @old_cset, but with @cgrp
954  * substituted into the appropriate hierarchy.
955  */
956 static struct css_set *find_css_set(struct css_set *old_cset,
957 				    struct cgroup *cgrp)
958 {
959 	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
960 	struct css_set *cset;
961 	struct list_head tmp_links;
962 	struct cgrp_cset_link *link;
963 	struct cgroup_subsys *ss;
964 	unsigned long key;
965 	int ssid;
966 
967 	lockdep_assert_held(&cgroup_mutex);
968 
969 	/* First see if we already have a css_set that matches
970 	 * the desired set */
971 	spin_lock_irq(&css_set_lock);
972 	cset = find_existing_css_set(old_cset, cgrp, template);
973 	if (cset)
974 		get_css_set(cset);
975 	spin_unlock_irq(&css_set_lock);
976 
977 	if (cset)
978 		return cset;
979 
980 	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
981 	if (!cset)
982 		return NULL;
983 
984 	/* Allocate all the cgrp_cset_link objects that we'll need */
985 	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
986 		kfree(cset);
987 		return NULL;
988 	}
989 
990 	refcount_set(&cset->refcount, 1);
991 	INIT_LIST_HEAD(&cset->tasks);
992 	INIT_LIST_HEAD(&cset->mg_tasks);
993 	INIT_LIST_HEAD(&cset->task_iters);
994 	INIT_HLIST_NODE(&cset->hlist);
995 	INIT_LIST_HEAD(&cset->cgrp_links);
996 	INIT_LIST_HEAD(&cset->mg_preload_node);
997 	INIT_LIST_HEAD(&cset->mg_node);
998 
999 	/* Copy the set of subsystem state objects generated in
1000 	 * find_existing_css_set() */
1001 	memcpy(cset->subsys, template, sizeof(cset->subsys));
1002 
1003 	spin_lock_irq(&css_set_lock);
1004 	/* Add reference counts and links from the new css_set. */
1005 	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
1006 		struct cgroup *c = link->cgrp;
1007 
1008 		if (c->root == cgrp->root)
1009 			c = cgrp;
1010 		link_css_set(&tmp_links, cset, c);
1011 	}
1012 
1013 	BUG_ON(!list_empty(&tmp_links));
1014 
1015 	css_set_count++;
1016 
1017 	/* Add @cset to the hash table */
1018 	key = css_set_hash(cset->subsys);
1019 	hash_add(css_set_table, &cset->hlist, key);
1020 
1021 	for_each_subsys(ss, ssid) {
1022 		struct cgroup_subsys_state *css = cset->subsys[ssid];
1023 
1024 		list_add_tail(&cset->e_cset_node[ssid],
1025 			      &css->cgroup->e_csets[ssid]);
1026 		css_get(css);
1027 	}
1028 
1029 	spin_unlock_irq(&css_set_lock);
1030 
1031 	return cset;
1032 }
1033 
1034 struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
1035 {
1036 	struct cgroup *root_cgrp = kf_root->kn->priv;
1037 
1038 	return root_cgrp->root;
1039 }
1040 
1041 static int cgroup_init_root_id(struct cgroup_root *root)
1042 {
1043 	int id;
1044 
1045 	lockdep_assert_held(&cgroup_mutex);
1046 
1047 	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
1048 	if (id < 0)
1049 		return id;
1050 
1051 	root->hierarchy_id = id;
1052 	return 0;
1053 }
1054 
1055 static void cgroup_exit_root_id(struct cgroup_root *root)
1056 {
1057 	lockdep_assert_held(&cgroup_mutex);
1058 
1059 	idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
1060 }
1061 
1062 void cgroup_free_root(struct cgroup_root *root)
1063 {
1064 	if (root) {
1065 		idr_destroy(&root->cgroup_idr);
1066 		kfree(root);
1067 	}
1068 }
1069 
1070 static void cgroup_destroy_root(struct cgroup_root *root)
1071 {
1072 	struct cgroup *cgrp = &root->cgrp;
1073 	struct cgrp_cset_link *link, *tmp_link;
1074 
1075 	trace_cgroup_destroy_root(root);
1076 
1077 	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1078 
1079 	BUG_ON(atomic_read(&root->nr_cgrps));
1080 	BUG_ON(!list_empty(&cgrp->self.children));
1081 
1082 	/* Rebind all subsystems back to the default hierarchy */
1083 	WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));
1084 
1085 	/*
1086 	 * Release all the links from cset_links to this hierarchy's
1087 	 * root cgroup
1088 	 */
1089 	spin_lock_irq(&css_set_lock);
1090 
1091 	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
1092 		list_del(&link->cset_link);
1093 		list_del(&link->cgrp_link);
1094 		kfree(link);
1095 	}
1096 
1097 	spin_unlock_irq(&css_set_lock);
1098 
1099 	if (!list_empty(&root->root_list)) {
1100 		list_del(&root->root_list);
1101 		cgroup_root_count--;
1102 	}
1103 
1104 	cgroup_exit_root_id(root);
1105 
1106 	mutex_unlock(&cgroup_mutex);
1107 
1108 	kernfs_destroy_root(root->kf_root);
1109 	cgroup_free_root(root);
1110 }
1111 
1112 /*
1113  * look up cgroup associated with current task's cgroup namespace on the
1114  * specified hierarchy
1115  */
1116 static struct cgroup *
1117 current_cgns_cgroup_from_root(struct cgroup_root *root)
1118 {
1119 	struct cgroup *res = NULL;
1120 	struct css_set *cset;
1121 
1122 	lockdep_assert_held(&css_set_lock);
1123 
1124 	rcu_read_lock();
1125 
1126 	cset = current->nsproxy->cgroup_ns->root_cset;
1127 	if (cset == &init_css_set) {
1128 		res = &root->cgrp;
1129 	} else {
1130 		struct cgrp_cset_link *link;
1131 
1132 		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
1133 			struct cgroup *c = link->cgrp;
1134 
1135 			if (c->root == root) {
1136 				res = c;
1137 				break;
1138 			}
1139 		}
1140 	}
1141 	rcu_read_unlock();
1142 
1143 	BUG_ON(!res);
1144 	return res;
1145 }
1146 
1147 /* look up cgroup associated with given css_set on the specified hierarchy */
1148 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
1149 					    struct cgroup_root *root)
1150 {
1151 	struct cgroup *res = NULL;
1152 
1153 	lockdep_assert_held(&cgroup_mutex);
1154 	lockdep_assert_held(&css_set_lock);
1155 
1156 	if (cset == &init_css_set) {
1157 		res = &root->cgrp;
1158 	} else {
1159 		struct cgrp_cset_link *link;
1160 
1161 		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
1162 			struct cgroup *c = link->cgrp;
1163 
1164 			if (c->root == root) {
1165 				res = c;
1166 				break;
1167 			}
1168 		}
1169 	}
1170 
1171 	BUG_ON(!res);
1172 	return res;
1173 }
1174 
1175 /*
1176  * Return the cgroup for "task" from the given hierarchy. Must be
1177  * called with cgroup_mutex and css_set_lock held.
1178  */
1179 struct cgroup *task_cgroup_from_root(struct task_struct *task,
1180 				     struct cgroup_root *root)
1181 {
1182 	/*
1183 	 * No need to lock the task - since we hold cgroup_mutex the
1184 	 * task can't change groups, so the only thing that can happen
1185 	 * is that it exits and its css is set back to init_css_set.
1186 	 */
1187 	return cset_cgroup_from_root(task_css_set(task), root);
1188 }
1189 
1190 /*
1191  * A task must hold cgroup_mutex to modify cgroups.
1192  *
1193  * Any task can increment and decrement the count field without lock.
1194  * So in general, code holding cgroup_mutex can't rely on the count
1195  * field not changing.  However, if the count goes to zero, then only
1196  * cgroup_attach_task() can increment it again.  Because a count of zero
1197  * means that no tasks are currently attached, therefore there is no
1198  * way a task attached to that cgroup can fork (the other way to
1199  * increment the count).  So code holding cgroup_mutex can safely
1200  * assume that if the count is zero, it will stay zero. Similarly, if
1201  * a task holds cgroup_mutex on a cgroup with zero count, it
1202  * knows that the cgroup won't be removed, as cgroup_rmdir()
1203  * needs that mutex.
1204  *
1205  * A cgroup can only be deleted if both its 'count' of using tasks
1206  * is zero, and its list of 'children' cgroups is empty.  Since all
1207  * tasks in the system use _some_ cgroup, and since there is always at
1208  * least one task in the system (init, pid == 1), the root cgroup
1209  * always has either child cgroups and/or using tasks.  So we don't
1210  * need a special hack to ensure that root cgroup cannot be deleted.
1211  *
1212  * P.S.  One more locking exception.  RCU is used to guard the
1213  * update of a task's cgroup pointer by cgroup_attach_task()
1214  */
1215 
1216 static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
1217 
1218 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1219 			      char *buf)
1220 {
1221 	struct cgroup_subsys *ss = cft->ss;
1222 
1223 	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
1224 	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
1225 		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
1226 			 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1227 			 cft->name);
1228 	else
1229 		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
1230 	return buf;
1231 }
1232 
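/*
 * For example (illustrative): a cftype named "limit_in_bytes" in a
 * subsystem whose legacy_name is "memory" yields
 * "memory.limit_in_bytes", while CFTYPE_NO_PREFIX files and files on a
 * CGRP_ROOT_NOPREFIX root keep the bare cft->name.
 */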
1233 /**
1234  * cgroup_file_mode - deduce file mode of a control file
1235  * @cft: the control file in question
1236  *
1237  * S_IRUGO for read, S_IWUSR for write.
1238  */
1239 static umode_t cgroup_file_mode(const struct cftype *cft)
1240 {
1241 	umode_t mode = 0;
1242 
1243 	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
1244 		mode |= S_IRUGO;
1245 
1246 	if (cft->write_u64 || cft->write_s64 || cft->write) {
1247 		if (cft->flags & CFTYPE_WORLD_WRITABLE)
1248 			mode |= S_IWUGO;
1249 		else
1250 			mode |= S_IWUSR;
1251 	}
1252 
1253 	return mode;
1254 }
1255 
1256 /**
1257  * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
1258  * @subtree_control: the new subtree_control mask to consider
1259  * @this_ss_mask: available subsystems
1260  *
1261  * On the default hierarchy, a subsystem may request other subsystems to be
1262  * enabled together through its ->depends_on mask.  In such cases, more
1263  * subsystems than specified in "cgroup.subtree_control" may be enabled.
1264  *
1265  * This function calculates which subsystems need to be enabled if
1266  * @subtree_control is to be applied while restricted to @this_ss_mask.
1267  */
1268 static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
1269 {
1270 	u16 cur_ss_mask = subtree_control;
1271 	struct cgroup_subsys *ss;
1272 	int ssid;
1273 
1274 	lockdep_assert_held(&cgroup_mutex);
1275 
1276 	cur_ss_mask |= cgrp_dfl_implicit_ss_mask;
1277 
1278 	while (true) {
1279 		u16 new_ss_mask = cur_ss_mask;
1280 
1281 		do_each_subsys_mask(ss, ssid, cur_ss_mask) {
1282 			new_ss_mask |= ss->depends_on;
1283 		} while_each_subsys_mask();
1284 
1285 		/*
1286 		 * Mask out subsystems which aren't available.  This can
1287 		 * happen only if some depended-upon subsystems were bound
1288 		 * to non-default hierarchies.
1289 		 */
1290 		new_ss_mask &= this_ss_mask;
1291 
1292 		if (new_ss_mask == cur_ss_mask)
1293 			break;
1294 		cur_ss_mask = new_ss_mask;
1295 	}
1296 
1297 	return cur_ss_mask;
1298 }
1299 
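/*
 * Worked example (hypothetical masks): if @subtree_control enables only
 * the io controller and io's ->depends_on includes the memory
 * controller, the loop converges as
 *
 *	cur_ss_mask: io  ->  io | memory
 *
 * provided memory is available in @this_ss_mask; otherwise the
 * dependency is masked back out and the result stays io alone.
 */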
1300 /**
1301  * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
1302  * @kn: the kernfs_node being serviced
1303  *
1304  * This helper undoes cgroup_kn_lock_live() and should be invoked before
1305  * the method finishes if locking succeeded.  Note that once this function
1306  * returns the cgroup returned by cgroup_kn_lock_live() may become
1307  * inaccessible any time.  If the caller intends to continue to access the
1308  * cgroup, it should pin it before invoking this function.
1309  */
1310 void cgroup_kn_unlock(struct kernfs_node *kn)
1311 {
1312 	struct cgroup *cgrp;
1313 
1314 	if (kernfs_type(kn) == KERNFS_DIR)
1315 		cgrp = kn->priv;
1316 	else
1317 		cgrp = kn->parent->priv;
1318 
1319 	mutex_unlock(&cgroup_mutex);
1320 
1321 	kernfs_unbreak_active_protection(kn);
1322 	cgroup_put(cgrp);
1323 }
1324 
1325 /**
1326  * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
1327  * @kn: the kernfs_node being serviced
1328  * @drain_offline: perform offline draining on the cgroup
1329  *
1330  * This helper is to be used by a cgroup kernfs method currently servicing
1331  * @kn.  It breaks the active protection, performs cgroup locking and
1332  * verifies that the associated cgroup is alive.  Returns the cgroup if
1333  * alive; otherwise, %NULL.  A successful return should be undone by a
1334  * matching cgroup_kn_unlock() invocation.  If @drain_offline is %true, the
1335  * cgroup is drained of offlining csses before return.
1336  *
1337  * Any cgroup kernfs method implementation which requires locking the
1338  * associated cgroup should use this helper.  It avoids nesting cgroup
1339  * locking under kernfs active protection and allows all kernfs operations
1340  * including self-removal.
1341  */
1342 struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
1343 {
1344 	struct cgroup *cgrp;
1345 
1346 	if (kernfs_type(kn) == KERNFS_DIR)
1347 		cgrp = kn->priv;
1348 	else
1349 		cgrp = kn->parent->priv;
1350 
1351 	/*
1352 	 * We're gonna grab cgroup_mutex which nests outside kernfs
1353 	 * active_ref.  The cgroup liveness check alone provides enough
1354 	 * protection against removal.  Ensure @cgrp stays accessible and
1355 	 * break the active_ref protection.
1356 	 */
1357 	if (!cgroup_tryget(cgrp))
1358 		return NULL;
1359 	kernfs_break_active_protection(kn);
1360 
1361 	if (drain_offline)
1362 		cgroup_lock_and_drain_offline(cgrp);
1363 	else
1364 		mutex_lock(&cgroup_mutex);
1365 
1366 	if (!cgroup_is_dead(cgrp))
1367 		return cgrp;
1368 
1369 	cgroup_kn_unlock(kn);
1370 	return NULL;
1371 }
1372 
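/*
 * Illustrative method body (sketch): a cgroup kernfs write handler
 * typically brackets its work as
 *
 *	cgrp = cgroup_kn_lock_live(of->kn, false);
 *	if (!cgrp)
 *		return -ENODEV;
 *	... operate on cgrp ...
 *	cgroup_kn_unlock(of->kn);
 */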
1373 static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
1374 {
1375 	char name[CGROUP_FILE_NAME_MAX];
1376 
1377 	lockdep_assert_held(&cgroup_mutex);
1378 
1379 	if (cft->file_offset) {
1380 		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
1381 		struct cgroup_file *cfile = (void *)css + cft->file_offset;
1382 
1383 		spin_lock_irq(&cgroup_file_kn_lock);
1384 		cfile->kn = NULL;
1385 		spin_unlock_irq(&cgroup_file_kn_lock);
1386 	}
1387 
1388 	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1389 }
1390 
1391 /**
1392  * css_clear_dir - remove subsys files in a cgroup directory
1393  * @css: target css
1394  */
1395 static void css_clear_dir(struct cgroup_subsys_state *css)
1396 {
1397 	struct cgroup *cgrp = css->cgroup;
1398 	struct cftype *cfts;
1399 
1400 	if (!(css->flags & CSS_VISIBLE))
1401 		return;
1402 
1403 	css->flags &= ~CSS_VISIBLE;
1404 
1405 	list_for_each_entry(cfts, &css->ss->cfts, node)
1406 		cgroup_addrm_files(css, cgrp, cfts, false);
1407 }
1408 
1409 /**
1410  * css_populate_dir - create subsys files in a cgroup directory
1411  * @css: target css
1412  *
1413  * On failure, no file is added.
1414  */
1415 static int css_populate_dir(struct cgroup_subsys_state *css)
1416 {
1417 	struct cgroup *cgrp = css->cgroup;
1418 	struct cftype *cfts, *failed_cfts;
1419 	int ret;
1420 
1421 	if ((css->flags & CSS_VISIBLE) || !cgrp->kn)
1422 		return 0;
1423 
1424 	if (!css->ss) {
1425 		if (cgroup_on_dfl(cgrp))
1426 			cfts = cgroup_base_files;
1427 		else
1428 			cfts = cgroup1_base_files;
1429 
1430 		return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
1431 	}
1432 
1433 	list_for_each_entry(cfts, &css->ss->cfts, node) {
1434 		ret = cgroup_addrm_files(css, cgrp, cfts, true);
1435 		if (ret < 0) {
1436 			failed_cfts = cfts;
1437 			goto err;
1438 		}
1439 	}
1440 
1441 	css->flags |= CSS_VISIBLE;
1442 
1443 	return 0;
1444 err:
1445 	list_for_each_entry(cfts, &css->ss->cfts, node) {
1446 		if (cfts == failed_cfts)
1447 			break;
1448 		cgroup_addrm_files(css, cgrp, cfts, false);
1449 	}
1450 	return ret;
1451 }
1452 
1453 int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
1454 {
1455 	struct cgroup *dcgrp = &dst_root->cgrp;
1456 	struct cgroup_subsys *ss;
1457 	int ssid, i, ret;
1458 
1459 	lockdep_assert_held(&cgroup_mutex);
1460 
1461 	do_each_subsys_mask(ss, ssid, ss_mask) {
1462 		/*
1463 		 * If @ss has non-root csses attached to it, can't move.
1464 		 * If @ss is an implicit controller, it is exempt from this
1465 		 * rule and can be stolen.
1466 		 */
1467 		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
1468 		    !ss->implicit_on_dfl)
1469 			return -EBUSY;
1470 
1471 		/* can't move between two non-dummy roots either */
1472 		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
1473 			return -EBUSY;
1474 	} while_each_subsys_mask();
1475 
1476 	do_each_subsys_mask(ss, ssid, ss_mask) {
1477 		struct cgroup_root *src_root = ss->root;
1478 		struct cgroup *scgrp = &src_root->cgrp;
1479 		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
1480 		struct css_set *cset;
1481 
1482 		WARN_ON(!css || cgroup_css(dcgrp, ss));
1483 
1484 		/* disable from the source */
1485 		src_root->subsys_mask &= ~(1 << ssid);
1486 		WARN_ON(cgroup_apply_control(scgrp));
1487 		cgroup_finalize_control(scgrp, 0);
1488 
1489 		/* rebind */
1490 		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
1491 		rcu_assign_pointer(dcgrp->subsys[ssid], css);
1492 		ss->root = dst_root;
1493 		css->cgroup = dcgrp;
1494 
1495 		spin_lock_irq(&css_set_lock);
1496 		hash_for_each(css_set_table, i, cset, hlist)
1497 			list_move_tail(&cset->e_cset_node[ss->id],
1498 				       &dcgrp->e_csets[ss->id]);
1499 		spin_unlock_irq(&css_set_lock);
1500 
1501 		/* default hierarchy doesn't enable controllers by default */
1502 		dst_root->subsys_mask |= 1 << ssid;
1503 		if (dst_root == &cgrp_dfl_root) {
1504 			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
1505 		} else {
1506 			dcgrp->subtree_control |= 1 << ssid;
1507 			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
1508 		}
1509 
1510 		ret = cgroup_apply_control(dcgrp);
1511 		if (ret)
1512 			pr_warn("partial failure to rebind %s controller (err=%d)\n",
1513 				ss->name, ret);
1514 
1515 		if (ss->bind)
1516 			ss->bind(css);
1517 	} while_each_subsys_mask();
1518 
1519 	kernfs_activate(dcgrp->kn);
1520 	return 0;
1521 }
1522 
1523 int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
1524 		     struct kernfs_root *kf_root)
1525 {
1526 	int len = 0;
1527 	char *buf = NULL;
1528 	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
1529 	struct cgroup *ns_cgroup;
1530 
1531 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
1532 	if (!buf)
1533 		return -ENOMEM;
1534 
1535 	spin_lock_irq(&css_set_lock);
1536 	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
1537 	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
1538 	spin_unlock_irq(&css_set_lock);
1539 
1540 	if (len >= PATH_MAX)
1541 		len = -ERANGE;
1542 	else if (len > 0) {
1543 		seq_escape(sf, buf, " \t\n\\");
1544 		len = 0;
1545 	}
1546 	kfree(buf);
1547 	return len;
1548 }
1549 
1550 static int parse_cgroup_root_flags(char *data, unsigned int *root_flags)
1551 {
1552 	char *token;
1553 
1554 	*root_flags = 0;
1555 
1556 	if (!data)
1557 		return 0;
1558 
1559 	while ((token = strsep(&data, ",")) != NULL) {
1560 		if (!strcmp(token, "nsdelegate")) {
1561 			*root_flags |= CGRP_ROOT_NS_DELEGATE;
1562 			continue;
1563 		}
1564 
1565 		pr_err("cgroup2: unknown option \"%s\"\n", token);
1566 		return -EINVAL;
1567 	}
1568 
1569 	return 0;
1570 }
1571 
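/*
 * From userspace (illustrative): the "nsdelegate" option parsed above
 * arrives as mount data, e.g.
 *
 *	mount("none", "/sys/fs/cgroup", "cgroup2", 0, "nsdelegate");
 */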
1572 static void apply_cgroup_root_flags(unsigned int root_flags)
1573 {
1574 	if (current->nsproxy->cgroup_ns == &init_cgroup_ns) {
1575 		if (root_flags & CGRP_ROOT_NS_DELEGATE)
1576 			cgrp_dfl_root.flags |= CGRP_ROOT_NS_DELEGATE;
1577 		else
1578 			cgrp_dfl_root.flags &= ~CGRP_ROOT_NS_DELEGATE;
1579 	}
1580 }
1581 
1582 static int cgroup_show_options(struct seq_file *seq, struct kernfs_root *kf_root)
1583 {
1584 	if (cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE)
1585 		seq_puts(seq, ",nsdelegate");
1586 	return 0;
1587 }
1588 
1589 static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
1590 {
1591 	unsigned int root_flags;
1592 	int ret;
1593 
1594 	ret = parse_cgroup_root_flags(data, &root_flags);
1595 	if (ret)
1596 		return ret;
1597 
1598 	apply_cgroup_root_flags(root_flags);
1599 	return 0;
1600 }
1601 
1602 /*
1603  * To reduce the fork() overhead for systems that are not actually using
1604  * their cgroups capability, we don't maintain the lists running through
1605  * each css_set to its tasks until we see the list actually used - in other
1606  * words after the first mount.
1607  */
1608 static bool use_task_css_set_links __read_mostly;
1609 
1610 static void cgroup_enable_task_cg_lists(void)
1611 {
1612 	struct task_struct *p, *g;
1613 
1614 	spin_lock_irq(&css_set_lock);
1615 
1616 	if (use_task_css_set_links)
1617 		goto out_unlock;
1618 
1619 	use_task_css_set_links = true;
1620 
1621 	/*
1622 	 * We need tasklist_lock because RCU is not safe against
1623 	 * while_each_thread(). Besides, a forking task that has passed
1624 	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
1625 	 * is not guaranteed to have its child immediately visible in the
1626 	 * tasklist if we walk through it with RCU.
1627 	 */
1628 	read_lock(&tasklist_lock);
1629 	do_each_thread(g, p) {
1630 		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
1631 			     task_css_set(p) != &init_css_set);
1632 
1633 		/*
1634 		 * We should check if the process is exiting, otherwise
1635 		 * it will race with cgroup_exit() in that the list
1636 		 * entry won't be deleted though the process has exited.
1637 		 * Do it while holding siglock so that we don't end up
1638 		 * racing against cgroup_exit().
1639 		 *
1640 		 * Interrupts were already disabled while acquiring
1641 		 * the css_set_lock, so we do not need to disable it
1642 		 * again when acquiring the sighand->siglock here.
1643 		 */
1644 		spin_lock(&p->sighand->siglock);
1645 		if (!(p->flags & PF_EXITING)) {
1646 			struct css_set *cset = task_css_set(p);
1647 
1648 			if (!css_set_populated(cset))
1649 				css_set_update_populated(cset, true);
1650 			list_add_tail(&p->cg_list, &cset->tasks);
1651 			get_css_set(cset);
1652 			cset->nr_tasks++;
1653 		}
1654 		spin_unlock(&p->sighand->siglock);
1655 	} while_each_thread(g, p);
1656 	read_unlock(&tasklist_lock);
1657 out_unlock:
1658 	spin_unlock_irq(&css_set_lock);
1659 }
1660 
1661 static void init_cgroup_housekeeping(struct cgroup *cgrp)
1662 {
1663 	struct cgroup_subsys *ss;
1664 	int ssid;
1665 
1666 	INIT_LIST_HEAD(&cgrp->self.sibling);
1667 	INIT_LIST_HEAD(&cgrp->self.children);
1668 	INIT_LIST_HEAD(&cgrp->cset_links);
1669 	INIT_LIST_HEAD(&cgrp->pidlists);
1670 	mutex_init(&cgrp->pidlist_mutex);
1671 	cgrp->self.cgroup = cgrp;
1672 	cgrp->self.flags |= CSS_ONLINE;
1673 
1674 	for_each_subsys(ss, ssid)
1675 		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
1676 
1677 	init_waitqueue_head(&cgrp->offline_waitq);
1678 	INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent);
1679 }
1680 
1681 void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
1682 {
1683 	struct cgroup *cgrp = &root->cgrp;
1684 
1685 	INIT_LIST_HEAD(&root->root_list);
1686 	atomic_set(&root->nr_cgrps, 1);
1687 	cgrp->root = root;
1688 	init_cgroup_housekeeping(cgrp);
1689 	idr_init(&root->cgroup_idr);
1690 
1691 	root->flags = opts->flags;
1692 	if (opts->release_agent)
1693 		strcpy(root->release_agent_path, opts->release_agent);
1694 	if (opts->name)
1695 		strcpy(root->name, opts->name);
1696 	if (opts->cpuset_clone_children)
1697 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
1698 }
1699 
1700 int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags)
1701 {
1702 	LIST_HEAD(tmp_links);
1703 	struct cgroup *root_cgrp = &root->cgrp;
1704 	struct kernfs_syscall_ops *kf_sops;
1705 	struct css_set *cset;
1706 	int i, ret;
1707 
1708 	lockdep_assert_held(&cgroup_mutex);
1709 
1710 	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
1711 	if (ret < 0)
1712 		goto out;
1713 	root_cgrp->id = ret;
1714 	root_cgrp->ancestor_ids[0] = ret;
1715 
1716 	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release,
1717 			      ref_flags, GFP_KERNEL);
1718 	if (ret)
1719 		goto out;
1720 
1721 	/*
1722 	 * We're accessing css_set_count without locking css_set_lock here,
1723 	 * but that's OK - it can only be increased by someone holding
1724 	 * cgroup_mutex, and that's us.  Later rebinding may disable
1725 	 * controllers on the default hierarchy and thus create new csets,
1726 	 * which can't be more than the existing ones.  Allocate 2x.
1727 	 */
1728 	ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links);
1729 	if (ret)
1730 		goto cancel_ref;
1731 
1732 	ret = cgroup_init_root_id(root);
1733 	if (ret)
1734 		goto cancel_ref;
1735 
1736 	kf_sops = root == &cgrp_dfl_root ?
1737 		&cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops;
1738 
1739 	root->kf_root = kernfs_create_root(kf_sops,
1740 					   KERNFS_ROOT_CREATE_DEACTIVATED,
1741 					   root_cgrp);
1742 	if (IS_ERR(root->kf_root)) {
1743 		ret = PTR_ERR(root->kf_root);
1744 		goto exit_root_id;
1745 	}
1746 	root_cgrp->kn = root->kf_root->kn;
1747 
1748 	ret = css_populate_dir(&root_cgrp->self);
1749 	if (ret)
1750 		goto destroy_root;
1751 
1752 	ret = rebind_subsystems(root, ss_mask);
1753 	if (ret)
1754 		goto destroy_root;
1755 
1756 	trace_cgroup_setup_root(root);
1757 
1758 	/*
1759 	 * There must be no failure case after here, since rebinding takes
1760 	 * care of subsystems' refcounts, which are explicitly dropped in
1761 	 * the failure exit path.
1762 	 */
1763 	list_add(&root->root_list, &cgroup_roots);
1764 	cgroup_root_count++;
1765 
1766 	/*
1767 	 * Link the root cgroup in this hierarchy into all the css_set
1768 	 * objects.
1769 	 */
1770 	spin_lock_irq(&css_set_lock);
1771 	hash_for_each(css_set_table, i, cset, hlist) {
1772 		link_css_set(&tmp_links, cset, root_cgrp);
1773 		if (css_set_populated(cset))
1774 			cgroup_update_populated(root_cgrp, true);
1775 	}
1776 	spin_unlock_irq(&css_set_lock);
1777 
1778 	BUG_ON(!list_empty(&root_cgrp->self.children));
1779 	BUG_ON(atomic_read(&root->nr_cgrps) != 1);
1780 
1781 	kernfs_activate(root_cgrp->kn);
1782 	ret = 0;
1783 	goto out;
1784 
1785 destroy_root:
1786 	kernfs_destroy_root(root->kf_root);
1787 	root->kf_root = NULL;
1788 exit_root_id:
1789 	cgroup_exit_root_id(root);
1790 cancel_ref:
1791 	percpu_ref_exit(&root_cgrp->self.refcnt);
1792 out:
1793 	free_cgrp_cset_links(&tmp_links);
1794 	return ret;
1795 }
1796 
1797 struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
1798 			       struct cgroup_root *root, unsigned long magic,
1799 			       struct cgroup_namespace *ns)
1800 {
1801 	struct dentry *dentry;
1802 	bool new_sb;
1803 
1804 	dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb);
1805 
1806 	/*
1807 	 * In a non-init cgroup namespace, instead of the root cgroup's
1808 	 * dentry, we return the dentry corresponding to the cgroupns->root_cgrp.
1809 	 */
1810 	if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
1811 		struct dentry *nsdentry;
1812 		struct cgroup *cgrp;
1813 
1814 		mutex_lock(&cgroup_mutex);
1815 		spin_lock_irq(&css_set_lock);
1816 
1817 		cgrp = cset_cgroup_from_root(ns->root_cset, root);
1818 
1819 		spin_unlock_irq(&css_set_lock);
1820 		mutex_unlock(&cgroup_mutex);
1821 
1822 		nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
1823 		dput(dentry);
1824 		dentry = nsdentry;
1825 	}
1826 
1827 	if (IS_ERR(dentry) || !new_sb)
1828 		cgroup_put(&root->cgrp);
1829 
1830 	return dentry;
1831 }
1832 
1833 static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1834 			 int flags, const char *unused_dev_name,
1835 			 void *data)
1836 {
1837 	struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
1838 	struct dentry *dentry;
1839 	int ret;
1840 
1841 	get_cgroup_ns(ns);
1842 
1843 	/* Check if the caller has permission to mount. */
1844 	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) {
1845 		put_cgroup_ns(ns);
1846 		return ERR_PTR(-EPERM);
1847 	}
1848 
1849 	/*
1850 	 * The first time anyone tries to mount a cgroup, enable the list
1851 	 * linking each css_set to its tasks and fix up all existing tasks.
1852 	 */
1853 	if (!use_task_css_set_links)
1854 		cgroup_enable_task_cg_lists();
1855 
1856 	if (fs_type == &cgroup2_fs_type) {
1857 		unsigned int root_flags;
1858 
1859 		ret = parse_cgroup_root_flags(data, &root_flags);
1860 		if (ret) {
1861 			put_cgroup_ns(ns);
1862 			return ERR_PTR(ret);
1863 		}
1864 
1865 		cgrp_dfl_visible = true;
1866 		cgroup_get_live(&cgrp_dfl_root.cgrp);
1867 
1868 		dentry = cgroup_do_mount(&cgroup2_fs_type, flags, &cgrp_dfl_root,
1869 					 CGROUP2_SUPER_MAGIC, ns);
1870 		if (!IS_ERR(dentry))
1871 			apply_cgroup_root_flags(root_flags);
1872 	} else {
1873 		dentry = cgroup1_mount(&cgroup_fs_type, flags, data,
1874 				       CGROUP_SUPER_MAGIC, ns);
1875 	}
1876 
1877 	put_cgroup_ns(ns);
1878 	return dentry;
1879 }
1880 
1881 static void cgroup_kill_sb(struct super_block *sb)
1882 {
1883 	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
1884 	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1885 
1886 	/*
1887 	 * If @root doesn't have any mounts or children, start killing it.
1888 	 * This prevents new mounts by disabling percpu_ref_tryget_live().
1889 	 * cgroup_mount() may wait for @root's release.
1890 	 *
1891 	 * And don't kill the default root.
1892 	 */
1893 	if (!list_empty(&root->cgrp.self.children) ||
1894 	    root == &cgrp_dfl_root)
1895 		cgroup_put(&root->cgrp);
1896 	else
1897 		percpu_ref_kill(&root->cgrp.self.refcnt);
1898 
1899 	kernfs_kill_sb(sb);
1900 }
1901 
1902 struct file_system_type cgroup_fs_type = {
1903 	.name = "cgroup",
1904 	.mount = cgroup_mount,
1905 	.kill_sb = cgroup_kill_sb,
1906 	.fs_flags = FS_USERNS_MOUNT,
1907 };
1908 
1909 static struct file_system_type cgroup2_fs_type = {
1910 	.name = "cgroup2",
1911 	.mount = cgroup_mount,
1912 	.kill_sb = cgroup_kill_sb,
1913 	.fs_flags = FS_USERNS_MOUNT,
1914 };
1915 
1916 int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
1917 			  struct cgroup_namespace *ns)
1918 {
1919 	struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
1920 
1921 	return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
1922 }
1923 
1924 int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
1925 		   struct cgroup_namespace *ns)
1926 {
1927 	int ret;
1928 
1929 	mutex_lock(&cgroup_mutex);
1930 	spin_lock_irq(&css_set_lock);
1931 
1932 	ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
1933 
1934 	spin_unlock_irq(&css_set_lock);
1935 	mutex_unlock(&cgroup_mutex);
1936 
1937 	return ret;
1938 }
1939 EXPORT_SYMBOL_GPL(cgroup_path_ns);
1940 
1941 /**
1942  * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
1943  * @task: target task
1944  * @buf: the buffer to write the path into
1945  * @buflen: the length of the buffer
1946  *
1947  * Determine @task's cgroup on the first (the one with the lowest non-zero
1948  * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
1949  * function grabs cgroup_mutex and shouldn't be used inside locks used by
1950  * cgroup controller callbacks.
1951  *
1952  * Return value is the same as kernfs_path().
1953  */
1954 int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
1955 {
1956 	struct cgroup_root *root;
1957 	struct cgroup *cgrp;
1958 	int hierarchy_id = 1;
1959 	int ret;
1960 
1961 	mutex_lock(&cgroup_mutex);
1962 	spin_lock_irq(&css_set_lock);
1963 
1964 	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
1965 
1966 	if (root) {
1967 		cgrp = task_cgroup_from_root(task, root);
1968 		ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
1969 	} else {
1970 		/* if no hierarchy exists, everyone is in "/" */
1971 		ret = strlcpy(buf, "/", buflen);
1972 	}
1973 
1974 	spin_unlock_irq(&css_set_lock);
1975 	mutex_unlock(&cgroup_mutex);
1976 	return ret;
1977 }
1978 EXPORT_SYMBOL_GPL(task_cgroup_path);
1979 
1980 /**
1981  * cgroup_migrate_add_task - add a migration target task to a migration context
1982  * @task: target task
1983  * @mgctx: target migration context
1984  *
1985  * Add @task, which is a migration target, to @mgctx->tset.  This function
1986  * becomes a noop if @task doesn't need to be migrated.  @task's css_set
1987  * should have been added as a migration source and @task->cg_list will be
1988  * moved from the css_set's tasks list to the mg_tasks one.
1989  */
1990 static void cgroup_migrate_add_task(struct task_struct *task,
1991 				    struct cgroup_mgctx *mgctx)
1992 {
1993 	struct css_set *cset;
1994 
1995 	lockdep_assert_held(&css_set_lock);
1996 
1997 	/* @task either already exited or can't exit until the end */
1998 	if (task->flags & PF_EXITING)
1999 		return;
2000 
2001 	/* leave @task alone if post_fork() hasn't linked it yet */
2002 	if (list_empty(&task->cg_list))
2003 		return;
2004 
2005 	cset = task_css_set(task);
2006 	if (!cset->mg_src_cgrp)
2007 		return;
2008 
2009 	list_move_tail(&task->cg_list, &cset->mg_tasks);
2010 	if (list_empty(&cset->mg_node))
2011 		list_add_tail(&cset->mg_node,
2012 			      &mgctx->tset.src_csets);
2013 	if (list_empty(&cset->mg_dst_cset->mg_node))
2014 		list_add_tail(&cset->mg_dst_cset->mg_node,
2015 			      &mgctx->tset.dst_csets);
2016 }
2017 
2018 /**
2019  * cgroup_taskset_first - reset taskset and return the first task
2020  * @tset: taskset of interest
2021  * @dst_cssp: output variable for the destination css
2022  *
2023  * @tset iteration is initialized and the first task is returned.
2024  */
2025 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
2026 					 struct cgroup_subsys_state **dst_cssp)
2027 {
2028 	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
2029 	tset->cur_task = NULL;
2030 
2031 	return cgroup_taskset_next(tset, dst_cssp);
2032 }
2033 
2034 /**
2035  * cgroup_taskset_next - iterate to the next task in taskset
2036  * @tset: taskset of interest
2037  * @dst_cssp: output variable for the destination css
2038  *
2039  * Return the next task in @tset.  Iteration must have been initialized
2040  * with cgroup_taskset_first().
2041  */
2042 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
2043 					struct cgroup_subsys_state **dst_cssp)
2044 {
2045 	struct css_set *cset = tset->cur_cset;
2046 	struct task_struct *task = tset->cur_task;
2047 
2048 	while (&cset->mg_node != tset->csets) {
2049 		if (!task)
2050 			task = list_first_entry(&cset->mg_tasks,
2051 						struct task_struct, cg_list);
2052 		else
2053 			task = list_next_entry(task, cg_list);
2054 
2055 		if (&task->cg_list != &cset->mg_tasks) {
2056 			tset->cur_cset = cset;
2057 			tset->cur_task = task;
2058 
2059 			/*
2060 			 * This function may be called both before and
2061 			 * after cgroup_taskset_migrate().  The two cases
2062 			 * can be distinguished by looking at whether @cset
2063 			 * has its ->mg_dst_cset set.
2064 			 */
2065 			if (cset->mg_dst_cset)
2066 				*dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
2067 			else
2068 				*dst_cssp = cset->subsys[tset->ssid];
2069 
2070 			return task;
2071 		}
2072 
2073 		cset = list_next_entry(cset, mg_node);
2074 		task = NULL;
2075 	}
2076 
2077 	return NULL;
2078 }
2079 
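/*
 * Controllers usually don't call the two iterators above directly but go
 * through the cgroup_taskset_for_each() wrapper from cgroup.h, e.g.
 * (sketch - charge() is a made-up controller hook):
 *
 *	struct task_struct *task;
 *	struct cgroup_subsys_state *css;
 *
 *	cgroup_taskset_for_each(task, css, tset)
 *		charge(css, task);
 */
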
2080 /**
2081  * cgroup_taskset_migrate - migrate a taskset
2082  * @mgctx: migration context
2083  *
2084  * Migrate tasks in @mgctx as setup by migration preparation functions.
2085  * This function fails iff one of the ->can_attach callbacks fails and
2086  * guarantees that either all or none of the tasks in @mgctx are migrated.
2087  * @mgctx is consumed regardless of success.
2088  */
2089 static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
2090 {
2091 	struct cgroup_taskset *tset = &mgctx->tset;
2092 	struct cgroup_subsys *ss;
2093 	struct task_struct *task, *tmp_task;
2094 	struct css_set *cset, *tmp_cset;
2095 	int ssid, failed_ssid, ret;
2096 
2097 	/* methods shouldn't be called if no task is actually migrating */
2098 	if (list_empty(&tset->src_csets))
2099 		return 0;
2100 
2101 	/* check that we can legitimately attach to the cgroup */
2102 	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2103 		if (ss->can_attach) {
2104 			tset->ssid = ssid;
2105 			ret = ss->can_attach(tset);
2106 			if (ret) {
2107 				failed_ssid = ssid;
2108 				goto out_cancel_attach;
2109 			}
2110 		}
2111 	} while_each_subsys_mask();
2112 
2113 	/*
2114 	 * Now that we're guaranteed success, proceed to move all tasks to
2115 	 * the new cgroup.  There are no failure cases after here, so this
2116 	 * is the commit point.
2117 	 */
2118 	spin_lock_irq(&css_set_lock);
2119 	list_for_each_entry(cset, &tset->src_csets, mg_node) {
2120 		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
2121 			struct css_set *from_cset = task_css_set(task);
2122 			struct css_set *to_cset = cset->mg_dst_cset;
2123 
2124 			get_css_set(to_cset);
2125 			to_cset->nr_tasks++;
2126 			css_set_move_task(task, from_cset, to_cset, true);
			/*
			 * Update ->nr_tasks before dropping the task's
			 * reference so that @from_cset is never touched
			 * past what may be its final put.
			 */
			from_cset->nr_tasks--;
			put_css_set_locked(from_cset);
2129 		}
2130 	}
2131 	spin_unlock_irq(&css_set_lock);
2132 
2133 	/*
2134 	 * Migration is committed, all target tasks are now on dst_csets.
2135 	 * Nothing is sensitive to fork() after this point.  Notify
2136 	 * controllers that migration is complete.
2137 	 */
2138 	tset->csets = &tset->dst_csets;
2139 
2140 	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2141 		if (ss->attach) {
2142 			tset->ssid = ssid;
2143 			ss->attach(tset);
2144 		}
2145 	} while_each_subsys_mask();
2146 
2147 	ret = 0;
2148 	goto out_release_tset;
2149 
2150 out_cancel_attach:
2151 	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2152 		if (ssid == failed_ssid)
2153 			break;
2154 		if (ss->cancel_attach) {
2155 			tset->ssid = ssid;
2156 			ss->cancel_attach(tset);
2157 		}
2158 	} while_each_subsys_mask();
2159 out_release_tset:
2160 	spin_lock_irq(&css_set_lock);
2161 	list_splice_init(&tset->dst_csets, &tset->src_csets);
2162 	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
2163 		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2164 		list_del_init(&cset->mg_node);
2165 	}
2166 	spin_unlock_irq(&css_set_lock);
2167 	return ret;
2168 }
2169 
2170 /**
2171  * cgroup_may_migrate_to - verify whether a cgroup can be migration destination
2172  * @dst_cgrp: destination cgroup to test
2173  *
2174  * On the default hierarchy, except for the root, subtree_control must be
2175  * zero for migration destination cgroups with tasks so that child cgroups
2176  * don't compete against tasks.
2177  */
2178 bool cgroup_may_migrate_to(struct cgroup *dst_cgrp)
2179 {
2180 	return !cgroup_on_dfl(dst_cgrp) || !cgroup_parent(dst_cgrp) ||
2181 		!dst_cgrp->subtree_control;
2182 }
2183 
2184 /**
2185  * cgroup_migrate_finish - cleanup after attach
2186  * @mgctx: migration context
2187  *
2188  * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
2189  * those functions for details.
2190  */
2191 void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
2192 {
2193 	LIST_HEAD(preloaded);
2194 	struct css_set *cset, *tmp_cset;
2195 
2196 	lockdep_assert_held(&cgroup_mutex);
2197 
2198 	spin_lock_irq(&css_set_lock);
2199 
2200 	list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded);
2201 	list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded);
2202 
2203 	list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) {
2204 		cset->mg_src_cgrp = NULL;
2205 		cset->mg_dst_cgrp = NULL;
2206 		cset->mg_dst_cset = NULL;
2207 		list_del_init(&cset->mg_preload_node);
2208 		put_css_set_locked(cset);
2209 	}
2210 
2211 	spin_unlock_irq(&css_set_lock);
2212 }
2213 
2214 /**
2215  * cgroup_migrate_add_src - add a migration source css_set
2216  * @src_cset: the source css_set to add
2217  * @dst_cgrp: the destination cgroup
2218  * @mgctx: migration context
2219  *
2220  * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
2221  * @src_cset and add it to @mgctx->src_csets, which should later be cleaned
2222  * up by cgroup_migrate_finish().
2223  *
2224  * This function may be called without holding cgroup_threadgroup_rwsem
2225  * even if the target is a process.  Threads may be created and destroyed
2226  * but as long as cgroup_mutex is not dropped, no new css_set can be put
2227  * into play and the preloaded css_sets are guaranteed to cover all
2228  * migrations.
2229  */
2230 void cgroup_migrate_add_src(struct css_set *src_cset,
2231 			    struct cgroup *dst_cgrp,
2232 			    struct cgroup_mgctx *mgctx)
2233 {
2234 	struct cgroup *src_cgrp;
2235 
2236 	lockdep_assert_held(&cgroup_mutex);
2237 	lockdep_assert_held(&css_set_lock);
2238 
2239 	/*
2240 	 * If ->dead, @src_cset is associated with one or more dead cgroups
2241 	 * and doesn't contain any migratable tasks.  Ignore it early so
2242 	 * that the rest of the migration path doesn't get confused by it.
2243 	 */
2244 	if (src_cset->dead)
2245 		return;
2246 
2247 	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
2248 
2249 	if (!list_empty(&src_cset->mg_preload_node))
2250 		return;
2251 
2252 	WARN_ON(src_cset->mg_src_cgrp);
2253 	WARN_ON(src_cset->mg_dst_cgrp);
2254 	WARN_ON(!list_empty(&src_cset->mg_tasks));
2255 	WARN_ON(!list_empty(&src_cset->mg_node));
2256 
2257 	src_cset->mg_src_cgrp = src_cgrp;
2258 	src_cset->mg_dst_cgrp = dst_cgrp;
2259 	get_css_set(src_cset);
2260 	list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets);
2261 }
2262 
2263 /**
2264  * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
2265  * @mgctx: migration context
2266  *
2267  * Tasks are about to be moved and all the source css_sets have been
2268  * preloaded to @mgctx->preloaded_src_csets.  This function looks up and
2269  * pins all destination css_sets, links each to its source, and appends them
2270  * to @mgctx->preloaded_dst_csets.
2271  *
2272  * This function must be called after cgroup_migrate_add_src() has been
2273  * called on each migration source css_set.  After migration is performed
2274  * using cgroup_migrate(), cgroup_migrate_finish() must be called on
2275  * @mgctx.
2276  */
2277 int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
2278 {
2279 	struct css_set *src_cset, *tmp_cset;
2280 
2281 	lockdep_assert_held(&cgroup_mutex);
2282 
2283 	/* look up the dst cset for each src cset and link it to src */
2284 	list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
2285 				 mg_preload_node) {
2286 		struct css_set *dst_cset;
2287 		struct cgroup_subsys *ss;
2288 		int ssid;
2289 
2290 		dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
2291 		if (!dst_cset)
2292 			goto err;
2293 
2294 		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
2295 
2296 		/*
2297 		 * If src cset equals dst, it's a noop.  Drop the src.
2298 		 * cgroup_migrate() will skip the cset too.  Note that we
2299 		 * can't handle src == dst as some nodes are used by both.
2300 		 */
2301 		if (src_cset == dst_cset) {
2302 			src_cset->mg_src_cgrp = NULL;
2303 			src_cset->mg_dst_cgrp = NULL;
2304 			list_del_init(&src_cset->mg_preload_node);
2305 			put_css_set(src_cset);
2306 			put_css_set(dst_cset);
2307 			continue;
2308 		}
2309 
2310 		src_cset->mg_dst_cset = dst_cset;
2311 
2312 		if (list_empty(&dst_cset->mg_preload_node))
2313 			list_add_tail(&dst_cset->mg_preload_node,
2314 				      &mgctx->preloaded_dst_csets);
2315 		else
2316 			put_css_set(dst_cset);
2317 
2318 		for_each_subsys(ss, ssid)
2319 			if (src_cset->subsys[ssid] != dst_cset->subsys[ssid])
2320 				mgctx->ss_mask |= 1 << ssid;
2321 	}
2322 
2323 	return 0;
2324 err:
2325 	cgroup_migrate_finish(mgctx);
2326 	return -ENOMEM;
2327 }
2328 
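/*
 * Taken together, a full migration is driven as follows (sketch, locking
 * elided - see cgroup_attach_task() below for the canonical user).  The
 * first call is repeated for each source css_set:
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *
 *	cgroup_migrate_add_src(cset, dst_cgrp, &mgctx);
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(leader, threadgroup, &mgctx);
 *	cgroup_migrate_finish(&mgctx);
 */
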
2329 /**
2330  * cgroup_migrate - migrate a process or task to a cgroup
2331  * @leader: the leader of the process or the task to migrate
2332  * @threadgroup: whether @leader points to the whole process or a single task
2333  * @mgctx: migration context
2334  *
2335  * Migrate a process or task denoted by @leader.  If migrating a process,
2336  * the caller must be holding cgroup_threadgroup_rwsem.  The caller is also
2337  * responsible for invoking cgroup_migrate_add_src() and
2338  * cgroup_migrate_prepare_dst() on the targets before invoking this
2339  * function and following up with cgroup_migrate_finish().
2340  *
2341  * As long as a controller's ->can_attach() doesn't fail, this function is
2342  * guaranteed to succeed.  This means that, excluding ->can_attach()
2343  * failure, when migrating multiple targets, the success or failure can be
2344  * decided for all targets by invoking cgroup_migrate_prepare_dst() before
2345  * actually starting to migrate.
2346  */
2347 int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2348 		   struct cgroup_mgctx *mgctx)
2349 {
2350 	struct task_struct *task;
2351 
2352 	/*
2353 	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
2354 	 * already PF_EXITING could be freed from underneath us unless we
2355 	 * take an rcu_read_lock.
2356 	 */
2357 	spin_lock_irq(&css_set_lock);
2358 	rcu_read_lock();
2359 	task = leader;
2360 	do {
2361 		cgroup_migrate_add_task(task, mgctx);
2362 		if (!threadgroup)
2363 			break;
2364 	} while_each_thread(leader, task);
2365 	rcu_read_unlock();
2366 	spin_unlock_irq(&css_set_lock);
2367 
2368 	return cgroup_migrate_execute(mgctx);
2369 }
2370 
2371 /**
2372  * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
2373  * @dst_cgrp: the cgroup to attach to
2374  * @leader: the task or the leader of the threadgroup to be attached
2375  * @threadgroup: attach the whole threadgroup?
2376  *
2377  * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
2378  */
2379 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
2380 		       bool threadgroup)
2381 {
2382 	DEFINE_CGROUP_MGCTX(mgctx);
2383 	struct task_struct *task;
2384 	int ret;
2385 
2386 	if (!cgroup_may_migrate_to(dst_cgrp))
2387 		return -EBUSY;
2388 
2389 	/* look up all src csets */
2390 	spin_lock_irq(&css_set_lock);
2391 	rcu_read_lock();
2392 	task = leader;
2393 	do {
2394 		cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
2395 		if (!threadgroup)
2396 			break;
2397 	} while_each_thread(leader, task);
2398 	rcu_read_unlock();
2399 	spin_unlock_irq(&css_set_lock);
2400 
2401 	/* prepare dst csets and commit */
2402 	ret = cgroup_migrate_prepare_dst(&mgctx);
2403 	if (!ret)
2404 		ret = cgroup_migrate(leader, threadgroup, &mgctx);
2405 
2406 	cgroup_migrate_finish(&mgctx);
2407 
2408 	if (!ret)
2409 		trace_cgroup_attach_task(dst_cgrp, leader, threadgroup);
2410 
2411 	return ret;
2412 }
2413 
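/*
 * May %current migrate @task to @dst_cgrp through the file behind @of?
 * On legacy hierarchies, only the euid checks below apply.  On the default
 * hierarchy, %current additionally needs write access to "cgroup.procs" of
 * the source/destination common ancestor and, with the nsdelegate mount
 * option, both cgroups must be reachable from %current's cgroup namespace.
 */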
2414 static int cgroup_procs_write_permission(struct task_struct *task,
2415 					 struct cgroup *dst_cgrp,
2416 					 struct kernfs_open_file *of)
2417 {
2418 	struct super_block *sb = of->file->f_path.dentry->d_sb;
2419 	struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
2420 	struct cgroup *root_cgrp = ns->root_cset->dfl_cgrp;
2421 	struct cgroup *src_cgrp, *com_cgrp;
2422 	struct inode *inode;
2423 	int ret;
2424 
2425 	if (!cgroup_on_dfl(dst_cgrp)) {
2426 		const struct cred *cred = current_cred();
2427 		const struct cred *tcred = get_task_cred(task);
2428 
2429 		/*
2430 		 * even if we're attaching all tasks in the thread group,
2431 		 * we only need to check permissions on one of them.
2432 		 */
2433 		if (uid_eq(cred->euid, GLOBAL_ROOT_UID) ||
2434 		    uid_eq(cred->euid, tcred->uid) ||
2435 		    uid_eq(cred->euid, tcred->suid))
2436 			ret = 0;
2437 		else
2438 			ret = -EACCES;
2439 
2440 		put_cred(tcred);
2441 		return ret;
2442 	}
2443 
2444 	/* find the source cgroup */
2445 	spin_lock_irq(&css_set_lock);
2446 	src_cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
2447 	spin_unlock_irq(&css_set_lock);
2448 
2449 	/* and the common ancestor */
2450 	com_cgrp = src_cgrp;
2451 	while (!cgroup_is_descendant(dst_cgrp, com_cgrp))
2452 		com_cgrp = cgroup_parent(com_cgrp);
2453 
2454 	/* %current should be authorized to migrate to the common ancestor */
2455 	inode = kernfs_get_inode(sb, com_cgrp->procs_file.kn);
2456 	if (!inode)
2457 		return -ENOMEM;
2458 
2459 	ret = inode_permission(inode, MAY_WRITE);
2460 	iput(inode);
2461 	if (ret)
2462 		return ret;
2463 
2464 	/*
2465 	 * If namespaces are delegation boundaries, %current must be able
2466 	 * to see both source and destination cgroups from its namespace.
2467 	 */
2468 	if ((cgrp_dfl_root.flags & CGRP_ROOT_NS_DELEGATE) &&
2469 	    (!cgroup_is_descendant(src_cgrp, root_cgrp) ||
2470 	     !cgroup_is_descendant(dst_cgrp, root_cgrp)))
2471 		return -ENOENT;
2472 
2473 	return 0;
2474 }
2475 
2476 /*
2477  * Find the task_struct of the task to attach by vpid and pass it along to the
2478  * function to attach either it or all tasks in its threadgroup. Will lock
2479  * cgroup_mutex and threadgroup.
2480  */
2481 ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2482 			     size_t nbytes, loff_t off, bool threadgroup)
2483 {
2484 	struct task_struct *tsk;
2485 	struct cgroup_subsys *ss;
2486 	struct cgroup *cgrp;
2487 	pid_t pid;
2488 	int ssid, ret;
2489 
2490 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2491 		return -EINVAL;
2492 
2493 	cgrp = cgroup_kn_lock_live(of->kn, false);
2494 	if (!cgrp)
2495 		return -ENODEV;
2496 
2497 	percpu_down_write(&cgroup_threadgroup_rwsem);
2498 	rcu_read_lock();
2499 	if (pid) {
2500 		tsk = find_task_by_vpid(pid);
2501 		if (!tsk) {
2502 			ret = -ESRCH;
2503 			goto out_unlock_rcu;
2504 		}
2505 	} else {
2506 		tsk = current;
2507 	}
2508 
2509 	if (threadgroup)
2510 		tsk = tsk->group_leader;
2511 
2512 	/*
2513 	 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2514 	 * If userland migrates such a kthread to a non-root cgroup, it can
2515 	 * become trapped in a cpuset, or an RT kthread may be born in a
2516 	 * cgroup with no rt_runtime allocated.  Just say no.
2517 	 */
2518 	if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
2519 		ret = -EINVAL;
2520 		goto out_unlock_rcu;
2521 	}
2522 
2523 	get_task_struct(tsk);
2524 	rcu_read_unlock();
2525 
2526 	ret = cgroup_procs_write_permission(tsk, cgrp, of);
2527 	if (!ret)
2528 		ret = cgroup_attach_task(cgrp, tsk, threadgroup);
2529 
2530 	put_task_struct(tsk);
2531 	goto out_unlock_threadgroup;
2532 
2533 out_unlock_rcu:
2534 	rcu_read_unlock();
2535 out_unlock_threadgroup:
2536 	percpu_up_write(&cgroup_threadgroup_rwsem);
2537 	for_each_subsys(ss, ssid)
2538 		if (ss->post_attach)
2539 			ss->post_attach();
2540 	cgroup_kn_unlock(of->kn);
2541 	return ret ?: nbytes;
2542 }
2543 
2544 ssize_t cgroup_procs_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
2545 			   loff_t off)
2546 {
2547 	return __cgroup_procs_write(of, buf, nbytes, off, true);
2548 }
2549 
2550 static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask)
2551 {
2552 	struct cgroup_subsys *ss;
2553 	bool printed = false;
2554 	int ssid;
2555 
2556 	do_each_subsys_mask(ss, ssid, ss_mask) {
2557 		if (printed)
2558 			seq_putc(seq, ' ');
2559 		seq_printf(seq, "%s", ss->name);
2560 		printed = true;
2561 	} while_each_subsys_mask();
2562 	if (printed)
2563 		seq_putc(seq, '\n');
2564 }
2565 
2566 /* show controllers which are enabled from the parent */
2567 static int cgroup_controllers_show(struct seq_file *seq, void *v)
2568 {
2569 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2570 
2571 	cgroup_print_ss_mask(seq, cgroup_control(cgrp));
2572 	return 0;
2573 }
2574 
2575 /* show controllers which are enabled for a given cgroup's children */
2576 static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
2577 {
2578 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2579 
2580 	cgroup_print_ss_mask(seq, cgrp->subtree_control);
2581 	return 0;
2582 }
2583 
2584 /**
2585  * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
2586  * @cgrp: root of the subtree to update csses for
2587  *
2588  * @cgrp's control masks have changed and its subtree's css associations
2589  * need to be updated accordingly.  This function looks up all css_sets
2590  * which are attached to the subtree, creates the matching updated css_sets
2591  * and migrates the tasks to the new ones.
2592  */
2593 static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2594 {
2595 	DEFINE_CGROUP_MGCTX(mgctx);
2596 	struct cgroup_subsys_state *d_css;
2597 	struct cgroup *dsct;
2598 	struct css_set *src_cset;
2599 	int ret;
2600 
2601 	lockdep_assert_held(&cgroup_mutex);
2602 
2603 	percpu_down_write(&cgroup_threadgroup_rwsem);
2604 
2605 	/* look up all csses currently attached to @cgrp's subtree */
2606 	spin_lock_irq(&css_set_lock);
2607 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2608 		struct cgrp_cset_link *link;
2609 
2610 		list_for_each_entry(link, &dsct->cset_links, cset_link)
2611 			cgroup_migrate_add_src(link->cset, dsct, &mgctx);
2612 	}
2613 	spin_unlock_irq(&css_set_lock);
2614 
	/*
	 * Prepare dst csets.  Each src cset was preloaded above with its own
	 * cgroup as the destination, i.e. migration to self, which picks up
	 * the css associations implied by the updated control masks.
	 */
2616 	ret = cgroup_migrate_prepare_dst(&mgctx);
2617 	if (ret)
2618 		goto out_finish;
2619 
2620 	spin_lock_irq(&css_set_lock);
2621 	list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) {
2622 		struct task_struct *task, *ntask;
2623 
2624 		/* all tasks in src_csets need to be migrated */
2625 		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
2626 			cgroup_migrate_add_task(task, &mgctx);
2627 	}
2628 	spin_unlock_irq(&css_set_lock);
2629 
2630 	ret = cgroup_migrate_execute(&mgctx);
2631 out_finish:
2632 	cgroup_migrate_finish(&mgctx);
2633 	percpu_up_write(&cgroup_threadgroup_rwsem);
2634 	return ret;
2635 }
2636 
2637 /**
2638  * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses
2639  * @cgrp: root of the target subtree
2640  *
2641  * Because css offlining is asynchronous, userland may try to re-enable a
2642  * controller while the previous css is still around.  This function grabs
2643  * cgroup_mutex and drains the previous css instances of @cgrp's subtree.
2644  */
2645 void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
2646 	__acquires(&cgroup_mutex)
2647 {
2648 	struct cgroup *dsct;
2649 	struct cgroup_subsys_state *d_css;
2650 	struct cgroup_subsys *ss;
2651 	int ssid;
2652 
2653 restart:
2654 	mutex_lock(&cgroup_mutex);
2655 
2656 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
2657 		for_each_subsys(ss, ssid) {
2658 			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
2659 			DEFINE_WAIT(wait);
2660 
2661 			if (!css || !percpu_ref_is_dying(&css->refcnt))
2662 				continue;
2663 
2664 			cgroup_get_live(dsct);
2665 			prepare_to_wait(&dsct->offline_waitq, &wait,
2666 					TASK_UNINTERRUPTIBLE);
2667 
2668 			mutex_unlock(&cgroup_mutex);
2669 			schedule();
2670 			finish_wait(&dsct->offline_waitq, &wait);
2671 
2672 			cgroup_put(dsct);
2673 			goto restart;
2674 		}
2675 	}
2676 }
2677 
2678 /**
2679  * cgroup_save_control - save control masks of a subtree
2680  * @cgrp: root of the target subtree
2681  *
2682  * Save ->subtree_control and ->subtree_ss_mask to the respective old_
2683  * prefixed fields for @cgrp's subtree including @cgrp itself.
2684  */
2685 static void cgroup_save_control(struct cgroup *cgrp)
2686 {
2687 	struct cgroup *dsct;
2688 	struct cgroup_subsys_state *d_css;
2689 
2690 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2691 		dsct->old_subtree_control = dsct->subtree_control;
2692 		dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
2693 	}
2694 }
2695 
2696 /**
2697  * cgroup_propagate_control - refresh control masks of a subtree
2698  * @cgrp: root of the target subtree
2699  *
2700  * For @cgrp and its subtree, ensure ->subtree_ss_mask matches
2701  * ->subtree_control and propagate controller availability through the
2702  * subtree so that descendants don't have unavailable controllers enabled.
2703  */
2704 static void cgroup_propagate_control(struct cgroup *cgrp)
2705 {
2706 	struct cgroup *dsct;
2707 	struct cgroup_subsys_state *d_css;
2708 
2709 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2710 		dsct->subtree_control &= cgroup_control(dsct);
2711 		dsct->subtree_ss_mask =
2712 			cgroup_calc_subtree_ss_mask(dsct->subtree_control,
2713 						    cgroup_ss_mask(dsct));
2714 	}
2715 }
2716 
2717 /**
2718  * cgroup_restore_control - restore control masks of a subtree
2719  * @cgrp: root of the target subtree
2720  *
2721  * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
2722  * prefixed fields for @cgrp's subtree including @cgrp itself.
2723  */
2724 static void cgroup_restore_control(struct cgroup *cgrp)
2725 {
2726 	struct cgroup *dsct;
2727 	struct cgroup_subsys_state *d_css;
2728 
2729 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
2730 		dsct->subtree_control = dsct->old_subtree_control;
2731 		dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
2732 	}
2733 }
2734 
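/* should @css's interface files be visible to userland? */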
2735 static bool css_visible(struct cgroup_subsys_state *css)
2736 {
2737 	struct cgroup_subsys *ss = css->ss;
2738 	struct cgroup *cgrp = css->cgroup;
2739 
2740 	if (cgroup_control(cgrp) & (1 << ss->id))
2741 		return true;
2742 	if (!(cgroup_ss_mask(cgrp) & (1 << ss->id)))
2743 		return false;
2744 	return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl;
2745 }
2746 
2747 /**
2748  * cgroup_apply_control_enable - enable or show csses according to control
2749  * @cgrp: root of the target subtree
2750  *
2751  * Walk @cgrp's subtree and create new csses or make the existing ones
2752  * visible.  A css is created invisible if it's being implicitly enabled
2753  * through dependency.  An invisible css is made visible when the userland
2754  * explicitly enables it.
2755  *
2756  * Returns 0 on success, -errno on failure.  On failure, csses which have
2757  * been processed already aren't cleaned up.  The caller is responsible for
2758  * cleaning up with cgroup_apply_control_disable().
2759  */
2760 static int cgroup_apply_control_enable(struct cgroup *cgrp)
2761 {
2762 	struct cgroup *dsct;
2763 	struct cgroup_subsys_state *d_css;
2764 	struct cgroup_subsys *ss;
2765 	int ssid, ret;
2766 
2767 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2768 		for_each_subsys(ss, ssid) {
2769 			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
2770 
2771 			WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
2772 
2773 			if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
2774 				continue;
2775 
2776 			if (!css) {
2777 				css = css_create(dsct, ss);
2778 				if (IS_ERR(css))
2779 					return PTR_ERR(css);
2780 			}
2781 
2782 			if (css_visible(css)) {
2783 				ret = css_populate_dir(css);
2784 				if (ret)
2785 					return ret;
2786 			}
2787 		}
2788 	}
2789 
2790 	return 0;
2791 }
2792 
2793 /**
2794  * cgroup_apply_control_disable - kill or hide csses according to control
2795  * @cgrp: root of the target subtree
2796  *
2797  * Walk @cgrp's subtree and kill and hide csses so that they match
2798  * cgroup_ss_mask() and css_visible().
2799  *
2800  * A css is hidden when the userland requests it to be disabled while other
2801  * subsystems are still depending on it.  The css must not actively control
2802  * resources and must be in the vanilla state if it's made visible again later.
2803  * Controllers which may be depended upon should provide ->css_reset() for
2804  * this purpose.
2805  */
2806 static void cgroup_apply_control_disable(struct cgroup *cgrp)
2807 {
2808 	struct cgroup *dsct;
2809 	struct cgroup_subsys_state *d_css;
2810 	struct cgroup_subsys *ss;
2811 	int ssid;
2812 
2813 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
2814 		for_each_subsys(ss, ssid) {
2815 			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
2816 
2817 			WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
2818 
2819 			if (!css)
2820 				continue;
2821 
2822 			if (css->parent &&
2823 			    !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
2824 				kill_css(css);
2825 			} else if (!css_visible(css)) {
2826 				css_clear_dir(css);
2827 				if (ss->css_reset)
2828 					ss->css_reset(css);
2829 			}
2830 		}
2831 	}
2832 }
2833 
2834 /**
2835  * cgroup_apply_control - apply control mask updates to the subtree
2836  * @cgrp: root of the target subtree
2837  *
2838  * Subsystems can be enabled and disabled in a subtree using the following
2839  * steps.
2840  *
2841  * 1. Call cgroup_save_control() to stash the current state.
2842  * 2. Update ->subtree_control masks in the subtree as desired.
2843  * 3. Call cgroup_apply_control() to apply the changes.
2844  * 4. Optionally perform other related operations.
2845  * 5. Call cgroup_finalize_control() to finish up.
2846  *
2847  * This function implements step 3 and propagates the mask changes
2848  * throughout @cgrp's subtree, updates csses accordingly and performs
2849  * process migrations.
2850  */
2851 static int cgroup_apply_control(struct cgroup *cgrp)
2852 {
2853 	int ret;
2854 
2855 	cgroup_propagate_control(cgrp);
2856 
2857 	ret = cgroup_apply_control_enable(cgrp);
2858 	if (ret)
2859 		return ret;
2860 
2861 	/*
2862 	 * At this point, cgroup_e_css() results reflect the new csses
2863 	 * making the following cgroup_update_dfl_csses() properly update
2864 	 * css associations of all tasks in the subtree.
2865 	 */
2866 	ret = cgroup_update_dfl_csses(cgrp);
2867 	if (ret)
2868 		return ret;
2869 
2870 	return 0;
2871 }
2872 
2873 /**
2874  * cgroup_finalize_control - finalize control mask update
2875  * @cgrp: root of the target subtree
2876  * @ret: the result of the update
2877  *
2878  * Finalize control mask update.  See cgroup_apply_control() for more info.
2879  */
2880 static void cgroup_finalize_control(struct cgroup *cgrp, int ret)
2881 {
2882 	if (ret) {
2883 		cgroup_restore_control(cgrp);
2884 		cgroup_propagate_control(cgrp);
2885 	}
2886 
2887 	cgroup_apply_control_disable(cgrp);
2888 }
2889 
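/*
 * Putting the above together, a control mask update looks like the
 * following sketch; cgroup_subtree_control_write() below is the real
 * thing and the caller must hold cgroup_mutex throughout:
 *
 *	cgroup_save_control(cgrp);
 *	cgrp->subtree_control |= enable;
 *	cgrp->subtree_control &= ~disable;
 *	ret = cgroup_apply_control(cgrp);
 *	cgroup_finalize_control(cgrp, ret);
 */
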
2890 /* change the enabled child controllers for a cgroup in the default hierarchy */
2891 static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
2892 					    char *buf, size_t nbytes,
2893 					    loff_t off)
2894 {
2895 	u16 enable = 0, disable = 0;
2896 	struct cgroup *cgrp, *child;
2897 	struct cgroup_subsys *ss;
2898 	char *tok;
2899 	int ssid, ret;
2900 
2901 	/*
2902 	 * Parse input - space-separated list of subsystem names prefixed
2903 	 * with either + or -.
2904 	 */
2905 	buf = strstrip(buf);
2906 	while ((tok = strsep(&buf, " "))) {
2907 		if (tok[0] == '\0')
2908 			continue;
2909 		do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) {
2910 			if (!cgroup_ssid_enabled(ssid) ||
2911 			    strcmp(tok + 1, ss->name))
2912 				continue;
2913 
2914 			if (*tok == '+') {
2915 				enable |= 1 << ssid;
2916 				disable &= ~(1 << ssid);
2917 			} else if (*tok == '-') {
2918 				disable |= 1 << ssid;
2919 				enable &= ~(1 << ssid);
2920 			} else {
2921 				return -EINVAL;
2922 			}
2923 			break;
2924 		} while_each_subsys_mask();
2925 		if (ssid == CGROUP_SUBSYS_COUNT)
2926 			return -EINVAL;
2927 	}
2928 
2929 	cgrp = cgroup_kn_lock_live(of->kn, true);
2930 	if (!cgrp)
2931 		return -ENODEV;
2932 
2933 	for_each_subsys(ss, ssid) {
2934 		if (enable & (1 << ssid)) {
2935 			if (cgrp->subtree_control & (1 << ssid)) {
2936 				enable &= ~(1 << ssid);
2937 				continue;
2938 			}
2939 
2940 			if (!(cgroup_control(cgrp) & (1 << ssid))) {
2941 				ret = -ENOENT;
2942 				goto out_unlock;
2943 			}
2944 		} else if (disable & (1 << ssid)) {
2945 			if (!(cgrp->subtree_control & (1 << ssid))) {
2946 				disable &= ~(1 << ssid);
2947 				continue;
2948 			}
2949 
2950 			/* a child has it enabled? */
2951 			cgroup_for_each_live_child(child, cgrp) {
2952 				if (child->subtree_control & (1 << ssid)) {
2953 					ret = -EBUSY;
2954 					goto out_unlock;
2955 				}
2956 			}
2957 		}
2958 	}
2959 
2960 	if (!enable && !disable) {
2961 		ret = 0;
2962 		goto out_unlock;
2963 	}
2964 
2965 	/*
2966 	 * Except for the root, subtree_control must be zero for a cgroup
2967 	 * with tasks so that child cgroups don't compete against tasks.
2968 	 */
2969 	if (enable && cgroup_parent(cgrp)) {
2970 		struct cgrp_cset_link *link;
2971 
2972 		/*
2973 		 * Because namespaces pin csets too, @cgrp->cset_links
2974 		 * might not be empty even when @cgrp is empty.  Walk and
2975 		 * verify each cset.
2976 		 */
2977 		spin_lock_irq(&css_set_lock);
2978 
2979 		ret = 0;
2980 		list_for_each_entry(link, &cgrp->cset_links, cset_link) {
2981 			if (css_set_populated(link->cset)) {
2982 				ret = -EBUSY;
2983 				break;
2984 			}
2985 		}
2986 
2987 		spin_unlock_irq(&css_set_lock);
2988 
2989 		if (ret)
2990 			goto out_unlock;
2991 	}
2992 
2993 	/* save and update control masks and prepare csses */
2994 	cgroup_save_control(cgrp);
2995 
2996 	cgrp->subtree_control |= enable;
2997 	cgrp->subtree_control &= ~disable;
2998 
	ret = cgroup_apply_control(cgrp);

	cgroup_finalize_control(cgrp, ret);
	if (ret)
		goto out_unlock;

	kernfs_activate(cgrp->kn);
3005 out_unlock:
3006 	cgroup_kn_unlock(of->kn);
3007 	return ret ?: nbytes;
3008 }
3009 
3010 static int cgroup_events_show(struct seq_file *seq, void *v)
3011 {
3012 	seq_printf(seq, "populated %d\n",
3013 		   cgroup_is_populated(seq_css(seq)->cgroup));
3014 	return 0;
3015 }
3016 
3017 static int cgroup_file_open(struct kernfs_open_file *of)
3018 {
3019 	struct cftype *cft = of->kn->priv;
3020 
3021 	if (cft->open)
3022 		return cft->open(of);
3023 	return 0;
3024 }
3025 
3026 static void cgroup_file_release(struct kernfs_open_file *of)
3027 {
3028 	struct cftype *cft = of->kn->priv;
3029 
3030 	if (cft->release)
3031 		cft->release(of);
3032 }
3033 
3034 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
3035 				 size_t nbytes, loff_t off)
3036 {
3037 	struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
3038 	struct cgroup *cgrp = of->kn->parent->priv;
3039 	struct cftype *cft = of->kn->priv;
3040 	struct cgroup_subsys_state *css;
3041 	int ret;
3042 
3043 	/*
3044 	 * If namespaces are delegation boundaries, disallow writes to
3045 	 * files in a non-init namespace root from inside the namespace
3046 	 * except for the files explicitly marked delegatable -
3047 	 * cgroup.procs and cgroup.subtree_control.
3048 	 */
3049 	if ((cgrp->root->flags & CGRP_ROOT_NS_DELEGATE) &&
3050 	    !(cft->flags & CFTYPE_NS_DELEGATABLE) &&
3051 	    ns != &init_cgroup_ns && ns->root_cset->dfl_cgrp == cgrp)
3052 		return -EPERM;
3053 
3054 	if (cft->write)
3055 		return cft->write(of, buf, nbytes, off);
3056 
3057 	/*
3058 	 * kernfs guarantees that a file isn't deleted with operations in
3059 	 * flight, which means that the matching css remains alive and
3060 	 * doesn't need to be pinned.  The RCU locking is not necessary
3061 	 * either.  It's just for the convenience of using cgroup_css().
3062 	 */
3063 	rcu_read_lock();
3064 	css = cgroup_css(cgrp, cft->ss);
3065 	rcu_read_unlock();
3066 
3067 	if (cft->write_u64) {
3068 		unsigned long long v;
3069 		ret = kstrtoull(buf, 0, &v);
3070 		if (!ret)
3071 			ret = cft->write_u64(css, cft, v);
3072 	} else if (cft->write_s64) {
3073 		long long v;
3074 		ret = kstrtoll(buf, 0, &v);
3075 		if (!ret)
3076 			ret = cft->write_s64(css, cft, v);
3077 	} else {
3078 		ret = -EINVAL;
3079 	}
3080 
3081 	return ret ?: nbytes;
3082 }
3083 
3084 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
3085 {
3086 	return seq_cft(seq)->seq_start(seq, ppos);
3087 }
3088 
3089 static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
3090 {
3091 	return seq_cft(seq)->seq_next(seq, v, ppos);
3092 }
3093 
3094 static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
3095 {
3096 	if (seq_cft(seq)->seq_stop)
3097 		seq_cft(seq)->seq_stop(seq, v);
3098 }
3099 
3100 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
3101 {
3102 	struct cftype *cft = seq_cft(m);
3103 	struct cgroup_subsys_state *css = seq_css(m);
3104 
3105 	if (cft->seq_show)
3106 		return cft->seq_show(m, arg);
3107 
3108 	if (cft->read_u64)
3109 		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
3110 	else if (cft->read_s64)
3111 		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
3112 	else
3113 		return -EINVAL;
3114 	return 0;
3115 }
3116 
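/*
 * Two kernfs_ops flavors: cgroup_kf_single_ops serves cftypes which show a
 * single value while cgroup_kf_ops serves cftypes with a full seq_file
 * iterator.  cgroup_init_cftypes() picks one based on whether ->seq_start
 * is set.
 */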
3117 static struct kernfs_ops cgroup_kf_single_ops = {
3118 	.atomic_write_len	= PAGE_SIZE,
3119 	.open			= cgroup_file_open,
3120 	.release		= cgroup_file_release,
3121 	.write			= cgroup_file_write,
3122 	.seq_show		= cgroup_seqfile_show,
3123 };
3124 
3125 static struct kernfs_ops cgroup_kf_ops = {
3126 	.atomic_write_len	= PAGE_SIZE,
3127 	.open			= cgroup_file_open,
3128 	.release		= cgroup_file_release,
3129 	.write			= cgroup_file_write,
3130 	.seq_start		= cgroup_seqfile_start,
3131 	.seq_next		= cgroup_seqfile_next,
3132 	.seq_stop		= cgroup_seqfile_stop,
3133 	.seq_show		= cgroup_seqfile_show,
3134 };
3135 
3136 /* set uid and gid of cgroup dirs and files to that of the creator */
3137 static int cgroup_kn_set_ugid(struct kernfs_node *kn)
3138 {
3139 	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
3140 			       .ia_uid = current_fsuid(),
3141 			       .ia_gid = current_fsgid(), };
3142 
3143 	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
3144 	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
3145 		return 0;
3146 
3147 	return kernfs_setattr(kn, &iattr);
3148 }
3149 
3150 static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
3151 			   struct cftype *cft)
3152 {
3153 	char name[CGROUP_FILE_NAME_MAX];
3154 	struct kernfs_node *kn;
3155 	struct lock_class_key *key = NULL;
3156 	int ret;
3157 
3158 #ifdef CONFIG_DEBUG_LOCK_ALLOC
3159 	key = &cft->lockdep_key;
3160 #endif
3161 	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
3162 				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
3163 				  NULL, key);
3164 	if (IS_ERR(kn))
3165 		return PTR_ERR(kn);
3166 
3167 	ret = cgroup_kn_set_ugid(kn);
3168 	if (ret) {
3169 		kernfs_remove(kn);
3170 		return ret;
3171 	}
3172 
3173 	if (cft->file_offset) {
3174 		struct cgroup_file *cfile = (void *)css + cft->file_offset;
3175 
3176 		spin_lock_irq(&cgroup_file_kn_lock);
3177 		cfile->kn = kn;
3178 		spin_unlock_irq(&cgroup_file_kn_lock);
3179 	}
3180 
3181 	return 0;
3182 }
3183 
3184 /**
3185  * cgroup_addrm_files - add or remove files to a cgroup directory
3186  * @css: the target css
3187  * @cgrp: the target cgroup (usually css->cgroup)
3188  * @cfts: array of cftypes to be added
3189  * @is_add: whether to add or remove
3190  *
3191  * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
3192  * For removals, this function never fails.
3193  */
3194 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
3195 			      struct cgroup *cgrp, struct cftype cfts[],
3196 			      bool is_add)
3197 {
3198 	struct cftype *cft, *cft_end = NULL;
3199 	int ret = 0;
3200 
3201 	lockdep_assert_held(&cgroup_mutex);
3202 
3203 restart:
3204 	for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
3205 		/* does cft->flags tell us to skip this file on @cgrp? */
3206 		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
3207 			continue;
3208 		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
3209 			continue;
3210 		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
3211 			continue;
3212 		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
3213 			continue;
3214 
3215 		if (is_add) {
3216 			ret = cgroup_add_file(css, cgrp, cft);
3217 			if (ret) {
3218 				pr_warn("%s: failed to add %s, err=%d\n",
3219 					__func__, cft->name, ret);
3220 				cft_end = cft;
3221 				is_add = false;
3222 				goto restart;
3223 			}
3224 		} else {
3225 			cgroup_rm_file(cgrp, cft);
3226 		}
3227 	}
3228 	return ret;
3229 }
3230 
3231 static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
3232 {
3233 	LIST_HEAD(pending);
3234 	struct cgroup_subsys *ss = cfts[0].ss;
3235 	struct cgroup *root = &ss->root->cgrp;
3236 	struct cgroup_subsys_state *css;
3237 	int ret = 0;
3238 
3239 	lockdep_assert_held(&cgroup_mutex);
3240 
3241 	/* add/rm files for all cgroups created before */
3242 	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
3243 		struct cgroup *cgrp = css->cgroup;
3244 
3245 		if (!(css->flags & CSS_VISIBLE))
3246 			continue;
3247 
3248 		ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
3249 		if (ret)
3250 			break;
3251 	}
3252 
3253 	if (is_add && !ret)
3254 		kernfs_activate(root->kn);
3255 	return ret;
3256 }
3257 
3258 static void cgroup_exit_cftypes(struct cftype *cfts)
3259 {
3260 	struct cftype *cft;
3261 
3262 	for (cft = cfts; cft->name[0] != '\0'; cft++) {
3263 		/* free copy for custom atomic_write_len, see init_cftypes() */
3264 		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
3265 			kfree(cft->kf_ops);
3266 		cft->kf_ops = NULL;
3267 		cft->ss = NULL;
3268 
3269 		/* revert flags set by cgroup core while adding @cfts */
3270 		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
3271 	}
3272 }
3273 
3274 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3275 {
3276 	struct cftype *cft;
3277 
3278 	for (cft = cfts; cft->name[0] != '\0'; cft++) {
3279 		struct kernfs_ops *kf_ops;
3280 
3281 		WARN_ON(cft->ss || cft->kf_ops);
3282 
3283 		if (cft->seq_start)
3284 			kf_ops = &cgroup_kf_ops;
3285 		else
3286 			kf_ops = &cgroup_kf_single_ops;
3287 
3288 		/*
3289 		 * Ugh... if @cft wants a custom max_write_len, we need to
3290 		 * make a copy of kf_ops to set its atomic_write_len.
3291 		 */
3292 		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
3293 			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
3294 			if (!kf_ops) {
3295 				cgroup_exit_cftypes(cfts);
3296 				return -ENOMEM;
3297 			}
3298 			kf_ops->atomic_write_len = cft->max_write_len;
3299 		}
3300 
3301 		cft->kf_ops = kf_ops;
3302 		cft->ss = ss;
3303 	}
3304 
3305 	return 0;
3306 }
3307 
3308 static int cgroup_rm_cftypes_locked(struct cftype *cfts)
3309 {
3310 	lockdep_assert_held(&cgroup_mutex);
3311 
3312 	if (!cfts || !cfts[0].ss)
3313 		return -ENOENT;
3314 
3315 	list_del(&cfts->node);
3316 	cgroup_apply_cftypes(cfts, false);
3317 	cgroup_exit_cftypes(cfts);
3318 	return 0;
3319 }
3320 
3321 /**
3322  * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
3323  * @cfts: zero-length name terminated array of cftypes
3324  *
3325  * Unregister @cfts.  Files described by @cfts are removed from all
3326  * existing cgroups and all future cgroups won't have them either.  This
3327  * function can be called anytime whether @cfts' subsys is attached or not.
3328  *
3329  * Returns 0 on successful unregistration, -ENOENT if @cfts is not
3330  * registered.
3331  */
3332 int cgroup_rm_cftypes(struct cftype *cfts)
3333 {
3334 	int ret;
3335 
3336 	mutex_lock(&cgroup_mutex);
3337 	ret = cgroup_rm_cftypes_locked(cfts);
3338 	mutex_unlock(&cgroup_mutex);
3339 	return ret;
3340 }
3341 
3342 /**
3343  * cgroup_add_cftypes - add an array of cftypes to a subsystem
3344  * @ss: target cgroup subsystem
3345  * @cfts: zero-length name terminated array of cftypes
3346  *
3347  * Register @cfts to @ss.  Files described by @cfts are created for all
3348  * existing cgroups to which @ss is attached and all future cgroups will
3349  * have them too.  This function can be called anytime whether @ss is
3350  * attached or not.
3351  *
3352  * Returns 0 on successful registration, -errno on failure.  Note that this
3353  * function currently returns 0 as long as @cfts registration is successful
3354  * even if some file creation attempts on existing cgroups fail.
3355  */
3356 static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3357 {
3358 	int ret;
3359 
3360 	if (!cgroup_ssid_enabled(ss->id))
3361 		return 0;
3362 
3363 	if (!cfts || cfts[0].name[0] == '\0')
3364 		return 0;
3365 
3366 	ret = cgroup_init_cftypes(ss, cfts);
3367 	if (ret)
3368 		return ret;
3369 
3370 	mutex_lock(&cgroup_mutex);
3371 
3372 	list_add_tail(&cfts->node, &ss->cfts);
3373 	ret = cgroup_apply_cftypes(cfts, true);
3374 	if (ret)
3375 		cgroup_rm_cftypes_locked(cfts);
3376 
3377 	mutex_unlock(&cgroup_mutex);
3378 	return ret;
3379 }
3380 
3381 /**
3382  * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
3383  * @ss: target cgroup subsystem
3384  * @cfts: zero-length name terminated array of cftypes
3385  *
3386  * Similar to cgroup_add_cftypes() but the added files are only used for
3387  * the default hierarchy.
3388  */
3389 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3390 {
3391 	struct cftype *cft;
3392 
3393 	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3394 		cft->flags |= __CFTYPE_ONLY_ON_DFL;
3395 	return cgroup_add_cftypes(ss, cfts);
3396 }
3397 
3398 /**
3399  * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
3400  * @ss: target cgroup subsystem
3401  * @cfts: zero-length name terminated array of cftypes
3402  *
3403  * Similar to cgroup_add_cftypes() but the added files are only used for
3404  * the legacy hierarchies.
3405  */
3406 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3407 {
3408 	struct cftype *cft;
3409 
3410 	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3411 		cft->flags |= __CFTYPE_NOT_ON_DFL;
3412 	return cgroup_add_cftypes(ss, cfts);
3413 }
3414 
3415 /**
3416  * cgroup_file_notify - generate a file modified event for a cgroup_file
3417  * @cfile: target cgroup_file
3418  *
3419  * @cfile must have been obtained by setting cftype->file_offset.
3420  */
3421 void cgroup_file_notify(struct cgroup_file *cfile)
3422 {
3423 	unsigned long flags;
3424 
3425 	spin_lock_irqsave(&cgroup_file_kn_lock, flags);
3426 	if (cfile->kn)
3427 		kernfs_notify(cfile->kn);
3428 	spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
3429 }
3430 
3431 /**
3432  * css_next_child - find the next child of a given css
3433  * @pos: the current position (%NULL to initiate traversal)
3434  * @parent: css whose children to walk
3435  *
3436  * This function returns the next child of @parent and should be called
3437  * under either cgroup_mutex or RCU read lock.  The only requirement is
3438  * that @parent and @pos are accessible.  The next sibling is guaranteed to
3439  * be returned regardless of their states.
3440  *
3441  * If a subsystem synchronizes ->css_online() and the start of iteration, a
3442  * css which finished ->css_online() is guaranteed to be visible in the
3443  * future iterations and will stay visible until the last reference is put.
3444  * A css which hasn't finished ->css_online() or already finished
3445  * ->css_offline() may show up during traversal.  It's each subsystem's
3446  * responsibility to synchronize against on/offlining.
3447  */
3448 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
3449 					   struct cgroup_subsys_state *parent)
3450 {
3451 	struct cgroup_subsys_state *next;
3452 
3453 	cgroup_assert_mutex_or_rcu_locked();
3454 
3455 	/*
3456 	 * @pos could already have been unlinked from the sibling list.
3457 	 * Once a cgroup is removed, its ->sibling.next is no longer
3458 	 * updated when its next sibling changes.  CSS_RELEASED is set when
3459 	 * @pos is taken off list, at which time its next pointer is valid,
3460 	 * and, as releases are serialized, the one pointed to by the next
3461 	 * pointer is guaranteed to not have started release yet.  This
3462 	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
3463 	 * critical section, the one pointed to by its next pointer is
3464 	 * guaranteed to not have finished its RCU grace period even if we
3465 	 * have dropped rcu_read_lock() in between iterations.
3466 	 *
3467 	 * If @pos has CSS_RELEASED set, its next pointer can't be
3468 	 * dereferenced; however, as each css is given a monotonically
3469 	 * increasing unique serial number and always appended to the
3470 	 * sibling list, the next one can be found by walking the parent's
3471 	 * children until the first css with higher serial number than
3472 	 * @pos's.  While this path can be slower, it happens iff iteration
3473 	 * races against release and the race window is very small.
3474 	 */
3475 	if (!pos) {
3476 		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
3477 	} else if (likely(!(pos->flags & CSS_RELEASED))) {
3478 		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
3479 	} else {
3480 		list_for_each_entry_rcu(next, &parent->children, sibling)
3481 			if (next->serial_nr > pos->serial_nr)
3482 				break;
3483 	}
3484 
3485 	/*
3486 	 * @next, if not pointing to the head, can be dereferenced and is
3487 	 * the next sibling.
3488 	 */
3489 	if (&next->sibling != &parent->children)
3490 		return next;
3491 	return NULL;
3492 }
3493 
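/*
 * css_next_child() is usually used through the css_for_each_child()
 * wrapper from cgroup.h, e.g. (sketch - visit() stands in for whatever
 * the caller does per child):
 *
 *	struct cgroup_subsys_state *child;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent_css)
 *		visit(child);
 *	rcu_read_unlock();
 */
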
3494 /**
3495  * css_next_descendant_pre - find the next descendant for pre-order walk
3496  * @pos: the current position (%NULL to initiate traversal)
3497  * @root: css whose descendants to walk
3498  *
3499  * To be used by css_for_each_descendant_pre().  Find the next descendant
3500  * to visit for pre-order traversal of @root's descendants.  @root is
3501  * included in the iteration and the first node to be visited.
3502  *
3503  * While this function requires cgroup_mutex or RCU read locking, it
3504  * doesn't require the whole traversal to be contained in a single critical
3505  * section.  This function will return the correct next descendant as long
3506  * as both @pos and @root are accessible and @pos is a descendant of @root.
3507  *
3508  * If a subsystem synchronizes ->css_online() and the start of iteration, a
3509  * css which finished ->css_online() is guaranteed to be visible in the
3510  * future iterations and will stay visible until the last reference is put.
3511  * A css which hasn't finished ->css_online() or already finished
3512  * ->css_offline() may show up during traversal.  It's each subsystem's
3513  * responsibility to synchronize against on/offlining.
3514  */
3515 struct cgroup_subsys_state *
3516 css_next_descendant_pre(struct cgroup_subsys_state *pos,
3517 			struct cgroup_subsys_state *root)
3518 {
3519 	struct cgroup_subsys_state *next;
3520 
3521 	cgroup_assert_mutex_or_rcu_locked();
3522 
3523 	/* if first iteration, visit @root */
3524 	if (!pos)
3525 		return root;
3526 
3527 	/* visit the first child, if any */
3528 	next = css_next_child(NULL, pos);
3529 	if (next)
3530 		return next;
3531 
3532 	/* no child, visit my or the closest ancestor's next sibling */
3533 	while (pos != root) {
3534 		next = css_next_child(pos, pos->parent);
3535 		if (next)
3536 			return next;
3537 		pos = pos->parent;
3538 	}
3539 
3540 	return NULL;
3541 }
3542 
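/*
 * Illustrative sketch (not compiled): the usual way to consume this
 * function is through the css_for_each_descendant_pre() wrapper from
 * cgroup.h, which visits @root first and then every descendant in
 * pre-order.
 */
#if 0
static int example_count_descendants(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *pos;
	int n = 0;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root)
		n++;
	rcu_read_unlock();

	return n;
}
#endif
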
3543 /**
3544  * css_rightmost_descendant - return the rightmost descendant of a css
3545  * @pos: css of interest
3546  *
3547  * Return the rightmost descendant of @pos.  If there's no descendant, @pos
3548  * is returned.  This can be used during pre-order traversal to skip
3549  * subtree of @pos.
3550  *
3551  * While this function requires cgroup_mutex or RCU read locking, it
3552  * doesn't require the whole traversal to be contained in a single critical
3553  * section.  This function will return the correct rightmost descendant as
3554  * long as @pos is accessible.
3555  */
3556 struct cgroup_subsys_state *
3557 css_rightmost_descendant(struct cgroup_subsys_state *pos)
3558 {
3559 	struct cgroup_subsys_state *last, *tmp;
3560 
3561 	cgroup_assert_mutex_or_rcu_locked();
3562 
3563 	do {
3564 		last = pos;
3565 		/* ->prev isn't RCU safe, walk ->next till the end */
3566 		pos = NULL;
3567 		css_for_each_child(tmp, last)
3568 			pos = tmp;
3569 	} while (pos);
3570 
3571 	return last;
3572 }
3573 
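/*
 * Illustrative sketch (not compiled): pruning a subtree during a
 * pre-order walk.  Jumping @pos to its rightmost descendant makes the
 * next css_next_descendant_pre() step continue at the next sibling
 * subtree.
 */
#if 0
static void example_walk_pruned(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root) {
		if (!(pos->flags & CSS_ONLINE)) {
			/* skip everything below an offline css */
			pos = css_rightmost_descendant(pos);
			continue;
		}
		/* ... process @pos ... */
	}
	rcu_read_unlock();
}
#endif
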
3574 static struct cgroup_subsys_state *
3575 css_leftmost_descendant(struct cgroup_subsys_state *pos)
3576 {
3577 	struct cgroup_subsys_state *last;
3578 
3579 	do {
3580 		last = pos;
3581 		pos = css_next_child(NULL, pos);
3582 	} while (pos);
3583 
3584 	return last;
3585 }
3586 
3587 /**
3588  * css_next_descendant_post - find the next descendant for post-order walk
3589  * @pos: the current position (%NULL to initiate traversal)
3590  * @root: css whose descendants to walk
3591  *
3592  * To be used by css_for_each_descendant_post().  Find the next descendant
3593  * to visit for post-order traversal of @root's descendants.  @root is
3594  * included in the iteration and the last node to be visited.
3595  *
3596  * While this function requires cgroup_mutex or RCU read locking, it
3597  * doesn't require the whole traversal to be contained in a single critical
3598  * section.  This function will return the correct next descendant as long
3599  * as both @pos and @root are accessible and @pos is a descendant of
3600  * @root.
3601  *
3602  * If a subsystem synchronizes ->css_online() and the start of iteration, a
3603  * css which finished ->css_online() is guaranteed to be visible in the
3604  * future iterations and will stay visible until the last reference is put.
3605  * A css which hasn't finished ->css_online() or already finished
3606  * ->css_offline() may show up during traversal.  It's each subsystem's
3607  * responsibility to synchronize against on/offlining.
3608  */
3609 struct cgroup_subsys_state *
3610 css_next_descendant_post(struct cgroup_subsys_state *pos,
3611 			 struct cgroup_subsys_state *root)
3612 {
3613 	struct cgroup_subsys_state *next;
3614 
3615 	cgroup_assert_mutex_or_rcu_locked();
3616 
3617 	/* if first iteration, visit leftmost descendant which may be @root */
3618 	if (!pos)
3619 		return css_leftmost_descendant(root);
3620 
3621 	/* if we visited @root, we're done */
3622 	if (pos == root)
3623 		return NULL;
3624 
3625 	/* if there's an unvisited sibling, visit its leftmost descendant */
3626 	next = css_next_child(pos, pos->parent);
3627 	if (next)
3628 		return css_leftmost_descendant(next);
3629 
3630 	/* no sibling left, visit parent */
3631 	return pos->parent;
3632 }
3633 
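/*
 * Illustrative sketch (not compiled): post-order traversal via
 * css_for_each_descendant_post() visits every child before its parent
 * and @root last, which is the natural order for bottom-up teardown of
 * per-css state.
 */
#if 0
static void example_teardown(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_post(pos, root) {
		/* all children of @pos have already been visited here */
	}
	rcu_read_unlock();
}
#endif
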
3634 /**
3635  * css_has_online_children - does a css have online children
3636  * @css: the target css
3637  *
3638  * Returns %true if @css has any online children; otherwise, %false.  This
3639  * function can be called from any context but the caller is responsible
3640  * for synchronizing against on/offlining as necessary.
3641  */
3642 bool css_has_online_children(struct cgroup_subsys_state *css)
3643 {
3644 	struct cgroup_subsys_state *child;
3645 	bool ret = false;
3646 
3647 	rcu_read_lock();
3648 	css_for_each_child(child, css) {
3649 		if (child->flags & CSS_ONLINE) {
3650 			ret = true;
3651 			break;
3652 		}
3653 	}
3654 	rcu_read_unlock();
3655 	return ret;
3656 }
3657 
3658 /**
3659  * css_task_iter_advance_css_set - advance a task iterator to the next css_set
3660  * @it: the iterator to advance
3661  *
3662  * Advance @it to the next css_set to walk.
3663  */
3664 static void css_task_iter_advance_css_set(struct css_task_iter *it)
3665 {
3666 	struct list_head *l = it->cset_pos;
3667 	struct cgrp_cset_link *link;
3668 	struct css_set *cset;
3669 
3670 	lockdep_assert_held(&css_set_lock);
3671 
3672 	/* Advance to the next non-empty css_set */
3673 	do {
3674 		l = l->next;
3675 		if (l == it->cset_head) {
3676 			it->cset_pos = NULL;
3677 			it->task_pos = NULL;
3678 			return;
3679 		}
3680 
3681 		if (it->ss) {
3682 			cset = container_of(l, struct css_set,
3683 					    e_cset_node[it->ss->id]);
3684 		} else {
3685 			link = list_entry(l, struct cgrp_cset_link, cset_link);
3686 			cset = link->cset;
3687 		}
3688 	} while (!css_set_populated(cset));
3689 
3690 	it->cset_pos = l;
3691 
3692 	if (!list_empty(&cset->tasks))
3693 		it->task_pos = cset->tasks.next;
3694 	else
3695 		it->task_pos = cset->mg_tasks.next;
3696 
3697 	it->tasks_head = &cset->tasks;
3698 	it->mg_tasks_head = &cset->mg_tasks;
3699 
3700 	/*
3701 	 * We don't keep css_sets locked across iteration steps and thus
3702 	 * need to take steps to ensure that iteration can be resumed after
3703 	 * the lock is re-acquired.  Iteration is performed at two levels -
3704 	 * css_sets and tasks in them.
3705 	 *
3706 	 * Once created, a css_set never leaves its cgroup lists, so a
3707 	 * pinned css_set is guaranteed to stay put and we can resume
3708 	 * iteration afterwards.
3709 	 *
3710 	 * Tasks may leave @cset across iteration steps.  This is resolved
3711 	 * by registering each iterator with the css_set currently being
3712 	 * walked and making css_set_move_task() advance iterators whose
3713 	 * next task is leaving.
3714 	 */
3715 	if (it->cur_cset) {
3716 		list_del(&it->iters_node);
3717 		put_css_set_locked(it->cur_cset);
3718 	}
3719 	get_css_set(cset);
3720 	it->cur_cset = cset;
3721 	list_add(&it->iters_node, &cset->task_iters);
3722 }
3723 
3724 static void css_task_iter_advance(struct css_task_iter *it)
3725 {
3726 	struct list_head *l = it->task_pos;
3727 
3728 	lockdep_assert_held(&css_set_lock);
3729 	WARN_ON_ONCE(!l);
3730 
3731 	/*
3732 	 * Advance iterator to find next entry.  cset->tasks is consumed
3733 	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
3734 	 * first and then ->mg_tasks.  After ->mg_tasks, we move on to the
3735 	 */
3736 	l = l->next;
3737 
3738 	if (l == it->tasks_head)
3739 		l = it->mg_tasks_head->next;
3740 
3741 	if (l == it->mg_tasks_head)
3742 		css_task_iter_advance_css_set(it);
3743 	else
3744 		it->task_pos = l;
3745 }
3746 
3747 /**
3748  * css_task_iter_start - initiate task iteration
3749  * @css: the css to walk tasks of
3750  * @it: the task iterator to use
3751  *
3752  * Initiate iteration through the tasks of @css.  The caller can call
3753  * css_task_iter_next() to walk through the tasks until the function
3754  * returns NULL.  On completion of iteration, css_task_iter_end() must be
3755  * called.
3756  */
3757 void css_task_iter_start(struct cgroup_subsys_state *css,
3758 			 struct css_task_iter *it)
3759 {
3760 	/* no one should try to iterate before mounting cgroups */
3761 	WARN_ON_ONCE(!use_task_css_set_links);
3762 
3763 	memset(it, 0, sizeof(*it));
3764 
3765 	spin_lock_irq(&css_set_lock);
3766 
3767 	it->ss = css->ss;
3768 
3769 	if (it->ss)
3770 		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
3771 	else
3772 		it->cset_pos = &css->cgroup->cset_links;
3773 
3774 	it->cset_head = it->cset_pos;
3775 
3776 	css_task_iter_advance_css_set(it);
3777 
3778 	spin_unlock_irq(&css_set_lock);
3779 }
3780 
3781 /**
3782  * css_task_iter_next - return the next task for the iterator
3783  * @it: the task iterator being iterated
3784  *
3785  * The "next" function for task iteration.  @it should have been
3786  * initialized via css_task_iter_start().  Returns NULL when the iteration
3787  * reaches the end.
3788  */
3789 struct task_struct *css_task_iter_next(struct css_task_iter *it)
3790 {
3791 	if (it->cur_task) {
3792 		put_task_struct(it->cur_task);
3793 		it->cur_task = NULL;
3794 	}
3795 
3796 	spin_lock_irq(&css_set_lock);
3797 
3798 	if (it->task_pos) {
3799 		it->cur_task = list_entry(it->task_pos, struct task_struct,
3800 					  cg_list);
3801 		get_task_struct(it->cur_task);
3802 		css_task_iter_advance(it);
3803 	}
3804 
3805 	spin_unlock_irq(&css_set_lock);
3806 
3807 	return it->cur_task;
3808 }
3809 
3810 /**
3811  * css_task_iter_end - finish task iteration
3812  * @it: the task iterator to finish
3813  *
3814  * Finish task iteration started by css_task_iter_start().
3815  */
3816 void css_task_iter_end(struct css_task_iter *it)
3817 {
3818 	if (it->cur_cset) {
3819 		spin_lock_irq(&css_set_lock);
3820 		list_del(&it->iters_node);
3821 		put_css_set_locked(it->cur_cset);
3822 		spin_unlock_irq(&css_set_lock);
3823 	}
3824 
3825 	if (it->cur_task)
3826 		put_task_struct(it->cur_task);
3827 }
3828 
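/*
 * Illustrative sketch (not compiled): the canonical start/next/end
 * pattern for walking every task attached to a css.  Each task is
 * returned with a reference held; the iterator drops it on the next
 * css_task_iter_next() call or in css_task_iter_end().
 */
#if 0
static void example_walk_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		pr_info("pid=%d\n", task_pid_nr(task));
	css_task_iter_end(&it);
}
#endif
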
3829 static void cgroup_procs_release(struct kernfs_open_file *of)
3830 {
3831 	if (of->priv) {
3832 		css_task_iter_end(of->priv);
3833 		kfree(of->priv);
3834 	}
3835 }
3836 
3837 static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
3838 {
3839 	struct kernfs_open_file *of = s->private;
3840 	struct css_task_iter *it = of->priv;
3841 	struct task_struct *task;
3842 
3843 	do {
3844 		task = css_task_iter_next(it);
3845 	} while (task && !thread_group_leader(task));
3846 
3847 	return task;
3848 }
3849 
3850 static void *cgroup_procs_start(struct seq_file *s, loff_t *pos)
3851 {
3852 	struct kernfs_open_file *of = s->private;
3853 	struct cgroup *cgrp = seq_css(s)->cgroup;
3854 	struct css_task_iter *it = of->priv;
3855 
3856 	/*
3857 	 * When a seq_file is seeked, it's always traversed sequentially
3858 	 * from position 0, so we can simply keep iterating on !0 *pos.
3859 	 */
3860 	if (!it) {
3861 		if (WARN_ON_ONCE((*pos)++))
3862 			return ERR_PTR(-EINVAL);
3863 
3864 		it = kzalloc(sizeof(*it), GFP_KERNEL);
3865 		if (!it)
3866 			return ERR_PTR(-ENOMEM);
3867 		of->priv = it;
3868 		css_task_iter_start(&cgrp->self, it);
3869 	} else if (!(*pos)++) {
3870 		css_task_iter_end(it);
3871 		css_task_iter_start(&cgrp->self, it);
3872 	}
3873 
3874 	return cgroup_procs_next(s, NULL, NULL);
3875 }
3876 
3877 static int cgroup_procs_show(struct seq_file *s, void *v)
3878 {
3879 	seq_printf(s, "%d\n", task_tgid_vnr(v));
3880 	return 0;
3881 }
3882 
3883 /* cgroup core interface files for the default hierarchy */
3884 static struct cftype cgroup_base_files[] = {
3885 	{
3886 		.name = "cgroup.procs",
3887 		.flags = CFTYPE_NS_DELEGATABLE,
3888 		.file_offset = offsetof(struct cgroup, procs_file),
3889 		.release = cgroup_procs_release,
3890 		.seq_start = cgroup_procs_start,
3891 		.seq_next = cgroup_procs_next,
3892 		.seq_show = cgroup_procs_show,
3893 		.write = cgroup_procs_write,
3894 	},
3895 	{
3896 		.name = "cgroup.controllers",
3897 		.seq_show = cgroup_controllers_show,
3898 	},
3899 	{
3900 		.name = "cgroup.subtree_control",
3901 		.flags = CFTYPE_NS_DELEGATABLE,
3902 		.seq_show = cgroup_subtree_control_show,
3903 		.write = cgroup_subtree_control_write,
3904 	},
3905 	{
3906 		.name = "cgroup.events",
3907 		.flags = CFTYPE_NOT_ON_ROOT,
3908 		.file_offset = offsetof(struct cgroup, events_file),
3909 		.seq_show = cgroup_events_show,
3910 	},
3911 	{ }	/* terminate */
3912 };
3913 
3914 /*
3915  * css destruction is a four-stage process.
3916  *
3917  * 1. Destruction starts.  Killing of the percpu_ref is initiated.
3918  *    Implemented in kill_css().
3919  *
3920  * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
3921  *    and thus css_tryget_online() is guaranteed to fail, the css can be
3922  *    offlined by invoking offline_css().  After offlining, the base ref is
3923  *    put.  Implemented in css_killed_work_fn().
3924  *
3925  * 3. When the percpu_ref reaches zero, the only possible remaining
3926  *    accessors are inside RCU read sections.  css_release() schedules the
3927  *    RCU callback.
3928  *
3929  * 4. After the grace period, the css can be freed.  Implemented in
3930  *    css_free_work_fn().
3931  *
3932  * It is actually hairier because both steps 2 and 4 require process
3933  * context and thus involve punting to css->destroy_work, adding two
3934  * additional steps to the already complex sequence.
3935  */
3936 static void css_free_work_fn(struct work_struct *work)
3937 {
3938 	struct cgroup_subsys_state *css =
3939 		container_of(work, struct cgroup_subsys_state, destroy_work);
3940 	struct cgroup_subsys *ss = css->ss;
3941 	struct cgroup *cgrp = css->cgroup;
3942 
3943 	percpu_ref_exit(&css->refcnt);
3944 
3945 	if (ss) {
3946 		/* css free path */
3947 		struct cgroup_subsys_state *parent = css->parent;
3948 		int id = css->id;
3949 
3950 		ss->css_free(css);
3951 		cgroup_idr_remove(&ss->css_idr, id);
3952 		cgroup_put(cgrp);
3953 
3954 		if (parent)
3955 			css_put(parent);
3956 	} else {
3957 		/* cgroup free path */
3958 		atomic_dec(&cgrp->root->nr_cgrps);
3959 		cgroup1_pidlist_destroy_all(cgrp);
3960 		cancel_work_sync(&cgrp->release_agent_work);
3961 
3962 		if (cgroup_parent(cgrp)) {
3963 			/*
3964 			 * We get a ref to the parent, and put the ref when
3965 			 * this cgroup is being freed, so it's guaranteed
3966 			 * that the parent won't be destroyed before its
3967 			 * children.
3968 			 */
3969 			cgroup_put(cgroup_parent(cgrp));
3970 			kernfs_put(cgrp->kn);
3971 			kfree(cgrp);
3972 		} else {
3973 			/*
3974 			 * This is root cgroup's refcnt reaching zero,
3975 			 * which indicates that the root should be
3976 			 * released.
3977 			 */
3978 			cgroup_destroy_root(cgrp->root);
3979 		}
3980 	}
3981 }
3982 
3983 static void css_free_rcu_fn(struct rcu_head *rcu_head)
3984 {
3985 	struct cgroup_subsys_state *css =
3986 		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
3987 
3988 	INIT_WORK(&css->destroy_work, css_free_work_fn);
3989 	queue_work(cgroup_destroy_wq, &css->destroy_work);
3990 }
3991 
3992 static void css_release_work_fn(struct work_struct *work)
3993 {
3994 	struct cgroup_subsys_state *css =
3995 		container_of(work, struct cgroup_subsys_state, destroy_work);
3996 	struct cgroup_subsys *ss = css->ss;
3997 	struct cgroup *cgrp = css->cgroup;
3998 
3999 	mutex_lock(&cgroup_mutex);
4000 
4001 	css->flags |= CSS_RELEASED;
4002 	list_del_rcu(&css->sibling);
4003 
4004 	if (ss) {
4005 		/* css release path */
4006 		cgroup_idr_replace(&ss->css_idr, NULL, css->id);
4007 		if (ss->css_released)
4008 			ss->css_released(css);
4009 	} else {
4010 		/* cgroup release path */
4011 		trace_cgroup_release(cgrp);
4012 
4013 		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
4014 		cgrp->id = -1;
4015 
4016 		/*
4017 		 * There are two control paths which try to determine
4018 		 * cgroup from dentry without going through kernfs -
4019 		 * cgroupstats_build() and css_tryget_online_from_dir().
4020 		 * Those are supported by RCU protecting clearing of
4021 		 * cgrp->kn->priv backpointer.
4022 		 */
4023 		if (cgrp->kn)
4024 			RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
4025 					 NULL);
4026 
4027 		cgroup_bpf_put(cgrp);
4028 	}
4029 
4030 	mutex_unlock(&cgroup_mutex);
4031 
4032 	call_rcu(&css->rcu_head, css_free_rcu_fn);
4033 }
4034 
4035 static void css_release(struct percpu_ref *ref)
4036 {
4037 	struct cgroup_subsys_state *css =
4038 		container_of(ref, struct cgroup_subsys_state, refcnt);
4039 
4040 	INIT_WORK(&css->destroy_work, css_release_work_fn);
4041 	queue_work(cgroup_destroy_wq, &css->destroy_work);
4042 }
4043 
4044 static void init_and_link_css(struct cgroup_subsys_state *css,
4045 			      struct cgroup_subsys *ss, struct cgroup *cgrp)
4046 {
4047 	lockdep_assert_held(&cgroup_mutex);
4048 
4049 	cgroup_get_live(cgrp);
4050 
4051 	memset(css, 0, sizeof(*css));
4052 	css->cgroup = cgrp;
4053 	css->ss = ss;
4054 	css->id = -1;
4055 	INIT_LIST_HEAD(&css->sibling);
4056 	INIT_LIST_HEAD(&css->children);
4057 	css->serial_nr = css_serial_nr_next++;
4058 	atomic_set(&css->online_cnt, 0);
4059 
4060 	if (cgroup_parent(cgrp)) {
4061 		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
4062 		css_get(css->parent);
4063 	}
4064 
4065 	BUG_ON(cgroup_css(cgrp, ss));
4066 }
4067 
4068 /* invoke ->css_online() on a new CSS and mark it online if successful */
4069 static int online_css(struct cgroup_subsys_state *css)
4070 {
4071 	struct cgroup_subsys *ss = css->ss;
4072 	int ret = 0;
4073 
4074 	lockdep_assert_held(&cgroup_mutex);
4075 
4076 	if (ss->css_online)
4077 		ret = ss->css_online(css);
4078 	if (!ret) {
4079 		css->flags |= CSS_ONLINE;
4080 		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
4081 
4082 		atomic_inc(&css->online_cnt);
4083 		if (css->parent)
4084 			atomic_inc(&css->parent->online_cnt);
4085 	}
4086 	return ret;
4087 }
4088 
4089 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */
4090 static void offline_css(struct cgroup_subsys_state *css)
4091 {
4092 	struct cgroup_subsys *ss = css->ss;
4093 
4094 	lockdep_assert_held(&cgroup_mutex);
4095 
4096 	if (!(css->flags & CSS_ONLINE))
4097 		return;
4098 
4099 	if (ss->css_reset)
4100 		ss->css_reset(css);
4101 
4102 	if (ss->css_offline)
4103 		ss->css_offline(css);
4104 
4105 	css->flags &= ~CSS_ONLINE;
4106 	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
4107 
4108 	wake_up_all(&css->cgroup->offline_waitq);
4109 }
4110 
4111 /**
4112  * css_create - create a cgroup_subsys_state
4113  * @cgrp: the cgroup new css will be associated with
4114  * @ss: the subsys of new css
4115  *
4116  * Create a new css associated with @cgrp - @ss pair.  On success, the new
4117  * css is online and installed in @cgrp.  This function doesn't create the
4118  * interface files.  Returns the new css on success or an ERR_PTR on failure.
4119  */
4120 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
4121 					      struct cgroup_subsys *ss)
4122 {
4123 	struct cgroup *parent = cgroup_parent(cgrp);
4124 	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
4125 	struct cgroup_subsys_state *css;
4126 	int err;
4127 
4128 	lockdep_assert_held(&cgroup_mutex);
4129 
4130 	css = ss->css_alloc(parent_css);
4131 	if (!css)
4132 		css = ERR_PTR(-ENOMEM);
4133 	if (IS_ERR(css))
4134 		return css;
4135 
4136 	init_and_link_css(css, ss, cgrp);
4137 
4138 	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
4139 	if (err)
4140 		goto err_free_css;
4141 
4142 	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
4143 	if (err < 0)
4144 		goto err_free_css;
4145 	css->id = err;
4146 
4147 	/* @css is ready to be brought online now, make it visible */
4148 	list_add_tail_rcu(&css->sibling, &parent_css->children);
4149 	cgroup_idr_replace(&ss->css_idr, css, css->id);
4150 
4151 	err = online_css(css);
4152 	if (err)
4153 		goto err_list_del;
4154 
4155 	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
4156 	    cgroup_parent(parent)) {
4157 		pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
4158 			current->comm, current->pid, ss->name);
4159 		if (!strcmp(ss->name, "memory"))
4160 			pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
4161 		ss->warned_broken_hierarchy = true;
4162 	}
4163 
4164 	return css;
4165 
4166 err_list_del:
4167 	list_del_rcu(&css->sibling);
4168 err_free_css:
4169 	call_rcu(&css->rcu_head, css_free_rcu_fn);
4170 	return ERR_PTR(err);
4171 }
4172 
4173 /*
4174  * The returned cgroup is fully initialized including its control mask, but
4175  * it isn't associated with its kernfs_node and doesn't have the control
4176  * mask applied.
4177  */
4178 static struct cgroup *cgroup_create(struct cgroup *parent)
4179 {
4180 	struct cgroup_root *root = parent->root;
4181 	struct cgroup *cgrp, *tcgrp;
4182 	int level = parent->level + 1;
4183 	int ret;
4184 
4185 	/* allocate the cgroup and its ID, 0 is reserved for the root */
4186 	cgrp = kzalloc(sizeof(*cgrp) +
4187 		       sizeof(cgrp->ancestor_ids[0]) * (level + 1), GFP_KERNEL);
4188 	if (!cgrp)
4189 		return ERR_PTR(-ENOMEM);
4190 
4191 	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
4192 	if (ret)
4193 		goto out_free_cgrp;
4194 
4195 	/*
4196 	 * Temporarily set the pointer to NULL, so idr_find() won't return
4197 	 * a half-baked cgroup.
4198 	 */
4199 	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
4200 	if (cgrp->id < 0) {
4201 		ret = -ENOMEM;
4202 		goto out_cancel_ref;
4203 	}
4204 
4205 	init_cgroup_housekeeping(cgrp);
4206 
4207 	cgrp->self.parent = &parent->self;
4208 	cgrp->root = root;
4209 	cgrp->level = level;
4210 
4211 	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp))
4212 		cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
4213 
4214 	if (notify_on_release(parent))
4215 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
4216 
4217 	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
4218 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
4219 
4220 	cgrp->self.serial_nr = css_serial_nr_next++;
4221 
4222 	/* allocation complete, commit to creation */
4223 	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
4224 	atomic_inc(&root->nr_cgrps);
4225 	cgroup_get_live(parent);
4226 
4227 	/*
4228 	 * @cgrp is now fully operational.  If something fails after this
4229 	 * point, it'll be released via the normal destruction path.
4230 	 */
4231 	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
4232 
4233 	/*
4234 	 * On the default hierarchy, a child doesn't automatically inherit
4235 	 * subtree_control from the parent.  Each is configured manually.
4236 	 */
4237 	if (!cgroup_on_dfl(cgrp))
4238 		cgrp->subtree_control = cgroup_control(cgrp);
4239 
4240 	if (parent)
4241 		cgroup_bpf_inherit(cgrp, parent);
4242 
4243 	cgroup_propagate_control(cgrp);
4244 
4245 	return cgrp;
4246 
4247 out_cancel_ref:
4248 	percpu_ref_exit(&cgrp->self.refcnt);
4249 out_free_cgrp:
4250 	kfree(cgrp);
4251 	return ERR_PTR(ret);
4252 }
4253 
4254 int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
4255 {
4256 	struct cgroup *parent, *cgrp;
4257 	struct kernfs_node *kn;
4258 	int ret;
4259 
4260 	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
4261 	if (strchr(name, '\n'))
4262 		return -EINVAL;
4263 
4264 	parent = cgroup_kn_lock_live(parent_kn, false);
4265 	if (!parent)
4266 		return -ENODEV;
4267 
4268 	cgrp = cgroup_create(parent);
4269 	if (IS_ERR(cgrp)) {
4270 		ret = PTR_ERR(cgrp);
4271 		goto out_unlock;
4272 	}
4273 
4274 	/* create the directory */
4275 	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
4276 	if (IS_ERR(kn)) {
4277 		ret = PTR_ERR(kn);
4278 		goto out_destroy;
4279 	}
4280 	cgrp->kn = kn;
4281 
4282 	/*
4283 	 * This extra ref will be put in css_free_work_fn() and guarantees
4284 	 * that @cgrp->kn is always accessible.
4285 	 */
4286 	kernfs_get(kn);
4287 
4288 	ret = cgroup_kn_set_ugid(kn);
4289 	if (ret)
4290 		goto out_destroy;
4291 
4292 	ret = css_populate_dir(&cgrp->self);
4293 	if (ret)
4294 		goto out_destroy;
4295 
4296 	ret = cgroup_apply_control_enable(cgrp);
4297 	if (ret)
4298 		goto out_destroy;
4299 
4300 	trace_cgroup_mkdir(cgrp);
4301 
4302 	/* let's create and online css's */
4303 	kernfs_activate(kn);
4304 
4305 	ret = 0;
4306 	goto out_unlock;
4307 
4308 out_destroy:
4309 	cgroup_destroy_locked(cgrp);
4310 out_unlock:
4311 	cgroup_kn_unlock(parent_kn);
4312 	return ret;
4313 }
4314 
4315 /*
4316  * This is called when the refcnt of a css is confirmed to be killed.
4317  * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
4318  * initiate destruction and put the css ref from kill_css().
4319  */
4320 static void css_killed_work_fn(struct work_struct *work)
4321 {
4322 	struct cgroup_subsys_state *css =
4323 		container_of(work, struct cgroup_subsys_state, destroy_work);
4324 
4325 	mutex_lock(&cgroup_mutex);
4326 
4327 	do {
4328 		offline_css(css);
4329 		css_put(css);
4330 		/* @css can't go away while we're holding cgroup_mutex */
4331 		css = css->parent;
4332 	} while (css && atomic_dec_and_test(&css->online_cnt));
4333 
4334 	mutex_unlock(&cgroup_mutex);
4335 }
4336 
4337 /* css kill confirmation processing requires process context, bounce */
4338 static void css_killed_ref_fn(struct percpu_ref *ref)
4339 {
4340 	struct cgroup_subsys_state *css =
4341 		container_of(ref, struct cgroup_subsys_state, refcnt);
4342 
4343 	if (atomic_dec_and_test(&css->online_cnt)) {
4344 		INIT_WORK(&css->destroy_work, css_killed_work_fn);
4345 		queue_work(cgroup_destroy_wq, &css->destroy_work);
4346 	}
4347 }
4348 
4349 /**
4350  * kill_css - destroy a css
4351  * @css: css to destroy
4352  *
4353  * This function initiates destruction of @css by removing cgroup interface
4354  * files and putting its base reference.  ->css_offline() will be invoked
4355  * asynchronously once css_tryget_online() is guaranteed to fail and when
4356  * the reference count reaches zero, @css will be released.
4357  */
4358 static void kill_css(struct cgroup_subsys_state *css)
4359 {
4360 	lockdep_assert_held(&cgroup_mutex);
4361 
4362 	if (css->flags & CSS_DYING)
4363 		return;
4364 
4365 	css->flags |= CSS_DYING;
4366 
4367 	/*
4368 	 * This must happen before css is disassociated with its cgroup.
4369 	 * See seq_css() for details.
4370 	 */
4371 	css_clear_dir(css);
4372 
4373 	/*
4374 	 * Killing would put the base ref, but we need to keep it alive
4375 	 * until after ->css_offline().
4376 	 */
4377 	css_get(css);
4378 
4379 	/*
4380 	 * cgroup core guarantees that, by the time ->css_offline() is
4381 	 * invoked, no new css reference will be given out via
4382 	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
4383 	 * proceed to offlining css's because percpu_ref_kill() doesn't
4384 	 * guarantee that the ref is seen as killed on all CPUs on return.
4385 	 *
4386 	 * Use percpu_ref_kill_and_confirm() to get notifications as each
4387 	 * css is confirmed to be seen as killed on all CPUs.
4388 	 */
4389 	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
4390 }
4391 
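/*
 * Illustrative sketch (not compiled): the generic shape of the
 * kill-and-confirm handshake used above.  The confirm callback fires
 * once the ref is guaranteed to be seen as killed on all CPUs, i.e.
 * once tryget_live can no longer succeed, at which point offlining may
 * safely begin.
 */
#if 0
static void example_confirm_killed(struct percpu_ref *ref)
{
	/* no new "online" references can be taken past this point */
}

static void example_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, example_confirm_killed);
}
#endif
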
4392 /**
4393  * cgroup_destroy_locked - the first stage of cgroup destruction
4394  * @cgrp: cgroup to be destroyed
4395  *
4396  * css's make use of percpu refcnts whose killing latency shouldn't be
4397  * exposed to userland and are RCU protected.  Also, cgroup core needs to
4398  * guarantee that css_tryget_online() won't succeed by the time
4399  * ->css_offline() is invoked.  To satisfy all the requirements,
4400  * destruction is implemented in the following two steps.
4401  *
4402  * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
4403  *     userland visible parts and start killing the percpu refcnts of
4404  *     css's.  Set up so that the next stage will be kicked off once all
4405  *     the percpu refcnts are confirmed to be killed.
4406  *
4407  * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
4408  *     rest of destruction.  Once all cgroup references are gone, the
4409  *     cgroup is RCU-freed.
4410  *
4411  * This function implements s1.  After this step, @cgrp is gone as far as
4412  * the userland is concerned and a new cgroup with the same name may be
4413  * created.  As cgroup doesn't care about the names internally, this
4414  * doesn't cause any problem.
4415  */
4416 static int cgroup_destroy_locked(struct cgroup *cgrp)
4417 	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
4418 {
4419 	struct cgroup_subsys_state *css;
4420 	struct cgrp_cset_link *link;
4421 	int ssid;
4422 
4423 	lockdep_assert_held(&cgroup_mutex);
4424 
4425 	/*
4426 	 * Only migration can raise populated from zero and we're already
4427 	 * holding cgroup_mutex.
4428 	 */
4429 	if (cgroup_is_populated(cgrp))
4430 		return -EBUSY;
4431 
4432 	/*
4433 	 * Make sure there are no live children.  We can't test emptiness of
4434 	 * ->self.children as dead children linger on it while being
4435 	 * drained; otherwise, "rmdir parent/child parent" may fail.
4436 	 */
4437 	if (css_has_online_children(&cgrp->self))
4438 		return -EBUSY;
4439 
4440 	/*
4441 	 * Mark @cgrp and the associated csets dead.  The former prevents
4442 	 * further task migration and child creation by disabling
4443 	 * cgroup_lock_live_group().  The latter makes the csets ignored by
4444 	 * the migration path.
4445 	 */
4446 	cgrp->self.flags &= ~CSS_ONLINE;
4447 
4448 	spin_lock_irq(&css_set_lock);
4449 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
4450 		link->cset->dead = true;
4451 	spin_unlock_irq(&css_set_lock);
4452 
4453 	/* initiate massacre of all css's */
4454 	for_each_css(css, ssid, cgrp)
4455 		kill_css(css);
4456 
4457 	/*
4458 	 * Remove @cgrp directory along with the base files.  @cgrp has an
4459 	 * extra ref on its kn.
4460 	 */
4461 	kernfs_remove(cgrp->kn);
4462 
4463 	cgroup1_check_for_release(cgroup_parent(cgrp));
4464 
4465 	/* put the base reference */
4466 	percpu_ref_kill(&cgrp->self.refcnt);
4467 
4468 	return 0;
4469 }
4470 
4471 int cgroup_rmdir(struct kernfs_node *kn)
4472 {
4473 	struct cgroup *cgrp;
4474 	int ret = 0;
4475 
4476 	cgrp = cgroup_kn_lock_live(kn, false);
4477 	if (!cgrp)
4478 		return 0;
4479 
4480 	ret = cgroup_destroy_locked(cgrp);
4481 
4482 	if (!ret)
4483 		trace_cgroup_rmdir(cgrp);
4484 
4485 	cgroup_kn_unlock(kn);
4486 	return ret;
4487 }
4488 
4489 static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
4490 	.show_options		= cgroup_show_options,
4491 	.remount_fs		= cgroup_remount,
4492 	.mkdir			= cgroup_mkdir,
4493 	.rmdir			= cgroup_rmdir,
4494 	.show_path		= cgroup_show_path,
4495 };
4496 
4497 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
4498 {
4499 	struct cgroup_subsys_state *css;
4500 
4501 	pr_debug("Initializing cgroup subsys %s\n", ss->name);
4502 
4503 	mutex_lock(&cgroup_mutex);
4504 
4505 	idr_init(&ss->css_idr);
4506 	INIT_LIST_HEAD(&ss->cfts);
4507 
4508 	/* Create the root cgroup state for this subsystem */
4509 	ss->root = &cgrp_dfl_root;
4510 	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
4511 	/* We don't handle early failures gracefully */
4512 	BUG_ON(IS_ERR(css));
4513 	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
4514 
4515 	/*
4516 	 * Root csses are never destroyed and we can't initialize
4517 	 * percpu_ref during early init.  Disable refcnting.
4518 	 */
4519 	css->flags |= CSS_NO_REF;
4520 
4521 	if (early) {
4522 		/* allocation can't be done safely during early init */
4523 		css->id = 1;
4524 	} else {
4525 		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
4526 		BUG_ON(css->id < 0);
4527 	}
4528 
4529 	/* Update the init_css_set to contain a subsys pointer to this
4530 	 * state - since the subsystem is newly registered, all tasks
4531 	 * and hence the init_css_set are in the subsystem's root
4532 	 * cgroup. */
4533 	init_css_set.subsys[ss->id] = css;
4534 
4535 	have_fork_callback |= (bool)ss->fork << ss->id;
4536 	have_exit_callback |= (bool)ss->exit << ss->id;
4537 	have_free_callback |= (bool)ss->free << ss->id;
4538 	have_canfork_callback |= (bool)ss->can_fork << ss->id;
4539 
4540 	/* At system boot, before all subsystems have been
4541 	 * registered, no tasks have been forked, so we don't
4542 	 * need to invoke fork callbacks here. */
4543 	BUG_ON(!list_empty(&init_task.tasks));
4544 
4545 	BUG_ON(online_css(css));
4546 
4547 	mutex_unlock(&cgroup_mutex);
4548 }
4549 
4550 /**
4551  * cgroup_init_early - cgroup initialization at system boot
4552  *
4553  * Initialize cgroups at system boot, and initialize any
4554  * subsystems that request early init.
4555  */
4556 int __init cgroup_init_early(void)
4557 {
4558 	static struct cgroup_sb_opts __initdata opts;
4559 	struct cgroup_subsys *ss;
4560 	int i;
4561 
4562 	init_cgroup_root(&cgrp_dfl_root, &opts);
4563 	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
4564 
4565 	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
4566 
4567 	for_each_subsys(ss, i) {
4568 		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
4569 		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
4570 		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
4571 		     ss->id, ss->name);
4572 		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
4573 		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
4574 
4575 		ss->id = i;
4576 		ss->name = cgroup_subsys_name[i];
4577 		if (!ss->legacy_name)
4578 			ss->legacy_name = cgroup_subsys_name[i];
4579 
4580 		if (ss->early_init)
4581 			cgroup_init_subsys(ss, true);
4582 	}
4583 	return 0;
4584 }
4585 
4586 static u16 cgroup_disable_mask __initdata;
4587 
4588 /**
4589  * cgroup_init - cgroup initialization
4590  *
4591  * Register cgroup filesystem and /proc file, and initialize
4592  * any subsystems that didn't request early init.
4593  */
4594 int __init cgroup_init(void)
4595 {
4596 	struct cgroup_subsys *ss;
4597 	int ssid;
4598 
4599 	BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16);
4600 	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
4601 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
4602 	BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
4603 
4604 	/*
4605 	 * The latency of the synchronize_sched() is too high for cgroups,
4606 	 * avoid it at the cost of forcing all readers into the slow path.
4607 	 */
4608 	rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
4609 
4610 	get_user_ns(init_cgroup_ns.user_ns);
4611 
4612 	mutex_lock(&cgroup_mutex);
4613 
4614 	/*
4615 	 * Add init_css_set to the hash table so that dfl_root can link to
4616 	 * it during init.
4617 	 */
4618 	hash_add(css_set_table, &init_css_set.hlist,
4619 		 css_set_hash(init_css_set.subsys));
4620 
4621 	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0, 0));
4622 
4623 	mutex_unlock(&cgroup_mutex);
4624 
4625 	for_each_subsys(ss, ssid) {
4626 		if (ss->early_init) {
4627 			struct cgroup_subsys_state *css =
4628 				init_css_set.subsys[ss->id];
4629 
4630 			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
4631 						   GFP_KERNEL);
4632 			BUG_ON(css->id < 0);
4633 		} else {
4634 			cgroup_init_subsys(ss, false);
4635 		}
4636 
4637 		list_add_tail(&init_css_set.e_cset_node[ssid],
4638 			      &cgrp_dfl_root.cgrp.e_csets[ssid]);
4639 
4640 		/*
4641 		 * Setting dfl_root subsys_mask needs to consider the
4642 		 * disabled flag and cftype registration needs kmalloc,
4643 		 * both of which aren't available during early_init.
4644 		 */
4645 		if (cgroup_disable_mask & (1 << ssid)) {
4646 			static_branch_disable(cgroup_subsys_enabled_key[ssid]);
4647 			printk(KERN_INFO "Disabling %s control group subsystem\n",
4648 			       ss->name);
4649 			continue;
4650 		}
4651 
4652 		if (cgroup1_ssid_disabled(ssid))
4653 			printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
4654 			       ss->name);
4655 
4656 		cgrp_dfl_root.subsys_mask |= 1 << ss->id;
4657 
4658 		if (ss->implicit_on_dfl)
4659 			cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
4660 		else if (!ss->dfl_cftypes)
4661 			cgrp_dfl_inhibit_ss_mask |= 1 << ss->id;
4662 
4663 		if (ss->dfl_cftypes == ss->legacy_cftypes) {
4664 			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
4665 		} else {
4666 			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
4667 			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
4668 		}
4669 
4670 		if (ss->bind)
4671 			ss->bind(init_css_set.subsys[ssid]);
4672 	}
4673 
4674 	/* init_css_set.subsys[] has been updated, re-hash */
4675 	hash_del(&init_css_set.hlist);
4676 	hash_add(css_set_table, &init_css_set.hlist,
4677 		 css_set_hash(init_css_set.subsys));
4678 
4679 	WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
4680 	WARN_ON(register_filesystem(&cgroup_fs_type));
4681 	WARN_ON(register_filesystem(&cgroup2_fs_type));
4682 	WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));
4683 
4684 	return 0;
4685 }
4686 
4687 static int __init cgroup_wq_init(void)
4688 {
4689 	/*
4690 	 * There isn't much point in executing the destruction path in
4691 	 * parallel.  A good chunk of it is serialized with cgroup_mutex anyway.
4692 	 * Use 1 for @max_active.
4693 	 *
4694 	 * We would prefer to do this in cgroup_init() above, but that
4695 	 * is called before init_workqueues(): so leave this until after.
4696 	 */
4697 	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
4698 	BUG_ON(!cgroup_destroy_wq);
4699 	return 0;
4700 }
4701 core_initcall(cgroup_wq_init);
4702 
4703 /*
4704  * proc_cgroup_show()
4705  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
4706  *  - Used for /proc/<pid>/cgroup.
4707  */
4708 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
4709 		     struct pid *pid, struct task_struct *tsk)
4710 {
4711 	char *buf;
4712 	int retval;
4713 	struct cgroup_root *root;
4714 
4715 	retval = -ENOMEM;
4716 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
4717 	if (!buf)
4718 		goto out;
4719 
4720 	mutex_lock(&cgroup_mutex);
4721 	spin_lock_irq(&css_set_lock);
4722 
4723 	for_each_root(root) {
4724 		struct cgroup_subsys *ss;
4725 		struct cgroup *cgrp;
4726 		int ssid, count = 0;
4727 
4728 		if (root == &cgrp_dfl_root && !cgrp_dfl_visible)
4729 			continue;
4730 
4731 		seq_printf(m, "%d:", root->hierarchy_id);
4732 		if (root != &cgrp_dfl_root)
4733 			for_each_subsys(ss, ssid)
4734 				if (root->subsys_mask & (1 << ssid))
4735 					seq_printf(m, "%s%s", count++ ? "," : "",
4736 						   ss->legacy_name);
4737 		if (strlen(root->name))
4738 			seq_printf(m, "%sname=%s", count ? "," : "",
4739 				   root->name);
4740 		seq_putc(m, ':');
4741 
4742 		cgrp = task_cgroup_from_root(tsk, root);
4743 
4744 		/*
4745 		 * On traditional hierarchies, all zombie tasks show up as
4746 		 * belonging to the root cgroup.  On the default hierarchy,
4747 		 * while a zombie doesn't show up in "cgroup.procs" and
4748 		 * thus can't be migrated, its /proc/PID/cgroup keeps
4749 		 * reporting the cgroup it belonged to before exiting.  If
4750 		 * the cgroup is removed before the zombie is reaped,
4751 		 * " (deleted)" is appended to the cgroup path.
4752 		 */
4753 		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
4754 			retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
4755 						current->nsproxy->cgroup_ns);
4756 			if (retval >= PATH_MAX)
4757 				retval = -ENAMETOOLONG;
4758 			if (retval < 0)
4759 				goto out_unlock;
4760 
4761 			seq_puts(m, buf);
4762 		} else {
4763 			seq_puts(m, "/");
4764 		}
4765 
4766 		if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
4767 			seq_puts(m, " (deleted)\n");
4768 		else
4769 			seq_putc(m, '\n');
4770 	}
4771 
4772 	retval = 0;
4773 out_unlock:
4774 	spin_unlock_irq(&css_set_lock);
4775 	mutex_unlock(&cgroup_mutex);
4776 	kfree(buf);
4777 out:
4778 	return retval;
4779 }
4780 
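/*
 * Example output (documentation only): for a task in "/foo" on a v1
 * hierarchy carrying cpu and cpuacct plus the default hierarchy,
 * /proc/<pid>/cgroup might read:
 *
 *	3:cpu,cpuacct:/foo
 *	0::/foo
 *
 * i.e. "hierarchy-id:comma-separated-controllers:path" per line, with
 * an empty controller list for the v2 hierarchy.
 */
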
4781 /**
4782  * cgroup_fork - initialize cgroup related fields during copy_process()
4783  * @child: pointer to task_struct of the child being forked
4784  *
4785  * A task is associated with the init_css_set until cgroup_post_fork()
4786  * attaches it to the parent's css_set.  An empty cg_list indicates that
4787  * @child isn't holding a reference to its css_set.
4788  */
4789 void cgroup_fork(struct task_struct *child)
4790 {
4791 	RCU_INIT_POINTER(child->cgroups, &init_css_set);
4792 	INIT_LIST_HEAD(&child->cg_list);
4793 }
4794 
4795 /**
4796  * cgroup_can_fork - called on a new task before the process is exposed
4797  * @child: the task in question.
4798  *
4799  * This calls the subsystem can_fork() callbacks. If the can_fork() callback
4800  * returns an error, the fork aborts with that error code.  This allows
4801  * a cgroup subsystem to conditionally allow or deny new forks.
4802  */
4803 int cgroup_can_fork(struct task_struct *child)
4804 {
4805 	struct cgroup_subsys *ss;
4806 	int i, j, ret;
4807 
4808 	do_each_subsys_mask(ss, i, have_canfork_callback) {
4809 		ret = ss->can_fork(child);
4810 		if (ret)
4811 			goto out_revert;
4812 	} while_each_subsys_mask();
4813 
4814 	return 0;
4815 
4816 out_revert:
4817 	for_each_subsys(ss, j) {
4818 		if (j >= i)
4819 			break;
4820 		if (ss->cancel_fork)
4821 			ss->cancel_fork(child);
4822 	}
4823 
4824 	return ret;
4825 }
4826 
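/*
 * Illustrative sketch (not compiled): the shape of a subsystem's
 * can_fork/cancel_fork pair.  example_try_charge() and
 * example_uncharge() are hypothetical bookkeeping helpers; the pids
 * controller implements this pattern for real.
 */
#if 0
static int example_ss_can_fork(struct task_struct *task)
{
	/* hypothetical helper: returns false when over limit */
	if (!example_try_charge(task))
		return -EAGAIN;
	return 0;
}

static void example_ss_cancel_fork(struct task_struct *task)
{
	/* hypothetical helper: undo the charge from can_fork */
	example_uncharge(task);
}
#endif
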
4827 /**
4828  * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
4829  * @child: the task in question
4830  *
4831  * This calls the cancel_fork() callbacks if a fork failed *after*
4832  * cgroup_can_fork() succeded.
4833  */
4834 void cgroup_cancel_fork(struct task_struct *child)
4835 {
4836 	struct cgroup_subsys *ss;
4837 	int i;
4838 
4839 	for_each_subsys(ss, i)
4840 		if (ss->cancel_fork)
4841 			ss->cancel_fork(child);
4842 }
4843 
4844 /**
4845  * cgroup_post_fork - called on a new task after adding it to the task list
4846  * @child: the task in question
4847  *
4848  * Adds the task to the list running through its css_set if necessary and
4849  * calls the subsystem fork() callbacks.  This has to happen after the task
4850  * is visible on the task list in case we race with the first call to
4851  * css_task_iter_start() - to guarantee that the new task ends up on its
4852  * list.
4853  */
4854 void cgroup_post_fork(struct task_struct *child)
4855 {
4856 	struct cgroup_subsys *ss;
4857 	int i;
4858 
4859 	/*
4860 	 * This may race against cgroup_enable_task_cg_lists().  As that
4861 	 * function sets use_task_css_set_links before grabbing
4862 	 * tasklist_lock and we just went through tasklist_lock to add
4863 	 * @child, it's guaranteed that either we see the set
4864 	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
4865 	 * @child during its iteration.
4866 	 *
4867 	 * If we won the race, @child is associated with %current's
4868 	 * css_set.  Grabbing css_set_lock guarantees both that the
4869 	 * association is stable, and, on completion of the parent's
4870 	 * migration, @child is visible in the source of migration or
4871 	 * already in the destination cgroup.  This guarantee is necessary
4872 	 * when implementing operations which need to migrate all tasks of
4873 	 * a cgroup to another.
4874 	 *
4875 	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
4876 	 * will remain in init_css_set.  This is safe because all tasks are
4877 	 * in the init_css_set before cg_links is enabled and there's no
4878 	 * operation which transfers all tasks out of init_css_set.
4879 	 */
4880 	if (use_task_css_set_links) {
4881 		struct css_set *cset;
4882 
4883 		spin_lock_irq(&css_set_lock);
4884 		cset = task_css_set(current);
4885 		if (list_empty(&child->cg_list)) {
4886 			get_css_set(cset);
4887 			cset->nr_tasks++;
4888 			css_set_move_task(child, NULL, cset, false);
4889 		}
4890 		spin_unlock_irq(&css_set_lock);
4891 	}
4892 
4893 	/*
4894 	 * Call ss->fork().  This must happen after @child is linked on
4895 	 * css_set; otherwise, @child might change state between ->fork()
4896 	 * and addition to css_set.
4897 	 */
4898 	do_each_subsys_mask(ss, i, have_fork_callback) {
4899 		ss->fork(child);
4900 	} while_each_subsys_mask();
4901 }
4902 
4903 /**
4904  * cgroup_exit - detach cgroup from exiting task
4905  * @tsk: pointer to task_struct of exiting process
4906  *
4907  * Description: Detach cgroup from @tsk and release it.
4908  *
4909  * Note that cgroups marked notify_on_release force every task in
4910  * them to take the global cgroup_mutex when exiting.
4911  * This could impact scaling on very large systems.  Be reluctant to
4912  * use notify_on_release cgroups where very high task exit scaling
4913  * is required on large systems.
4914  *
4915  * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
4916  * call cgroup_exit() while the task is still competent to handle
4917  * notify_on_release(), then leave the task attached to the root cgroup in
4918  * each hierarchy for the remainder of its exit.  No need to bother with
4919  * init_css_set refcnting.  init_css_set never goes away and we can't race
4920  * with migration path - PF_EXITING is visible to migration path.
4921  * with the migration path - PF_EXITING is visible to the migration path.
4922 void cgroup_exit(struct task_struct *tsk)
4923 {
4924 	struct cgroup_subsys *ss;
4925 	struct css_set *cset;
4926 	int i;
4927 
4928 	/*
4929 	 * Unlink @tsk from its css_set.  As the migration path can't race
4930 	 * with us, we can check css_set and cg_list without synchronization.
4931 	 */
4932 	cset = task_css_set(tsk);
4933 
4934 	if (!list_empty(&tsk->cg_list)) {
4935 		spin_lock_irq(&css_set_lock);
4936 		css_set_move_task(tsk, cset, NULL, false);
4937 		cset->nr_tasks--;
4938 		spin_unlock_irq(&css_set_lock);
4939 	} else {
4940 		get_css_set(cset);
4941 	}
4942 
4943 	/* see cgroup_post_fork() for details */
4944 	do_each_subsys_mask(ss, i, have_exit_callback) {
4945 		ss->exit(tsk);
4946 	} while_each_subsys_mask();
4947 }
4948 
4949 void cgroup_free(struct task_struct *task)
4950 {
4951 	struct css_set *cset = task_css_set(task);
4952 	struct cgroup_subsys *ss;
4953 	int ssid;
4954 
4955 	do_each_subsys_mask(ss, ssid, have_free_callback) {
4956 		ss->free(task);
4957 	} while_each_subsys_mask();
4958 
4959 	put_css_set(cset);
4960 }
4961 
4962 static int __init cgroup_disable(char *str)
4963 {
4964 	struct cgroup_subsys *ss;
4965 	char *token;
4966 	int i;
4967 
4968 	while ((token = strsep(&str, ",")) != NULL) {
4969 		if (!*token)
4970 			continue;
4971 
4972 		for_each_subsys(ss, i) {
4973 			if (strcmp(token, ss->name) &&
4974 			    strcmp(token, ss->legacy_name))
4975 				continue;
4976 			cgroup_disable_mask |= 1 << i;
4977 		}
4978 	}
4979 	return 1;
4980 }
4981 __setup("cgroup_disable=", cgroup_disable);
4982 
4983 /**
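/*
 * Example (documentation only): booting with
 *
 *	cgroup_disable=memory,cpuset
 *
 * disables the listed controllers; both current and legacy controller
 * names are accepted.
 */
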
4984  * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
4985  * @dentry: directory dentry of interest
4986  * @ss: subsystem of interest
4987  *
4988  * If @dentry is a directory for a cgroup which has @ss enabled on it, try
4989  * to get the corresponding css and return it.  If such css doesn't exist
4990  * or can't be pinned, an ERR_PTR value is returned.
4991  */
4992 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
4993 						       struct cgroup_subsys *ss)
4994 {
4995 	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
4996 	struct file_system_type *s_type = dentry->d_sb->s_type;
4997 	struct cgroup_subsys_state *css = NULL;
4998 	struct cgroup *cgrp;
4999 
5000 	/* is @dentry a cgroup dir? */
5001 	if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) ||
5002 	    !kn || kernfs_type(kn) != KERNFS_DIR)
5003 		return ERR_PTR(-EBADF);
5004 
5005 	rcu_read_lock();
5006 
5007 	/*
5008 	 * This path doesn't originate from kernfs and @kn could already
5009 	 * have been or be removed at any point.  @kn->priv is RCU
5010 	 * protected for this access.  See css_release_work_fn() for details.
5011 	 */
5012 	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
5013 	if (cgrp)
5014 		css = cgroup_css(cgrp, ss);
5015 
5016 	if (!css || !css_tryget_online(css))
5017 		css = ERR_PTR(-ENOENT);
5018 
5019 	rcu_read_unlock();
5020 	return css;
5021 }
5022 
5023 /**
5024  * css_from_id - lookup css by id
5025  * @id: the cgroup id
5026  * @ss: cgroup subsys to be looked into
5027  *
5028  * Returns the css if there's valid one with @id, otherwise returns NULL.
5029  * Should be called under rcu_read_lock().
5030  */
5031 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
5032 {
5033 	WARN_ON_ONCE(!rcu_read_lock_held());
5034 	return idr_find(&ss->css_idr, id);
5035 }
5036 
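/*
 * Illustrative sketch (not compiled): css_from_id() only guarantees
 * that the css won't be freed while the RCU read lock is held.  A
 * caller which needs the css past the critical section must tryget a
 * reference first.
 */
#if 0
static struct cgroup_subsys_state *example_pin_by_id(int id,
						     struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = css_from_id(id, ss);
	if (css && !css_tryget_online(css))
		css = NULL;
	rcu_read_unlock();

	return css;
}
#endif
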
5037 /**
5038  * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
5039  * @path: path on the default hierarchy
5040  *
5041  * Find the cgroup at @path on the default hierarchy, increment its
5042  * reference count and return it.  Returns pointer to the found cgroup on
5043  * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR)
5044  * if @path points to a non-directory.
5045  */
5046 struct cgroup *cgroup_get_from_path(const char *path)
5047 {
5048 	struct kernfs_node *kn;
5049 	struct cgroup *cgrp;
5050 
5051 	mutex_lock(&cgroup_mutex);
5052 
5053 	kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
5054 	if (kn) {
5055 		if (kernfs_type(kn) == KERNFS_DIR) {
5056 			cgrp = kn->priv;
5057 			cgroup_get_live(cgrp);
5058 		} else {
5059 			cgrp = ERR_PTR(-ENOTDIR);
5060 		}
5061 		kernfs_put(kn);
5062 	} else {
5063 		cgrp = ERR_PTR(-ENOENT);
5064 	}
5065 
5066 	mutex_unlock(&cgroup_mutex);
5067 	return cgrp;
5068 }
5069 EXPORT_SYMBOL_GPL(cgroup_get_from_path);
5070 
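/*
 * Illustrative sketch (not compiled): resolving a default-hierarchy
 * path to a pinned cgroup and releasing it when done.  "/foo/bar" is a
 * made-up path.
 */
#if 0
static void example_use_path(void)
{
	struct cgroup *cgrp;

	cgrp = cgroup_get_from_path("/foo/bar");
	if (IS_ERR(cgrp))
		return;
	/* ... use @cgrp ... */
	cgroup_put(cgrp);
}
#endif
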
5071 /**
5072  * cgroup_get_from_fd - get a cgroup pointer from a fd
5073  * @fd: fd obtained by open(cgroup2_dir)
5074  *
5075  * Find the cgroup from a fd which should be obtained
5076  * by opening a cgroup directory.  Returns a pointer to the
5077  * cgroup on success. ERR_PTR is returned if the cgroup
5078  * cannot be found.
5079  */
5080 struct cgroup *cgroup_get_from_fd(int fd)
5081 {
5082 	struct cgroup_subsys_state *css;
5083 	struct cgroup *cgrp;
5084 	struct file *f;
5085 
5086 	f = fget_raw(fd);
5087 	if (!f)
5088 		return ERR_PTR(-EBADF);
5089 
5090 	css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
5091 	fput(f);
5092 	if (IS_ERR(css))
5093 		return ERR_CAST(css);
5094 
5095 	cgrp = css->cgroup;
5096 	if (!cgroup_on_dfl(cgrp)) {
5097 		cgroup_put(cgrp);
5098 		return ERR_PTR(-EBADF);
5099 	}
5100 
5101 	return cgrp;
5102 }
5103 EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
5104 
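/*
 * Illustrative sketch (not compiled): pinning a cgroup from a
 * userspace-supplied fd, as done by callers like the bpf attach path.
 * The returned cgroup must be released with cgroup_put().
 */
#if 0
static int example_use_fd(int fd)
{
	struct cgroup *cgrp = cgroup_get_from_fd(fd);

	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);
	/* ... use @cgrp ... */
	cgroup_put(cgrp);
	return 0;
}
#endif
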
5105 /*
5106  * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
5107  * definition in cgroup-defs.h.
5108  */
5109 #ifdef CONFIG_SOCK_CGROUP_DATA
5110 
5111 #if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
5112 
5113 DEFINE_SPINLOCK(cgroup_sk_update_lock);
5114 static bool cgroup_sk_alloc_disabled __read_mostly;
5115 
5116 void cgroup_sk_alloc_disable(void)
5117 {
5118 	if (cgroup_sk_alloc_disabled)
5119 		return;
5120 	pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
5121 	cgroup_sk_alloc_disabled = true;
5122 }
5123 
5124 #else
5125 
5126 #define cgroup_sk_alloc_disabled	false
5127 
5128 #endif
5129 
5130 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
5131 {
5132 	if (cgroup_sk_alloc_disabled)
5133 		return;
5134 
5135 	/* Socket clone path */
5136 	if (skcd->val) {
5137 		/*
5138 		 * We might be cloning a socket which is left in an empty
5139 		 * cgroup and the cgroup might have already been rmdir'd.
5140 		 * Don't use cgroup_get_live().
5141 		 */
5142 		cgroup_get(sock_cgroup_ptr(skcd));
5143 		return;
5144 	}
5145 
5146 	rcu_read_lock();
5147 
5148 	while (true) {
5149 		struct css_set *cset;
5150 
5151 		cset = task_css_set(current);
5152 		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
5153 			skcd->val = (unsigned long)cset->dfl_cgrp;
5154 			break;
5155 		}
5156 		cpu_relax();
5157 	}
5158 
5159 	rcu_read_unlock();
5160 }
5161 
5162 void cgroup_sk_free(struct sock_cgroup_data *skcd)
5163 {
5164 	cgroup_put(sock_cgroup_ptr(skcd));
5165 }
5166 
5167 #endif	/* CONFIG_SOCK_CGROUP_DATA */
5168 
5169 #ifdef CONFIG_CGROUP_BPF
5170 int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
5171 		      enum bpf_attach_type type, bool overridable)
5172 {
5173 	struct cgroup *parent = cgroup_parent(cgrp);
5174 	int ret;
5175 
5176 	mutex_lock(&cgroup_mutex);
5177 	ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
5178 	mutex_unlock(&cgroup_mutex);
5179 	return ret;
5180 }
5181 #endif /* CONFIG_CGROUP_BPF */
5182