xref: /openbmc/linux/kernel/cgroup/cgroup.c (revision e5c86679)
1 /*
2  *  Generic process-grouping system.
3  *
4  *  Based originally on the cpuset system, extracted by Paul Menage
5  *  Copyright (C) 2006 Google, Inc
6  *
7  *  Notifications support
8  *  Copyright (C) 2009 Nokia Corporation
9  *  Author: Kirill A. Shutemov
10  *
11  *  Copyright notices from the original cpuset code:
12  *  --------------------------------------------------
13  *  Copyright (C) 2003 BULL SA.
14  *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
15  *
16  *  Portions derived from Patrick Mochel's sysfs code.
17  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
18  *
19  *  2003-10-10 Written by Simon Derr.
20  *  2003-10-22 Updates by Stephen Hemminger.
21  *  2004 May-July Rework by Paul Jackson.
22  *  ---------------------------------------------------
23  *
24  *  This file is subject to the terms and conditions of the GNU General Public
25  *  License.  See the file COPYING in the main directory of the Linux
26  *  distribution for more details.
27  */
28 
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 
31 #include "cgroup-internal.h"
32 
33 #include <linux/cred.h>
34 #include <linux/errno.h>
35 #include <linux/init_task.h>
36 #include <linux/kernel.h>
37 #include <linux/magic.h>
38 #include <linux/mutex.h>
39 #include <linux/mount.h>
40 #include <linux/pagemap.h>
41 #include <linux/proc_fs.h>
42 #include <linux/rcupdate.h>
43 #include <linux/sched.h>
44 #include <linux/sched/task.h>
45 #include <linux/slab.h>
46 #include <linux/spinlock.h>
47 #include <linux/percpu-rwsem.h>
48 #include <linux/string.h>
49 #include <linux/hashtable.h>
50 #include <linux/idr.h>
51 #include <linux/kthread.h>
52 #include <linux/atomic.h>
53 #include <linux/cpuset.h>
54 #include <linux/proc_ns.h>
55 #include <linux/nsproxy.h>
56 #include <linux/file.h>
57 #include <net/sock.h>
58 
59 #define CREATE_TRACE_POINTS
60 #include <trace/events/cgroup.h>
61 
62 #define CGROUP_FILE_NAME_MAX		(MAX_CGROUP_TYPE_NAMELEN +	\
63 					 MAX_CFTYPE_NAME + 2)
64 
65 /*
66  * cgroup_mutex is the master lock.  Any modification to cgroup or its
67  * hierarchy must be performed while holding it.
68  *
69  * css_set_lock protects task->cgroups pointer, the list of css_set
70  * objects, and the chain of tasks off each css_set.
71  *
72  * These locks are exported if CONFIG_PROVE_RCU so that accessors in
73  * cgroup.h can use them for lockdep annotations.
74  */
75 DEFINE_MUTEX(cgroup_mutex);
76 DEFINE_SPINLOCK(css_set_lock);
77 
78 #ifdef CONFIG_PROVE_RCU
79 EXPORT_SYMBOL_GPL(cgroup_mutex);
80 EXPORT_SYMBOL_GPL(css_set_lock);
81 #endif
82 
83 /*
84  * Protects cgroup_idr and css_idr so that IDs can be released without
85  * grabbing cgroup_mutex.
86  */
87 static DEFINE_SPINLOCK(cgroup_idr_lock);
88 
89 /*
90  * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
91  * against file removal/re-creation across css hiding.
92  */
93 static DEFINE_SPINLOCK(cgroup_file_kn_lock);
94 
95 struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
96 
97 #define cgroup_assert_mutex_or_rcu_locked()				\
98 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&			\
99 			   !lockdep_is_held(&cgroup_mutex),		\
100 			   "cgroup_mutex or RCU read lock required");
101 
102 /*
103  * cgroup destruction makes heavy use of work items and there can be a lot
104  * of concurrent destructions.  Use a separate workqueue so that cgroup
105  * destruction work items don't end up filling up max_active of system_wq
106  * which may lead to deadlock.
107  */
108 static struct workqueue_struct *cgroup_destroy_wq;
109 
110 /* generate an array of cgroup subsystem pointers */
111 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
112 struct cgroup_subsys *cgroup_subsys[] = {
113 #include <linux/cgroup_subsys.h>
114 };
115 #undef SUBSYS
116 
117 /* array of cgroup subsystem names */
118 #define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
119 static const char *cgroup_subsys_name[] = {
120 #include <linux/cgroup_subsys.h>
121 };
122 #undef SUBSYS
123 
124 /* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
125 #define SUBSYS(_x)								\
126 	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);			\
127 	DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);			\
128 	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);			\
129 	EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
130 #include <linux/cgroup_subsys.h>
131 #undef SUBSYS
132 
133 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
134 static struct static_key_true *cgroup_subsys_enabled_key[] = {
135 #include <linux/cgroup_subsys.h>
136 };
137 #undef SUBSYS
138 
139 #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
140 static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
141 #include <linux/cgroup_subsys.h>
142 };
143 #undef SUBSYS
144 
145 /*
146  * The default hierarchy, reserved for the subsystems that are otherwise
147  * unattached - it never has more than a single cgroup, and all tasks are
148  * part of that cgroup.
149  */
150 struct cgroup_root cgrp_dfl_root;
151 EXPORT_SYMBOL_GPL(cgrp_dfl_root);
152 
153 /*
154  * The default hierarchy always exists but is hidden until mounted for the
155  * first time.  This is for backward compatibility.
156  */
157 static bool cgrp_dfl_visible;
158 
159 /* some controllers are not supported in the default hierarchy */
160 static u16 cgrp_dfl_inhibit_ss_mask;
161 
162 /* some controllers are implicitly enabled on the default hierarchy */
163 static u16 cgrp_dfl_implicit_ss_mask;
164 
165 /* The list of hierarchy roots */
166 LIST_HEAD(cgroup_roots);
167 static int cgroup_root_count;
168 
169 /* hierarchy ID allocation and mapping, protected by cgroup_mutex */
170 static DEFINE_IDR(cgroup_hierarchy_idr);
171 
172 /*
173  * Assign a monotonically increasing serial number to csses.  It guarantees
174  * cgroups with bigger numbers are newer than those with smaller numbers.
175  * Also, as csses are always appended to the parent's ->children list, it
176  * guarantees that sibling csses are always sorted in the ascending serial
177  * number order on the list.  Protected by cgroup_mutex.
178  */
179 static u64 css_serial_nr_next = 1;
180 
181 /*
182  * These bitmasks identify subsystems with specific features to avoid
183  * having to do iterative checks repeatedly.
184  */
185 static u16 have_fork_callback __read_mostly;
186 static u16 have_exit_callback __read_mostly;
187 static u16 have_free_callback __read_mostly;
188 static u16 have_canfork_callback __read_mostly;
189 
190 /* cgroup namespace for init task */
191 struct cgroup_namespace init_cgroup_ns = {
192 	.count		= { .counter = 2, },
193 	.user_ns	= &init_user_ns,
194 	.ns.ops		= &cgroupns_operations,
195 	.ns.inum	= PROC_CGROUP_INIT_INO,
196 	.root_cset	= &init_css_set,
197 };
198 
199 static struct file_system_type cgroup2_fs_type;
200 static struct cftype cgroup_base_files[];
201 
202 static int cgroup_apply_control(struct cgroup *cgrp);
203 static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
204 static void css_task_iter_advance(struct css_task_iter *it);
205 static int cgroup_destroy_locked(struct cgroup *cgrp);
206 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
207 					      struct cgroup_subsys *ss);
208 static void css_release(struct percpu_ref *ref);
209 static void kill_css(struct cgroup_subsys_state *css);
210 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
211 			      struct cgroup *cgrp, struct cftype cfts[],
212 			      bool is_add);
213 
214 /**
215  * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
216  * @ssid: subsys ID of interest
217  *
218  * cgroup_subsys_enabled() can only be used with literal subsys names, which
219  * is fine for individual subsystems but unsuitable for cgroup core.  This
220  * is a slower static_key_enabled()-based test indexed by @ssid.
221  */
222 bool cgroup_ssid_enabled(int ssid)
223 {
224 	if (CGROUP_SUBSYS_COUNT == 0)
225 		return false;
226 
227 	return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
228 }
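
/*
 * A minimal usage sketch (illustrative, not taken from a real caller):
 * cgroup core can test any subsystem by ID rather than by literal name:
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys(ss, ssid)
 *		if (!cgroup_ssid_enabled(ssid))
 *			pr_info("%s is disabled\n", ss->name);
 */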
229 
230 /**
231  * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
232  * @cgrp: the cgroup of interest
233  *
234  * The default hierarchy is the v2 interface of cgroup and this function
235  * can be used to test whether a cgroup is on the default hierarchy for
236  * cases where a subsystem should behave differently depending on the
237  * interface version.
238  *
239  * The set of behaviors which change on the default hierarchy are still
240  * being determined and the mount option is prefixed with __DEVEL__.
241  *
242  * List of changed behaviors:
243  *
244  * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
245  *   and "name" are disallowed.
246  *
247  * - When mounting an existing superblock, mount options should match.
248  *
249  * - Remount is disallowed.
250  *
251  * - rename(2) is disallowed.
252  *
253  * - "tasks" is removed.  Everything should be at process granularity.  Use
254  *   "cgroup.procs" instead.
255  *
256  * - "cgroup.procs" is not sorted.  pids will be unique unless they got
257  *   recycled in between reads.
258  *
259  * - "release_agent" and "notify_on_release" are removed.  Replacement
260  *   notification mechanism will be implemented.
261  *
262  * - "cgroup.clone_children" is removed.
263  *
264  * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
265  *   and its descendants contain no task; otherwise, 1.  The file also
266  *   generates a kernfs notification which can be monitored through poll and
267  *   [di]notify when the value of the file changes.
268  *
269  * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
270  *   take masks of ancestors with non-empty cpus/mems, instead of being
271  *   moved to an ancestor.
272  *
273  * - cpuset: a task can be moved into an empty cpuset, and again it takes
274  *   masks of ancestors.
275  *
276  * - memcg: use_hierarchy is on by default and the cgroup file for the flag
277  *   is not created.
278  *
279  * - blkcg: blk-throttle becomes properly hierarchical.
280  *
281  * - debug: disallowed on the default hierarchy.
282  */
283 bool cgroup_on_dfl(const struct cgroup *cgrp)
284 {
285 	return cgrp->root == &cgrp_dfl_root;
286 }
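
/*
 * A minimal sketch of how a subsystem might branch on the interface
 * version (my_report() is a hypothetical helper, not part of this file):
 *
 *	static void my_report(struct cgroup *cgrp)
 *	{
 *		if (cgroup_on_dfl(cgrp))
 *			pr_info("v2 (default hierarchy) semantics\n");
 *		else
 *			pr_info("v1 (legacy hierarchy) semantics\n");
 *	}
 */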
287 
288 /* IDR wrappers which synchronize using cgroup_idr_lock */
289 static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
290 			    gfp_t gfp_mask)
291 {
292 	int ret;
293 
294 	idr_preload(gfp_mask);
295 	spin_lock_bh(&cgroup_idr_lock);
296 	ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
297 	spin_unlock_bh(&cgroup_idr_lock);
298 	idr_preload_end();
299 	return ret;
300 }
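
/*
 * Note on the pattern above: idr_preload() performs the sleeping part of
 * the allocation before the lock is taken, so the idr_alloc() call itself
 * runs under the bh-disabled spinlock with __GFP_DIRECT_RECLAIM masked
 * off and therefore cannot sleep.
 */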
301 
302 static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
303 {
304 	void *ret;
305 
306 	spin_lock_bh(&cgroup_idr_lock);
307 	ret = idr_replace(idr, ptr, id);
308 	spin_unlock_bh(&cgroup_idr_lock);
309 	return ret;
310 }
311 
312 static void cgroup_idr_remove(struct idr *idr, int id)
313 {
314 	spin_lock_bh(&cgroup_idr_lock);
315 	idr_remove(idr, id);
316 	spin_unlock_bh(&cgroup_idr_lock);
317 }
318 
319 static struct cgroup *cgroup_parent(struct cgroup *cgrp)
320 {
321 	struct cgroup_subsys_state *parent_css = cgrp->self.parent;
322 
323 	if (parent_css)
324 		return container_of(parent_css, struct cgroup, self);
325 	return NULL;
326 }
327 
328 /* subsystems visibly enabled on a cgroup */
329 static u16 cgroup_control(struct cgroup *cgrp)
330 {
331 	struct cgroup *parent = cgroup_parent(cgrp);
332 	u16 root_ss_mask = cgrp->root->subsys_mask;
333 
334 	if (parent)
335 		return parent->subtree_control;
336 
337 	if (cgroup_on_dfl(cgrp))
338 		root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
339 				  cgrp_dfl_implicit_ss_mask);
340 	return root_ss_mask;
341 }
342 
343 /* subsystems enabled on a cgroup */
344 static u16 cgroup_ss_mask(struct cgroup *cgrp)
345 {
346 	struct cgroup *parent = cgroup_parent(cgrp);
347 
348 	if (parent)
349 		return parent->subtree_ss_mask;
350 
351 	return cgrp->root->subsys_mask;
352 }
353 
354 /**
355  * cgroup_css - obtain a cgroup's css for the specified subsystem
356  * @cgrp: the cgroup of interest
357  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
358  *
359  * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
360  * function must be called either under cgroup_mutex or rcu_read_lock() and
361  * the caller is responsible for pinning the returned css if it wants to
362  * keep accessing it outside the said locks.  This function may return
363  * %NULL if @cgrp doesn't have @ss enabled.
364  */
365 static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
366 					      struct cgroup_subsys *ss)
367 {
368 	if (ss)
369 		return rcu_dereference_check(cgrp->subsys[ss->id],
370 					lockdep_is_held(&cgroup_mutex));
371 	else
372 		return &cgrp->self;
373 }
374 
375 /**
376  * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
377  * @cgrp: the cgroup of interest
378  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
379  *
380  * Similar to cgroup_css() but returns the effective css, which is defined
381  * as the matching css of the nearest ancestor including self which has @ss
382  * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
383  * function is guaranteed to return non-NULL css.
384  */
385 static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
386 						struct cgroup_subsys *ss)
387 {
388 	lockdep_assert_held(&cgroup_mutex);
389 
390 	if (!ss)
391 		return &cgrp->self;
392 
393 	/*
394 	 * This function is used while updating css associations and thus
395 	 * can't test the csses directly.  Test ss_mask.
396 	 */
397 	while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
398 		cgrp = cgroup_parent(cgrp);
399 		if (!cgrp)
400 			return NULL;
401 	}
402 
403 	return cgroup_css(cgrp, ss);
404 }
405 
406 /**
407  * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
408  * @cgrp: the cgroup of interest
409  * @ss: the subsystem of interest
410  *
411  * Find and get the effective css of @cgrp for @ss.  The effective css is
412  * defined as the matching css of the nearest ancestor including self which
413  * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
414  * the root css is returned, so this function always returns a valid css.
415  * The returned css must be put using css_put().
416  */
417 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
418 					     struct cgroup_subsys *ss)
419 {
420 	struct cgroup_subsys_state *css;
421 
422 	rcu_read_lock();
423 
424 	do {
425 		css = cgroup_css(cgrp, ss);
426 
427 		if (css && css_tryget_online(css))
428 			goto out_unlock;
429 		cgrp = cgroup_parent(cgrp);
430 	} while (cgrp);
431 
432 	css = init_css_set.subsys[ss->id];
433 	css_get(css);
434 out_unlock:
435 	rcu_read_unlock();
436 	return css;
437 }
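
/*
 * A minimal usage sketch (illustrative, assumes CONFIG_CPUSETS): the
 * returned css is pinned and must be released with css_put():
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = cgroup_get_e_css(cgrp, &cpuset_cgrp_subsys);
 *	pr_info("effective css serial nr: %llu\n", css->serial_nr);
 *	css_put(css);
 */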
438 
439 static void cgroup_get(struct cgroup *cgrp)
440 {
441 	WARN_ON_ONCE(cgroup_is_dead(cgrp));
442 	css_get(&cgrp->self);
443 }
444 
445 static bool cgroup_tryget(struct cgroup *cgrp)
446 {
447 	return css_tryget(&cgrp->self);
448 }
449 
450 struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
451 {
452 	struct cgroup *cgrp = of->kn->parent->priv;
453 	struct cftype *cft = of_cft(of);
454 
455 	/*
456 	 * This is open and unprotected implementation of cgroup_css().
457 	 * seq_css() is only called from a kernfs file operation which has
458 	 * an active reference on the file.  Because all the subsystem
459 	 * files are drained before a css is disassociated with a cgroup,
460 	 * the matching css from the cgroup's subsys table is guaranteed to
461 	 * be and stay valid until the enclosing operation is complete.
462 	 */
463 	if (cft->ss)
464 		return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
465 	else
466 		return &cgrp->self;
467 }
468 EXPORT_SYMBOL_GPL(of_css);
469 
470 /**
471  * for_each_css - iterate all css's of a cgroup
472  * @css: the iteration cursor
473  * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
474  * @cgrp: the target cgroup to iterate css's of
475  *
476  * Should be called under cgroup_[tree_]mutex.
477  */
478 #define for_each_css(css, ssid, cgrp)					\
479 	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
480 		if (!((css) = rcu_dereference_check(			\
481 				(cgrp)->subsys[(ssid)],			\
482 				lockdep_is_held(&cgroup_mutex)))) { }	\
483 		else
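
/*
 * A minimal usage sketch (illustrative): with cgroup_mutex held, visit
 * each css currently attached to @cgrp:
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	for_each_css(css, ssid, cgrp)
 *		pr_info("%s css is attached\n", cgroup_subsys_name[ssid]);
 */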
484 
485 /**
486  * for_each_e_css - iterate all effective css's of a cgroup
487  * @css: the iteration cursor
488  * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
489  * @cgrp: the target cgroup to iterate css's of
490  *
491  * Should be called under cgroup_[tree_]mutex.
492  */
493 #define for_each_e_css(css, ssid, cgrp)					\
494 	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)	\
495 		if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
496 			;						\
497 		else
498 
499 /**
500  * do_each_subsys_mask - filter for_each_subsys with a bitmask
501  * @ss: the iteration cursor
502  * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
503  * @ss_mask: the bitmask
504  *
505  * The block will only run for cases where the ssid-th bit (1 << ssid) of
506  * @ss_mask is set.
507  */
508 #define do_each_subsys_mask(ss, ssid, ss_mask) do {			\
509 	unsigned long __ss_mask = (ss_mask);				\
510 	if (!CGROUP_SUBSYS_COUNT) { /* to avoid spurious gcc warning */	\
511 		(ssid) = 0;						\
512 		break;							\
513 	}								\
514 	for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) {	\
515 		(ss) = cgroup_subsys[ssid];				\
516 		{
517 
518 #define while_each_subsys_mask()					\
519 		}							\
520 	}								\
521 } while (false)
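
/*
 * A minimal usage sketch (illustrative): the pair brackets a block which
 * runs once per subsystem whose bit is set in the mask, e.g. to list
 * every subsystem with a fork callback:
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	do_each_subsys_mask(ss, ssid, have_fork_callback) {
 *		pr_info("%s has a fork callback\n", ss->name);
 *	} while_each_subsys_mask();
 */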
522 
523 /* iterate over child cgrps, lock should be held throughout iteration */
524 #define cgroup_for_each_live_child(child, cgrp)				\
525 	list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
526 		if (({ lockdep_assert_held(&cgroup_mutex);		\
527 		       cgroup_is_dead(child); }))			\
528 			;						\
529 		else
530 
531 /* walk live descendants in preorder */
532 #define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)		\
533 	css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))	\
534 		if (({ lockdep_assert_held(&cgroup_mutex);		\
535 		       (dsct) = (d_css)->cgroup;			\
536 		       cgroup_is_dead(dsct); }))			\
537 			;						\
538 		else
539 
540 /* walk live descendants in postorder */
541 #define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp)		\
542 	css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL))	\
543 		if (({ lockdep_assert_held(&cgroup_mutex);		\
544 		       (dsct) = (d_css)->cgroup;			\
545 		       cgroup_is_dead(dsct); }))			\
546 			;						\
547 		else
548 
549 /*
550  * The default css_set - used by init and its children prior to any
551  * hierarchies being mounted. It contains a pointer to the root state
552  * for each subsystem. Also used to anchor the list of css_sets. Not
553  * reference-counted, to improve performance when child cgroups
554  * haven't been created.
555  */
556 struct css_set init_css_set = {
557 	.refcount		= ATOMIC_INIT(1),
558 	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),
559 	.mg_tasks		= LIST_HEAD_INIT(init_css_set.mg_tasks),
560 	.task_iters		= LIST_HEAD_INIT(init_css_set.task_iters),
561 	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
562 	.mg_preload_node	= LIST_HEAD_INIT(init_css_set.mg_preload_node),
563 	.mg_node		= LIST_HEAD_INIT(init_css_set.mg_node),
564 };
565 
566 static int css_set_count	= 1;	/* 1 for init_css_set */
567 
568 /**
569  * css_set_populated - does a css_set contain any tasks?
570  * @cset: target css_set
571  */
572 static bool css_set_populated(struct css_set *cset)
573 {
574 	lockdep_assert_held(&css_set_lock);
575 
576 	return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
577 }
578 
579 /**
580  * cgroup_update_populated - updated populated count of a cgroup
581  * @cgrp: the target cgroup
582  * @populated: inc or dec populated count
583  *
584  * One of the css_sets associated with @cgrp is either getting its first
585  * task or losing the last.  Update @cgrp->populated_cnt accordingly.  The
586  * count is propagated towards root so that a given cgroup's populated_cnt
587  * is zero iff the cgroup and all its descendants don't contain any tasks.
588  *
589  * @cgrp's interface file "cgroup.populated" is zero if
590  * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
591  * changes from or to zero, userland is notified that the content of the
592  * interface file has changed.  This can be used to detect when @cgrp and
593  * its descendants become populated or empty.
594  */
595 static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
596 {
597 	lockdep_assert_held(&css_set_lock);
598 
599 	do {
600 		bool trigger;
601 
602 		if (populated)
603 			trigger = !cgrp->populated_cnt++;
604 		else
605 			trigger = !--cgrp->populated_cnt;
606 
607 		if (!trigger)
608 			break;
609 
610 		cgroup1_check_for_release(cgrp);
611 		cgroup_file_notify(&cgrp->events_file);
612 
613 		cgrp = cgroup_parent(cgrp);
614 	} while (cgrp);
615 }
616 
617 /**
618  * css_set_update_populated - update populated state of a css_set
619  * @cset: target css_set
620  * @populated: whether @cset is populated or depopulated
621  *
622  * @cset is either getting the first task or losing the last.  Update the
623  * ->populated_cnt of all associated cgroups accordingly.
624  */
625 static void css_set_update_populated(struct css_set *cset, bool populated)
626 {
627 	struct cgrp_cset_link *link;
628 
629 	lockdep_assert_held(&css_set_lock);
630 
631 	list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
632 		cgroup_update_populated(link->cgrp, populated);
633 }
634 
635 /**
636  * css_set_move_task - move a task from one css_set to another
637  * @task: task being moved
638  * @from_cset: css_set @task currently belongs to (may be NULL)
639  * @to_cset: new css_set @task is being moved to (may be NULL)
640  * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
641  *
642  * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
643  * css_set, @from_cset can be NULL.  If @task is being disassociated
644  * instead of moved, @to_cset can be NULL.
645  *
646  * This function automatically handles populated_cnt updates and
647  * css_task_iter adjustments but the caller is responsible for managing
648  * @from_cset and @to_cset's reference counts.
649  */
650 static void css_set_move_task(struct task_struct *task,
651 			      struct css_set *from_cset, struct css_set *to_cset,
652 			      bool use_mg_tasks)
653 {
654 	lockdep_assert_held(&css_set_lock);
655 
656 	if (to_cset && !css_set_populated(to_cset))
657 		css_set_update_populated(to_cset, true);
658 
659 	if (from_cset) {
660 		struct css_task_iter *it, *pos;
661 
662 		WARN_ON_ONCE(list_empty(&task->cg_list));
663 
664 		/*
665 		 * @task is leaving, advance task iterators which are
666 		 * pointing to it so that they can resume at the next
667 		 * position.  Advancing an iterator might remove it from
668 		 * the list, use safe walk.  See css_task_iter_advance*()
669 		 * for details.
670 		 */
671 		list_for_each_entry_safe(it, pos, &from_cset->task_iters,
672 					 iters_node)
673 			if (it->task_pos == &task->cg_list)
674 				css_task_iter_advance(it);
675 
676 		list_del_init(&task->cg_list);
677 		if (!css_set_populated(from_cset))
678 			css_set_update_populated(from_cset, false);
679 	} else {
680 		WARN_ON_ONCE(!list_empty(&task->cg_list));
681 	}
682 
683 	if (to_cset) {
684 		/*
685 		 * We are synchronized through cgroup_threadgroup_rwsem
686 		 * against PF_EXITING setting such that we can't race
687 		 * against cgroup_exit() changing the css_set to
688 		 * init_css_set and dropping the old one.
689 		 */
690 		WARN_ON_ONCE(task->flags & PF_EXITING);
691 
692 		rcu_assign_pointer(task->cgroups, to_cset);
693 		list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
694 							     &to_cset->tasks);
695 	}
696 }
697 
698 /*
699  * hash table for css_sets.  This improves the performance of finding
700  * an existing css_set.  This hash doesn't (currently) take into
701  * account cgroups in empty hierarchies.
702  */
703 #define CSS_SET_HASH_BITS	7
704 static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
705 
706 static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
707 {
708 	unsigned long key = 0UL;
709 	struct cgroup_subsys *ss;
710 	int i;
711 
712 	for_each_subsys(ss, i)
713 		key += (unsigned long)css[i];
714 	key = (key >> 16) ^ key;
715 
716 	return key;
717 }
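
/*
 * Worked example with made-up pointer values: with two csses at
 * 0x12340000 and 0x56780000, the key starts as their sum, 0x68ac0000,
 * and the final fold yields (0x68ac0000 >> 16) ^ 0x68ac0000 ==
 * 0x68ac68ac.
 */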
718 
719 void put_css_set_locked(struct css_set *cset)
720 {
721 	struct cgrp_cset_link *link, *tmp_link;
722 	struct cgroup_subsys *ss;
723 	int ssid;
724 
725 	lockdep_assert_held(&css_set_lock);
726 
727 	if (!atomic_dec_and_test(&cset->refcount))
728 		return;
729 
730 	/* This css_set is dead. unlink it and release cgroup and css refs */
731 	for_each_subsys(ss, ssid) {
732 		list_del(&cset->e_cset_node[ssid]);
733 		css_put(cset->subsys[ssid]);
734 	}
735 	hash_del(&cset->hlist);
736 	css_set_count--;
737 
738 	list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
739 		list_del(&link->cset_link);
740 		list_del(&link->cgrp_link);
741 		if (cgroup_parent(link->cgrp))
742 			cgroup_put(link->cgrp);
743 		kfree(link);
744 	}
745 
746 	kfree_rcu(cset, rcu_head);
747 }
748 
749 /**
750  * compare_css_sets - helper function for find_existing_css_set().
751  * @cset: candidate css_set being tested
752  * @old_cset: existing css_set for a task
753  * @new_cgrp: cgroup that's being entered by the task
754  * @template: desired set of css pointers in css_set (pre-calculated)
755  *
756  * Returns true if "cset" matches "old_cset" except for the hierarchy
757  * which "new_cgrp" belongs to, for which it should match "new_cgrp".
758  */
759 static bool compare_css_sets(struct css_set *cset,
760 			     struct css_set *old_cset,
761 			     struct cgroup *new_cgrp,
762 			     struct cgroup_subsys_state *template[])
763 {
764 	struct list_head *l1, *l2;
765 
766 	/*
767 	 * On the default hierarchy, there can be csets which are
768 	 * associated with the same set of cgroups but different csses.
769 	 * Let's first ensure that csses match.
770 	 */
771 	if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
772 		return false;
773 
774 	/*
775 	 * Compare cgroup pointers in order to distinguish between
776 	 * different cgroups in hierarchies.  As different cgroups may
777 	 * share the same effective css, this comparison is always
778 	 * necessary.
779 	 */
780 	l1 = &cset->cgrp_links;
781 	l2 = &old_cset->cgrp_links;
782 	while (1) {
783 		struct cgrp_cset_link *link1, *link2;
784 		struct cgroup *cgrp1, *cgrp2;
785 
786 		l1 = l1->next;
787 		l2 = l2->next;
788 		/* See if we reached the end - both lists are of equal length. */
789 		if (l1 == &cset->cgrp_links) {
790 			BUG_ON(l2 != &old_cset->cgrp_links);
791 			break;
792 		} else {
793 			BUG_ON(l2 == &old_cset->cgrp_links);
794 		}
795 		/* Locate the cgroups associated with these links. */
796 		link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
797 		link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
798 		cgrp1 = link1->cgrp;
799 		cgrp2 = link2->cgrp;
800 		/* Hierarchies should be linked in the same order. */
801 		BUG_ON(cgrp1->root != cgrp2->root);
802 
803 		/*
804 		 * If this hierarchy is the hierarchy of the cgroup
805 		 * that's changing, then we need to check that this
806 		 * css_set points to the new cgroup; if it's any other
807 		 * hierarchy, then this css_set should point to the
808 		 * same cgroup as the old css_set.
809 		 */
810 		if (cgrp1->root == new_cgrp->root) {
811 			if (cgrp1 != new_cgrp)
812 				return false;
813 		} else {
814 			if (cgrp1 != cgrp2)
815 				return false;
816 		}
817 	}
818 	return true;
819 }
820 
821 /**
822  * find_existing_css_set - init css array and find the matching css_set
823  * @old_cset: the css_set that we're using before the cgroup transition
824  * @cgrp: the cgroup that we're moving into
825  * @template: out param for the new set of csses, should be clear on entry
826  */
827 static struct css_set *find_existing_css_set(struct css_set *old_cset,
828 					struct cgroup *cgrp,
829 					struct cgroup_subsys_state *template[])
830 {
831 	struct cgroup_root *root = cgrp->root;
832 	struct cgroup_subsys *ss;
833 	struct css_set *cset;
834 	unsigned long key;
835 	int i;
836 
837 	/*
838 	 * Build the set of subsystem state objects that we want to see in the
839 	 * new css_set.  While subsystems can change globally, the entries here
840 	 * won't change, so no need for locking.
841 	 */
842 	for_each_subsys(ss, i) {
843 		if (root->subsys_mask & (1UL << i)) {
844 			/*
845 			 * @ss is in this hierarchy, so we want the
846 			 * effective css from @cgrp.
847 			 */
848 			template[i] = cgroup_e_css(cgrp, ss);
849 		} else {
850 			/*
851 			 * @ss is not in this hierarchy, so we don't want
852 			 * to change the css.
853 			 */
854 			template[i] = old_cset->subsys[i];
855 		}
856 	}
857 
858 	key = css_set_hash(template);
859 	hash_for_each_possible(css_set_table, cset, hlist, key) {
860 		if (!compare_css_sets(cset, old_cset, cgrp, template))
861 			continue;
862 
863 		/* This css_set matches what we need */
864 		return cset;
865 	}
866 
867 	/* No existing css_set matched */
868 	return NULL;
869 }
870 
871 static void free_cgrp_cset_links(struct list_head *links_to_free)
872 {
873 	struct cgrp_cset_link *link, *tmp_link;
874 
875 	list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
876 		list_del(&link->cset_link);
877 		kfree(link);
878 	}
879 }
880 
881 /**
882  * allocate_cgrp_cset_links - allocate cgrp_cset_links
883  * @count: the number of links to allocate
884  * @tmp_links: list_head the allocated links are put on
885  *
886  * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
887  * through ->cset_link.  Returns 0 on success or -errno.
888  */
889 static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
890 {
891 	struct cgrp_cset_link *link;
892 	int i;
893 
894 	INIT_LIST_HEAD(tmp_links);
895 
896 	for (i = 0; i < count; i++) {
897 		link = kzalloc(sizeof(*link), GFP_KERNEL);
898 		if (!link) {
899 			free_cgrp_cset_links(tmp_links);
900 			return -ENOMEM;
901 		}
902 		list_add(&link->cset_link, tmp_links);
903 	}
904 	return 0;
905 }
906 
907 /**
908  * link_css_set - a helper function to link a css_set to a cgroup
909  * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
910  * @cset: the css_set to be linked
911  * @cgrp: the destination cgroup
912  */
913 static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
914 			 struct cgroup *cgrp)
915 {
916 	struct cgrp_cset_link *link;
917 
918 	BUG_ON(list_empty(tmp_links));
919 
920 	if (cgroup_on_dfl(cgrp))
921 		cset->dfl_cgrp = cgrp;
922 
923 	link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
924 	link->cset = cset;
925 	link->cgrp = cgrp;
926 
927 	/*
928 	 * Always add links to the tail of the lists so that the lists are
929 	 * in chronological order.
930 	 */
931 	list_move_tail(&link->cset_link, &cgrp->cset_links);
932 	list_add_tail(&link->cgrp_link, &cset->cgrp_links);
933 
934 	if (cgroup_parent(cgrp))
935 		cgroup_get(cgrp);
936 }
937 
938 /**
939  * find_css_set - return a new css_set with one cgroup updated
940  * @old_cset: the baseline css_set
941  * @cgrp: the cgroup to be updated
942  *
943  * Return a new css_set that's equivalent to @old_cset, but with @cgrp
944  * substituted into the appropriate hierarchy.
945  */
946 static struct css_set *find_css_set(struct css_set *old_cset,
947 				    struct cgroup *cgrp)
948 {
949 	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
950 	struct css_set *cset;
951 	struct list_head tmp_links;
952 	struct cgrp_cset_link *link;
953 	struct cgroup_subsys *ss;
954 	unsigned long key;
955 	int ssid;
956 
957 	lockdep_assert_held(&cgroup_mutex);
958 
959 	/* First see if we already have a css_set that matches
960 	 * the desired set */
961 	spin_lock_irq(&css_set_lock);
962 	cset = find_existing_css_set(old_cset, cgrp, template);
963 	if (cset)
964 		get_css_set(cset);
965 	spin_unlock_irq(&css_set_lock);
966 
967 	if (cset)
968 		return cset;
969 
970 	cset = kzalloc(sizeof(*cset), GFP_KERNEL);
971 	if (!cset)
972 		return NULL;
973 
974 	/* Allocate all the cgrp_cset_link objects that we'll need */
975 	if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
976 		kfree(cset);
977 		return NULL;
978 	}
979 
980 	atomic_set(&cset->refcount, 1);
981 	INIT_LIST_HEAD(&cset->tasks);
982 	INIT_LIST_HEAD(&cset->mg_tasks);
983 	INIT_LIST_HEAD(&cset->task_iters);
984 	INIT_HLIST_NODE(&cset->hlist);
985 	INIT_LIST_HEAD(&cset->cgrp_links);
986 	INIT_LIST_HEAD(&cset->mg_preload_node);
987 	INIT_LIST_HEAD(&cset->mg_node);
988 
989 	/* Copy the set of subsystem state objects generated in
990 	 * find_existing_css_set() */
991 	memcpy(cset->subsys, template, sizeof(cset->subsys));
992 
993 	spin_lock_irq(&css_set_lock);
994 	/* Add reference counts and links from the new css_set. */
995 	list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
996 		struct cgroup *c = link->cgrp;
997 
998 		if (c->root == cgrp->root)
999 			c = cgrp;
1000 		link_css_set(&tmp_links, cset, c);
1001 	}
1002 
1003 	BUG_ON(!list_empty(&tmp_links));
1004 
1005 	css_set_count++;
1006 
1007 	/* Add @cset to the hash table */
1008 	key = css_set_hash(cset->subsys);
1009 	hash_add(css_set_table, &cset->hlist, key);
1010 
1011 	for_each_subsys(ss, ssid) {
1012 		struct cgroup_subsys_state *css = cset->subsys[ssid];
1013 
1014 		list_add_tail(&cset->e_cset_node[ssid],
1015 			      &css->cgroup->e_csets[ssid]);
1016 		css_get(css);
1017 	}
1018 
1019 	spin_unlock_irq(&css_set_lock);
1020 
1021 	return cset;
1022 }
1023 
1024 struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
1025 {
1026 	struct cgroup *root_cgrp = kf_root->kn->priv;
1027 
1028 	return root_cgrp->root;
1029 }
1030 
1031 static int cgroup_init_root_id(struct cgroup_root *root)
1032 {
1033 	int id;
1034 
1035 	lockdep_assert_held(&cgroup_mutex);
1036 
1037 	id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
1038 	if (id < 0)
1039 		return id;
1040 
1041 	root->hierarchy_id = id;
1042 	return 0;
1043 }
1044 
1045 static void cgroup_exit_root_id(struct cgroup_root *root)
1046 {
1047 	lockdep_assert_held(&cgroup_mutex);
1048 
1049 	idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
1050 }
1051 
1052 void cgroup_free_root(struct cgroup_root *root)
1053 {
1054 	if (root) {
1055 		idr_destroy(&root->cgroup_idr);
1056 		kfree(root);
1057 	}
1058 }
1059 
1060 static void cgroup_destroy_root(struct cgroup_root *root)
1061 {
1062 	struct cgroup *cgrp = &root->cgrp;
1063 	struct cgrp_cset_link *link, *tmp_link;
1064 
1065 	trace_cgroup_destroy_root(root);
1066 
1067 	cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1068 
1069 	BUG_ON(atomic_read(&root->nr_cgrps));
1070 	BUG_ON(!list_empty(&cgrp->self.children));
1071 
1072 	/* Rebind all subsystems back to the default hierarchy */
1073 	WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));
1074 
1075 	/*
1076 	 * Release all the links from cset_links to this hierarchy's
1077 	 * root cgroup
1078 	 */
1079 	spin_lock_irq(&css_set_lock);
1080 
1081 	list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
1082 		list_del(&link->cset_link);
1083 		list_del(&link->cgrp_link);
1084 		kfree(link);
1085 	}
1086 
1087 	spin_unlock_irq(&css_set_lock);
1088 
1089 	if (!list_empty(&root->root_list)) {
1090 		list_del(&root->root_list);
1091 		cgroup_root_count--;
1092 	}
1093 
1094 	cgroup_exit_root_id(root);
1095 
1096 	mutex_unlock(&cgroup_mutex);
1097 
1098 	kernfs_destroy_root(root->kf_root);
1099 	cgroup_free_root(root);
1100 }
1101 
1102 /*
1103  * look up cgroup associated with current task's cgroup namespace on the
1104  * specified hierarchy
1105  */
1106 static struct cgroup *
1107 current_cgns_cgroup_from_root(struct cgroup_root *root)
1108 {
1109 	struct cgroup *res = NULL;
1110 	struct css_set *cset;
1111 
1112 	lockdep_assert_held(&css_set_lock);
1113 
1114 	rcu_read_lock();
1115 
1116 	cset = current->nsproxy->cgroup_ns->root_cset;
1117 	if (cset == &init_css_set) {
1118 		res = &root->cgrp;
1119 	} else {
1120 		struct cgrp_cset_link *link;
1121 
1122 		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
1123 			struct cgroup *c = link->cgrp;
1124 
1125 			if (c->root == root) {
1126 				res = c;
1127 				break;
1128 			}
1129 		}
1130 	}
1131 	rcu_read_unlock();
1132 
1133 	BUG_ON(!res);
1134 	return res;
1135 }
1136 
1137 /* look up cgroup associated with given css_set on the specified hierarchy */
1138 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
1139 					    struct cgroup_root *root)
1140 {
1141 	struct cgroup *res = NULL;
1142 
1143 	lockdep_assert_held(&cgroup_mutex);
1144 	lockdep_assert_held(&css_set_lock);
1145 
1146 	if (cset == &init_css_set) {
1147 		res = &root->cgrp;
1148 	} else {
1149 		struct cgrp_cset_link *link;
1150 
1151 		list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
1152 			struct cgroup *c = link->cgrp;
1153 
1154 			if (c->root == root) {
1155 				res = c;
1156 				break;
1157 			}
1158 		}
1159 	}
1160 
1161 	BUG_ON(!res);
1162 	return res;
1163 }
1164 
1165 /*
1166  * Return the cgroup for "task" from the given hierarchy. Must be
1167  * called with cgroup_mutex and css_set_lock held.
1168  */
1169 struct cgroup *task_cgroup_from_root(struct task_struct *task,
1170 				     struct cgroup_root *root)
1171 {
1172 	/*
1173 	 * No need to lock the task - since we hold cgroup_mutex the
1174 	 * task can't change groups, so the only thing that can happen
1175 	 * is that it exits and its css_set is set back to init_css_set.
1176 	 */
1177 	return cset_cgroup_from_root(task_css_set(task), root);
1178 }
1179 
1180 /*
1181  * A task must hold cgroup_mutex to modify cgroups.
1182  *
1183  * Any task can increment and decrement the count field without lock.
1184  * So in general, code holding cgroup_mutex can't rely on the count
1185  * field not changing.  However, if the count goes to zero, then only
1186  * cgroup_attach_task() can increment it again.  Because a count of zero
1187  * means that no tasks are currently attached, therefore there is no
1188  * way a task attached to that cgroup can fork (the other way to
1189  * increment the count).  So code holding cgroup_mutex can safely
1190  * assume that if the count is zero, it will stay zero. Similarly, if
1191  * a task holds cgroup_mutex on a cgroup with zero count, it
1192  * knows that the cgroup won't be removed, as cgroup_rmdir()
1193  * needs that mutex.
1194  *
1195  * A cgroup can only be deleted if both its 'count' of using tasks
1196  * is zero, and its list of 'children' cgroups is empty.  Since all
1197  * tasks in the system use _some_ cgroup, and since there is always at
1198  * least one task in the system (init, pid == 1), therefore, root cgroup
1199  * always has either children cgroups and/or using tasks.  So we don't
1200  * need a special hack to ensure that root cgroup cannot be deleted.
1201  *
1202  * P.S.  One more locking exception.  RCU is used to guard the
1203  * update of a tasks cgroup pointer by cgroup_attach_task()
1204  */
1205 
1206 static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
1207 
1208 static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
1209 			      char *buf)
1210 {
1211 	struct cgroup_subsys *ss = cft->ss;
1212 
1213 	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
1214 	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
1215 		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
1216 			 cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
1217 			 cft->name);
1218 	else
1219 		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
1220 	return buf;
1221 }
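
/*
 * Examples: a v1 "memory" subsystem file named "limit_in_bytes" becomes
 * "memory.limit_in_bytes" unless the hierarchy was mounted with
 * "noprefix"; a cgroup core file such as "cgroup.procs" (cft->ss ==
 * NULL) is copied through unchanged.
 */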
1222 
1223 /**
1224  * cgroup_file_mode - deduce file mode of a control file
1225  * @cft: the control file in question
1226  *
1227  * S_IRUGO for read, S_IWUSR for write.
1228  */
1229 static umode_t cgroup_file_mode(const struct cftype *cft)
1230 {
1231 	umode_t mode = 0;
1232 
1233 	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
1234 		mode |= S_IRUGO;
1235 
1236 	if (cft->write_u64 || cft->write_s64 || cft->write) {
1237 		if (cft->flags & CFTYPE_WORLD_WRITABLE)
1238 			mode |= S_IWUGO;
1239 		else
1240 			mode |= S_IWUSR;
1241 	}
1242 
1243 	return mode;
1244 }
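
/*
 * Examples: a read-only file (only ->seq_show set) gets mode 0444; a
 * typical read-write knob (->seq_show plus ->write) gets 0644; adding
 * CFTYPE_WORLD_WRITABLE turns the latter into 0666.
 */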
1245 
1246 /**
1247  * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
1248  * @subtree_control: the new subtree_control mask to consider
1249  * @this_ss_mask: available subsystems
1250  *
1251  * On the default hierarchy, a subsystem may request other subsystems to be
1252  * enabled together through its ->depends_on mask.  In such cases, more
1253  * subsystems than specified in "cgroup.subtree_control" may be enabled.
1254  *
1255  * This function calculates which subsystems need to be enabled if
1256  * @subtree_control is to be applied while restricted to @this_ss_mask.
1257  */
1258 static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
1259 {
1260 	u16 cur_ss_mask = subtree_control;
1261 	struct cgroup_subsys *ss;
1262 	int ssid;
1263 
1264 	lockdep_assert_held(&cgroup_mutex);
1265 
1266 	cur_ss_mask |= cgrp_dfl_implicit_ss_mask;
1267 
1268 	while (true) {
1269 		u16 new_ss_mask = cur_ss_mask;
1270 
1271 		do_each_subsys_mask(ss, ssid, cur_ss_mask) {
1272 			new_ss_mask |= ss->depends_on;
1273 		} while_each_subsys_mask();
1274 
1275 		/*
1276 		 * Mask out subsystems which aren't available.  This can
1277 		 * happen only if some depended-upon subsystems were bound
1278 		 * to non-default hierarchies.
1279 		 */
1280 		new_ss_mask &= this_ss_mask;
1281 
1282 		if (new_ss_mask == cur_ss_mask)
1283 			break;
1284 		cur_ss_mask = new_ss_mask;
1285 	}
1286 
1287 	return cur_ss_mask;
1288 }
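
/*
 * Worked example (with a hypothetical dependency): suppose
 * @subtree_control enables only io and io's ->depends_on includes
 * memory.  The first pass grows the mask to {io, memory}; assuming
 * memory depends on nothing further (and both bits are present in
 * @this_ss_mask), the second pass changes nothing and the loop
 * terminates with both controllers enabled.
 */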
1289 
1290 /**
1291  * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
1292  * @kn: the kernfs_node being serviced
1293  *
1294  * This helper undoes cgroup_kn_lock_live() and should be invoked before
1295  * the method finishes if locking succeeded.  Note that once this function
1296  * returns the cgroup returned by cgroup_kn_lock_live() may become
1297  * inaccessible any time.  If the caller intends to continue to access the
1298  * cgroup, it should pin it before invoking this function.
1299  */
1300 void cgroup_kn_unlock(struct kernfs_node *kn)
1301 {
1302 	struct cgroup *cgrp;
1303 
1304 	if (kernfs_type(kn) == KERNFS_DIR)
1305 		cgrp = kn->priv;
1306 	else
1307 		cgrp = kn->parent->priv;
1308 
1309 	mutex_unlock(&cgroup_mutex);
1310 
1311 	kernfs_unbreak_active_protection(kn);
1312 	cgroup_put(cgrp);
1313 }
1314 
1315 /**
1316  * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
1317  * @kn: the kernfs_node being serviced
1318  * @drain_offline: perform offline draining on the cgroup
1319  *
1320  * This helper is to be used by a cgroup kernfs method currently servicing
1321  * @kn.  It breaks the active protection, performs cgroup locking and
1322  * verifies that the associated cgroup is alive.  Returns the cgroup if
1323  * alive; otherwise, %NULL.  A successful return should be undone by a
1324  * matching cgroup_kn_unlock() invocation.  If @drain_offline is %true, the
1325  * cgroup is drained of offlining csses before return.
1326  *
1327  * Any cgroup kernfs method implementation which requires locking the
1328  * associated cgroup should use this helper.  It avoids nesting cgroup
1329  * locking under kernfs active protection and allows all kernfs operations
1330  * including self-removal.
1331  */
1332 struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline)
1333 {
1334 	struct cgroup *cgrp;
1335 
1336 	if (kernfs_type(kn) == KERNFS_DIR)
1337 		cgrp = kn->priv;
1338 	else
1339 		cgrp = kn->parent->priv;
1340 
1341 	/*
1342 	 * We're gonna grab cgroup_mutex which nests outside kernfs
1343 	 * active_ref.  cgroup liveness check alone provides enough
1344 	 * protection against removal.  Ensure @cgrp stays accessible and
1345 	 * break the active_ref protection.
1346 	 */
1347 	if (!cgroup_tryget(cgrp))
1348 		return NULL;
1349 	kernfs_break_active_protection(kn);
1350 
1351 	if (drain_offline)
1352 		cgroup_lock_and_drain_offline(cgrp);
1353 	else
1354 		mutex_lock(&cgroup_mutex);
1355 
1356 	if (!cgroup_is_dead(cgrp))
1357 		return cgrp;
1358 
1359 	cgroup_kn_unlock(kn);
1360 	return NULL;
1361 }
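
/*
 * A minimal usage sketch (my_write() and its update step are
 * hypothetical): a kernfs method brackets its work as follows:
 *
 *	static ssize_t my_write(struct kernfs_open_file *of, char *buf,
 *				size_t nbytes, loff_t off)
 *	{
 *		struct cgroup *cgrp = cgroup_kn_lock_live(of->kn, false);
 *
 *		if (!cgrp)
 *			return -ENODEV;
 *		(update @cgrp here, under cgroup_mutex)
 *		cgroup_kn_unlock(of->kn);
 *		return nbytes;
 *	}
 */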
1362 
1363 static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
1364 {
1365 	char name[CGROUP_FILE_NAME_MAX];
1366 
1367 	lockdep_assert_held(&cgroup_mutex);
1368 
1369 	if (cft->file_offset) {
1370 		struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
1371 		struct cgroup_file *cfile = (void *)css + cft->file_offset;
1372 
1373 		spin_lock_irq(&cgroup_file_kn_lock);
1374 		cfile->kn = NULL;
1375 		spin_unlock_irq(&cgroup_file_kn_lock);
1376 	}
1377 
1378 	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
1379 }
1380 
1381 /**
1382  * css_clear_dir - remove subsys files in a cgroup directory
1383  * @css: target css
1384  */
1385 static void css_clear_dir(struct cgroup_subsys_state *css)
1386 {
1387 	struct cgroup *cgrp = css->cgroup;
1388 	struct cftype *cfts;
1389 
1390 	if (!(css->flags & CSS_VISIBLE))
1391 		return;
1392 
1393 	css->flags &= ~CSS_VISIBLE;
1394 
1395 	list_for_each_entry(cfts, &css->ss->cfts, node)
1396 		cgroup_addrm_files(css, cgrp, cfts, false);
1397 }
1398 
1399 /**
1400  * css_populate_dir - create subsys files in a cgroup directory
1401  * @css: target css
1402  *
1403  * On failure, no file is added.
1404  */
1405 static int css_populate_dir(struct cgroup_subsys_state *css)
1406 {
1407 	struct cgroup *cgrp = css->cgroup;
1408 	struct cftype *cfts, *failed_cfts;
1409 	int ret;
1410 
1411 	if ((css->flags & CSS_VISIBLE) || !cgrp->kn)
1412 		return 0;
1413 
1414 	if (!css->ss) {
1415 		if (cgroup_on_dfl(cgrp))
1416 			cfts = cgroup_base_files;
1417 		else
1418 			cfts = cgroup1_base_files;
1419 
1420 		return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
1421 	}
1422 
1423 	list_for_each_entry(cfts, &css->ss->cfts, node) {
1424 		ret = cgroup_addrm_files(css, cgrp, cfts, true);
1425 		if (ret < 0) {
1426 			failed_cfts = cfts;
1427 			goto err;
1428 		}
1429 	}
1430 
1431 	css->flags |= CSS_VISIBLE;
1432 
1433 	return 0;
1434 err:
1435 	list_for_each_entry(cfts, &css->ss->cfts, node) {
1436 		if (cfts == failed_cfts)
1437 			break;
1438 		cgroup_addrm_files(css, cgrp, cfts, false);
1439 	}
1440 	return ret;
1441 }
1442 
1443 int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
1444 {
1445 	struct cgroup *dcgrp = &dst_root->cgrp;
1446 	struct cgroup_subsys *ss;
1447 	int ssid, i, ret;
1448 
1449 	lockdep_assert_held(&cgroup_mutex);
1450 
1451 	do_each_subsys_mask(ss, ssid, ss_mask) {
1452 		/*
1453 		 * If @ss has non-root csses attached to it, can't move.
1454 		 * If @ss is an implicit controller, it is exempt from this
1455 		 * rule and can be stolen.
1456 		 */
1457 		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
1458 		    !ss->implicit_on_dfl)
1459 			return -EBUSY;
1460 
1461 		/* can't move between two non-dummy roots either */
1462 		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
1463 			return -EBUSY;
1464 	} while_each_subsys_mask();
1465 
1466 	do_each_subsys_mask(ss, ssid, ss_mask) {
1467 		struct cgroup_root *src_root = ss->root;
1468 		struct cgroup *scgrp = &src_root->cgrp;
1469 		struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
1470 		struct css_set *cset;
1471 
1472 		WARN_ON(!css || cgroup_css(dcgrp, ss));
1473 
1474 		/* disable from the source */
1475 		src_root->subsys_mask &= ~(1 << ssid);
1476 		WARN_ON(cgroup_apply_control(scgrp));
1477 		cgroup_finalize_control(scgrp, 0);
1478 
1479 		/* rebind */
1480 		RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
1481 		rcu_assign_pointer(dcgrp->subsys[ssid], css);
1482 		ss->root = dst_root;
1483 		css->cgroup = dcgrp;
1484 
1485 		spin_lock_irq(&css_set_lock);
1486 		hash_for_each(css_set_table, i, cset, hlist)
1487 			list_move_tail(&cset->e_cset_node[ss->id],
1488 				       &dcgrp->e_csets[ss->id]);
1489 		spin_unlock_irq(&css_set_lock);
1490 
1491 		/* default hierarchy doesn't enable controllers by default */
1492 		dst_root->subsys_mask |= 1 << ssid;
1493 		if (dst_root == &cgrp_dfl_root) {
1494 			static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
1495 		} else {
1496 			dcgrp->subtree_control |= 1 << ssid;
1497 			static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
1498 		}
1499 
1500 		ret = cgroup_apply_control(dcgrp);
1501 		if (ret)
1502 			pr_warn("partial failure to rebind %s controller (err=%d)\n",
1503 				ss->name, ret);
1504 
1505 		if (ss->bind)
1506 			ss->bind(css);
1507 	} while_each_subsys_mask();
1508 
1509 	kernfs_activate(dcgrp->kn);
1510 	return 0;
1511 }
1512 
1513 int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
1514 		     struct kernfs_root *kf_root)
1515 {
1516 	int len = 0;
1517 	char *buf = NULL;
1518 	struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
1519 	struct cgroup *ns_cgroup;
1520 
1521 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
1522 	if (!buf)
1523 		return -ENOMEM;
1524 
1525 	spin_lock_irq(&css_set_lock);
1526 	ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
1527 	len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
1528 	spin_unlock_irq(&css_set_lock);
1529 
1530 	if (len >= PATH_MAX)
1531 		len = -ERANGE;
1532 	else if (len > 0) {
1533 		seq_escape(sf, buf, " \t\n\\");
1534 		len = 0;
1535 	}
1536 	kfree(buf);
1537 	return len;
1538 }
1539 
1540 static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
1541 {
1542 	pr_err("remount is not allowed\n");
1543 	return -EINVAL;
1544 }
1545 
1546 /*
1547  * To reduce the fork() overhead for systems that are not actually using
1548  * their cgroups capability, we don't maintain the lists running through
1549  * each css_set to its tasks until we see the list actually used - in other
1550  * words after the first mount.
1551  */
1552 static bool use_task_css_set_links __read_mostly;
1553 
1554 static void cgroup_enable_task_cg_lists(void)
1555 {
1556 	struct task_struct *p, *g;
1557 
1558 	spin_lock_irq(&css_set_lock);
1559 
1560 	if (use_task_css_set_links)
1561 		goto out_unlock;
1562 
1563 	use_task_css_set_links = true;
1564 
1565 	/*
1566 	 * We need tasklist_lock because RCU is not safe against
1567 	 * while_each_thread(). Besides, a forking task that has passed
1568 	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
1569 	 * is not guaranteed to have its child immediately visible in the
1570 	 * tasklist if we walk through it with RCU.
1571 	 */
1572 	read_lock(&tasklist_lock);
1573 	do_each_thread(g, p) {
1574 		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
1575 			     task_css_set(p) != &init_css_set);
1576 
1577 		/*
1578 		 * We should check if the process is exiting, otherwise
1579 		 * it will race with cgroup_exit() in that the list
1580 		 * entry won't be deleted though the process has exited.
1581 		 * Do it while holding siglock so that we don't end up
1582 		 * racing against cgroup_exit().
1583 		 *
1584 		 * Interrupts were already disabled while acquiring
1585 		 * the css_set_lock, so we do not need to disable it
1586 		 * again when acquiring the sighand->siglock here.
1587 		 */
1588 		spin_lock(&p->sighand->siglock);
1589 		if (!(p->flags & PF_EXITING)) {
1590 			struct css_set *cset = task_css_set(p);
1591 
1592 			if (!css_set_populated(cset))
1593 				css_set_update_populated(cset, true);
1594 			list_add_tail(&p->cg_list, &cset->tasks);
1595 			get_css_set(cset);
1596 		}
1597 		spin_unlock(&p->sighand->siglock);
1598 	} while_each_thread(g, p);
1599 	read_unlock(&tasklist_lock);
1600 out_unlock:
1601 	spin_unlock_irq(&css_set_lock);
1602 }
1603 
1604 static void init_cgroup_housekeeping(struct cgroup *cgrp)
1605 {
1606 	struct cgroup_subsys *ss;
1607 	int ssid;
1608 
1609 	INIT_LIST_HEAD(&cgrp->self.sibling);
1610 	INIT_LIST_HEAD(&cgrp->self.children);
1611 	INIT_LIST_HEAD(&cgrp->cset_links);
1612 	INIT_LIST_HEAD(&cgrp->pidlists);
1613 	mutex_init(&cgrp->pidlist_mutex);
1614 	cgrp->self.cgroup = cgrp;
1615 	cgrp->self.flags |= CSS_ONLINE;
1616 
1617 	for_each_subsys(ss, ssid)
1618 		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
1619 
1620 	init_waitqueue_head(&cgrp->offline_waitq);
1621 	INIT_WORK(&cgrp->release_agent_work, cgroup1_release_agent);
1622 }
1623 
1624 void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts)
1625 {
1626 	struct cgroup *cgrp = &root->cgrp;
1627 
1628 	INIT_LIST_HEAD(&root->root_list);
1629 	atomic_set(&root->nr_cgrps, 1);
1630 	cgrp->root = root;
1631 	init_cgroup_housekeeping(cgrp);
1632 	idr_init(&root->cgroup_idr);
1633 
1634 	root->flags = opts->flags;
1635 	if (opts->release_agent)
1636 		strcpy(root->release_agent_path, opts->release_agent);
1637 	if (opts->name)
1638 		strcpy(root->name, opts->name);
1639 	if (opts->cpuset_clone_children)
1640 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
1641 }
1642 
1643 int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
1644 {
1645 	LIST_HEAD(tmp_links);
1646 	struct cgroup *root_cgrp = &root->cgrp;
1647 	struct kernfs_syscall_ops *kf_sops;
1648 	struct css_set *cset;
1649 	int i, ret;
1650 
1651 	lockdep_assert_held(&cgroup_mutex);
1652 
1653 	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
1654 	if (ret < 0)
1655 		goto out;
1656 	root_cgrp->id = ret;
1657 	root_cgrp->ancestor_ids[0] = ret;
1658 
1659 	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
1660 			      GFP_KERNEL);
1661 	if (ret)
1662 		goto out;
1663 
1664 	/*
1665 	 * We're accessing css_set_count without locking css_set_lock here,
1666 	 * but that's OK - it can only be increased by someone holding
1667 	 * cgroup_lock, and that's us.  Later rebinding may disable
1668 	 * controllers on the default hierarchy and thus create new csets,
1669 	 * which can't be more than the existing ones.  Allocate 2x.
1670 	 */
1671 	ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links);
1672 	if (ret)
1673 		goto cancel_ref;
1674 
1675 	ret = cgroup_init_root_id(root);
1676 	if (ret)
1677 		goto cancel_ref;
1678 
1679 	kf_sops = root == &cgrp_dfl_root ?
1680 		&cgroup_kf_syscall_ops : &cgroup1_kf_syscall_ops;
1681 
1682 	root->kf_root = kernfs_create_root(kf_sops,
1683 					   KERNFS_ROOT_CREATE_DEACTIVATED,
1684 					   root_cgrp);
1685 	if (IS_ERR(root->kf_root)) {
1686 		ret = PTR_ERR(root->kf_root);
1687 		goto exit_root_id;
1688 	}
1689 	root_cgrp->kn = root->kf_root->kn;
1690 
1691 	ret = css_populate_dir(&root_cgrp->self);
1692 	if (ret)
1693 		goto destroy_root;
1694 
1695 	ret = rebind_subsystems(root, ss_mask);
1696 	if (ret)
1697 		goto destroy_root;
1698 
1699 	trace_cgroup_setup_root(root);
1700 
1701 	/*
1702 	 * There must be no failure case after here, since rebinding takes
1703 	 * care of subsystems' refcounts, which are explicitly dropped in
1704 	 * the failure exit path.
1705 	 */
1706 	list_add(&root->root_list, &cgroup_roots);
1707 	cgroup_root_count++;
1708 
1709 	/*
1710 	 * Link the root cgroup in this hierarchy into all the css_set
1711 	 * objects.
1712 	 */
1713 	spin_lock_irq(&css_set_lock);
1714 	hash_for_each(css_set_table, i, cset, hlist) {
1715 		link_css_set(&tmp_links, cset, root_cgrp);
1716 		if (css_set_populated(cset))
1717 			cgroup_update_populated(root_cgrp, true);
1718 	}
1719 	spin_unlock_irq(&css_set_lock);
1720 
1721 	BUG_ON(!list_empty(&root_cgrp->self.children));
1722 	BUG_ON(atomic_read(&root->nr_cgrps) != 1);
1723 
1724 	kernfs_activate(root_cgrp->kn);
1725 	ret = 0;
1726 	goto out;
1727 
1728 destroy_root:
1729 	kernfs_destroy_root(root->kf_root);
1730 	root->kf_root = NULL;
1731 exit_root_id:
1732 	cgroup_exit_root_id(root);
1733 cancel_ref:
1734 	percpu_ref_exit(&root_cgrp->self.refcnt);
1735 out:
1736 	free_cgrp_cset_links(&tmp_links);
1737 	return ret;
1738 }
1739 
1740 struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
1741 			       struct cgroup_root *root, unsigned long magic,
1742 			       struct cgroup_namespace *ns)
1743 {
1744 	struct dentry *dentry;
1745 	bool new_sb;
1746 
1747 	dentry = kernfs_mount(fs_type, flags, root->kf_root, magic, &new_sb);
1748 
1749 	/*
1750 	 * In a non-init cgroup namespace, return the dentry corresponding to
1751 	 * cgroupns->root_cgrp instead of the root cgroup's dentry.
1752 	 */
1753 	if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
1754 		struct dentry *nsdentry;
1755 		struct cgroup *cgrp;
1756 
1757 		mutex_lock(&cgroup_mutex);
1758 		spin_lock_irq(&css_set_lock);
1759 
1760 		cgrp = cset_cgroup_from_root(ns->root_cset, root);
1761 
1762 		spin_unlock_irq(&css_set_lock);
1763 		mutex_unlock(&cgroup_mutex);
1764 
1765 		nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
1766 		dput(dentry);
1767 		dentry = nsdentry;
1768 	}
1769 
1770 	if (IS_ERR(dentry) || !new_sb)
1771 		cgroup_put(&root->cgrp);
1772 
1773 	return dentry;
1774 }
1775 
1776 static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1777 			 int flags, const char *unused_dev_name,
1778 			 void *data)
1779 {
1780 	struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
1781 	struct dentry *dentry;
1782 
1783 	get_cgroup_ns(ns);
1784 
1785 	/* Check if the caller has permission to mount. */
1786 	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) {
1787 		put_cgroup_ns(ns);
1788 		return ERR_PTR(-EPERM);
1789 	}
1790 
1791 	/*
1792 	 * The first time anyone tries to mount a cgroup, enable the list
1793 	 * linking each css_set to its tasks and fix up all existing tasks.
1794 	 */
1795 	if (!use_task_css_set_links)
1796 		cgroup_enable_task_cg_lists();
1797 
1798 	if (fs_type == &cgroup2_fs_type) {
1799 		if (data) {
1800 			pr_err("cgroup2: unknown option \"%s\"\n", (char *)data);
1801 			put_cgroup_ns(ns);
1802 			return ERR_PTR(-EINVAL);
1803 		}
1804 		cgrp_dfl_visible = true;
1805 		cgroup_get(&cgrp_dfl_root.cgrp);
1806 
1807 		dentry = cgroup_do_mount(&cgroup2_fs_type, flags, &cgrp_dfl_root,
1808 					 CGROUP2_SUPER_MAGIC, ns);
1809 	} else {
1810 		dentry = cgroup1_mount(&cgroup_fs_type, flags, data,
1811 				       CGROUP_SUPER_MAGIC, ns);
1812 	}
1813 
1814 	put_cgroup_ns(ns);
1815 	return dentry;
1816 }
1817 
1818 static void cgroup_kill_sb(struct super_block *sb)
1819 {
1820 	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
1821 	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1822 
1823 	/*
1824 	 * If @root doesn't have any mounts or children, start killing it.
1825 	 * This prevents new mounts by disabling percpu_ref_tryget_live().
1826 	 * cgroup_mount() may wait for @root's release.
1827 	 *
1828 	 * And don't kill the default root.
1829 	 */
1830 	if (!list_empty(&root->cgrp.self.children) ||
1831 	    root == &cgrp_dfl_root)
1832 		cgroup_put(&root->cgrp);
1833 	else
1834 		percpu_ref_kill(&root->cgrp.self.refcnt);
1835 
1836 	kernfs_kill_sb(sb);
1837 }
1838 
1839 struct file_system_type cgroup_fs_type = {
1840 	.name = "cgroup",
1841 	.mount = cgroup_mount,
1842 	.kill_sb = cgroup_kill_sb,
1843 	.fs_flags = FS_USERNS_MOUNT,
1844 };
1845 
1846 static struct file_system_type cgroup2_fs_type = {
1847 	.name = "cgroup2",
1848 	.mount = cgroup_mount,
1849 	.kill_sb = cgroup_kill_sb,
1850 	.fs_flags = FS_USERNS_MOUNT,
1851 };
1852 
1853 int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
1854 			  struct cgroup_namespace *ns)
1855 {
1856 	struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
1857 
1858 	return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
1859 }
1860 
1861 int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
1862 		   struct cgroup_namespace *ns)
1863 {
1864 	int ret;
1865 
1866 	mutex_lock(&cgroup_mutex);
1867 	spin_lock_irq(&css_set_lock);
1868 
1869 	ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
1870 
1871 	spin_unlock_irq(&css_set_lock);
1872 	mutex_unlock(&cgroup_mutex);
1873 
1874 	return ret;
1875 }
1876 EXPORT_SYMBOL_GPL(cgroup_path_ns);
1877 
1878 /**
1879  * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
1880  * @task: target task
1881  * @buf: the buffer to write the path into
1882  * @buflen: the length of the buffer
1883  *
1884  * Determine @task's cgroup on the first (the one with the lowest non-zero
1885  * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
1886  * function grabs cgroup_mutex and shouldn't be used inside locks used by
1887  * function grabs cgroup_mutex and shouldn't be called while holding locks
1888  * that cgroup controller callbacks may also take.
1889  * Return value is the same as kernfs_path().
1890  */
1891 int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
1892 {
1893 	struct cgroup_root *root;
1894 	struct cgroup *cgrp;
1895 	int hierarchy_id = 1;
1896 	int ret;
1897 
1898 	mutex_lock(&cgroup_mutex);
1899 	spin_lock_irq(&css_set_lock);
1900 
1901 	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
1902 
1903 	if (root) {
1904 		cgrp = task_cgroup_from_root(task, root);
1905 		ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
1906 	} else {
1907 		/* if no hierarchy exists, everyone is in "/" */
1908 		ret = strlcpy(buf, "/", buflen);
1909 	}
1910 
1911 	spin_unlock_irq(&css_set_lock);
1912 	mutex_unlock(&cgroup_mutex);
1913 	return ret;
1914 }
1915 EXPORT_SYMBOL_GPL(task_cgroup_path);
1916 
1917 /**
1918  * cgroup_migrate_add_task - add a migration target task to a migration context
1919  * @task: target task
1920  * @mgctx: target migration context
1921  *
1922  * Add @task, which is a migration target, to @mgctx->tset.  This function
1923  * becomes a noop if @task doesn't need to be migrated.  @task's css_set
1924  * should have been added as a migration source and @task->cg_list will be
1925  * moved from the css_set's tasks list to its mg_tasks list.
1926  */
1927 static void cgroup_migrate_add_task(struct task_struct *task,
1928 				    struct cgroup_mgctx *mgctx)
1929 {
1930 	struct css_set *cset;
1931 
1932 	lockdep_assert_held(&css_set_lock);
1933 
1934 	/* @task either already exited or can't exit until the end */
1935 	if (task->flags & PF_EXITING)
1936 		return;
1937 
1938 	/* leave @task alone if post_fork() hasn't linked it yet */
1939 	if (list_empty(&task->cg_list))
1940 		return;
1941 
1942 	cset = task_css_set(task);
1943 	if (!cset->mg_src_cgrp)
1944 		return;
1945 
1946 	list_move_tail(&task->cg_list, &cset->mg_tasks);
1947 	if (list_empty(&cset->mg_node))
1948 		list_add_tail(&cset->mg_node,
1949 			      &mgctx->tset.src_csets);
1950 	if (list_empty(&cset->mg_dst_cset->mg_node))
1951 		list_add_tail(&cset->mg_dst_cset->mg_node,
1952 			      &mgctx->tset.dst_csets);
1953 }
1954 
1955 /**
1956  * cgroup_taskset_first - reset taskset and return the first task
1957  * @tset: taskset of interest
1958  * @dst_cssp: output variable for the destination css
1959  *
1960  * @tset iteration is initialized and the first task is returned.
1961  */
1962 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
1963 					 struct cgroup_subsys_state **dst_cssp)
1964 {
1965 	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
1966 	tset->cur_task = NULL;
1967 
1968 	return cgroup_taskset_next(tset, dst_cssp);
1969 }
1970 
1971 /**
1972  * cgroup_taskset_next - iterate to the next task in taskset
1973  * @tset: taskset of interest
1974  * @dst_cssp: output variable for the destination css
1975  *
1976  * Return the next task in @tset.  Iteration must have been initialized
1977  * with cgroup_taskset_first().
1978  */
1979 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
1980 					struct cgroup_subsys_state **dst_cssp)
1981 {
1982 	struct css_set *cset = tset->cur_cset;
1983 	struct task_struct *task = tset->cur_task;
1984 
1985 	while (&cset->mg_node != tset->csets) {
1986 		if (!task)
1987 			task = list_first_entry(&cset->mg_tasks,
1988 						struct task_struct, cg_list);
1989 		else
1990 			task = list_next_entry(task, cg_list);
1991 
1992 		if (&task->cg_list != &cset->mg_tasks) {
1993 			tset->cur_cset = cset;
1994 			tset->cur_task = task;
1995 
1996 			/*
1997 			 * This function may be called both before and
1998 			 * after cgroup_migrate_execute().  The two cases
1999 			 * can be distinguished by looking at whether @cset
2000 			 * has its ->mg_dst_cset set.
2001 			 */
2002 			if (cset->mg_dst_cset)
2003 				*dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
2004 			else
2005 				*dst_cssp = cset->subsys[tset->ssid];
2006 
2007 			return task;
2008 		}
2009 
2010 		cset = list_next_entry(cset, mg_node);
2011 		task = NULL;
2012 	}
2013 
2014 	return NULL;
2015 }
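
/*
 * Usage sketch: a controller's ->attach() callback typically walks the
 * taskset via the cgroup_taskset_for_each() wrapper, which is built on
 * cgroup_taskset_first()/cgroup_taskset_next() above.  Illustrative
 * only - example_attach() and example_apply() are hypothetical:
 *
 *	static void example_attach(struct cgroup_taskset *tset)
 *	{
 *		struct cgroup_subsys_state *css;
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			example_apply(css, task);
 *	}
 */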
2016 
2017 /**
2018  * cgroup_migrate_execute - migrate a taskset
2019  * @mgctx: migration context
2020  *
2021  * Migrate tasks in @mgctx as setup by migration preparation functions.
2022  * This function fails iff one of the ->can_attach callbacks fails and
2023  * guarantees that either all or none of the tasks in @mgctx are migrated.
2024  * @mgctx is consumed regardless of success.
2025  */
2026 static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
2027 {
2028 	struct cgroup_taskset *tset = &mgctx->tset;
2029 	struct cgroup_subsys *ss;
2030 	struct task_struct *task, *tmp_task;
2031 	struct css_set *cset, *tmp_cset;
2032 	int ssid, failed_ssid, ret;
2033 
2034 	/* methods shouldn't be called if no task is actually migrating */
2035 	if (list_empty(&tset->src_csets))
2036 		return 0;
2037 
2038 	/* check that we can legitimately attach to the cgroup */
2039 	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2040 		if (ss->can_attach) {
2041 			tset->ssid = ssid;
2042 			ret = ss->can_attach(tset);
2043 			if (ret) {
2044 				failed_ssid = ssid;
2045 				goto out_cancel_attach;
2046 			}
2047 		}
2048 	} while_each_subsys_mask();
2049 
2050 	/*
2051 	 * Now that we're guaranteed success, proceed to move all tasks to
2052 	 * the new cgroup.  There are no failure cases after here, so this
2053 	 * is the commit point.
2054 	 */
2055 	spin_lock_irq(&css_set_lock);
2056 	list_for_each_entry(cset, &tset->src_csets, mg_node) {
2057 		list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
2058 			struct css_set *from_cset = task_css_set(task);
2059 			struct css_set *to_cset = cset->mg_dst_cset;
2060 
2061 			get_css_set(to_cset);
2062 			css_set_move_task(task, from_cset, to_cset, true);
2063 			put_css_set_locked(from_cset);
2064 		}
2065 	}
2066 	spin_unlock_irq(&css_set_lock);
2067 
2068 	/*
2069 	 * Migration is committed, all target tasks are now on dst_csets.
2070 	 * Nothing is sensitive to fork() after this point.  Notify
2071 	 * controllers that migration is complete.
2072 	 */
2073 	tset->csets = &tset->dst_csets;
2074 
2075 	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2076 		if (ss->attach) {
2077 			tset->ssid = ssid;
2078 			ss->attach(tset);
2079 		}
2080 	} while_each_subsys_mask();
2081 
2082 	ret = 0;
2083 	goto out_release_tset;
2084 
2085 out_cancel_attach:
2086 	do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
2087 		if (ssid == failed_ssid)
2088 			break;
2089 		if (ss->cancel_attach) {
2090 			tset->ssid = ssid;
2091 			ss->cancel_attach(tset);
2092 		}
2093 	} while_each_subsys_mask();
2094 out_release_tset:
2095 	spin_lock_irq(&css_set_lock);
2096 	list_splice_init(&tset->dst_csets, &tset->src_csets);
2097 	list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
2098 		list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2099 		list_del_init(&cset->mg_node);
2100 	}
2101 	spin_unlock_irq(&css_set_lock);
2102 	return ret;
2103 }
2104 
2105 /**
2106  * cgroup_may_migrate_to - verify whether a cgroup can be migration destination
2107  * @dst_cgrp: destination cgroup to test
2108  *
2109  * On the default hierarchy, except for the root, subtree_control must be
2110  * zero for migration destination cgroups with tasks so that child cgroups
2111  * don't compete against tasks.
2112  */
2113 bool cgroup_may_migrate_to(struct cgroup *dst_cgrp)
2114 {
2115 	return !cgroup_on_dfl(dst_cgrp) || !cgroup_parent(dst_cgrp) ||
2116 		!dst_cgrp->subtree_control;
2117 }
2118 
2119 /**
2120  * cgroup_migrate_finish - cleanup after attach
2121  * @mgctx: migration context
2122  *
2123  * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
2124  * those functions for details.
2125  */
2126 void cgroup_migrate_finish(struct cgroup_mgctx *mgctx)
2127 {
2128 	LIST_HEAD(preloaded);
2129 	struct css_set *cset, *tmp_cset;
2130 
2131 	lockdep_assert_held(&cgroup_mutex);
2132 
2133 	spin_lock_irq(&css_set_lock);
2134 
2135 	list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded);
2136 	list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded);
2137 
2138 	list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) {
2139 		cset->mg_src_cgrp = NULL;
2140 		cset->mg_dst_cgrp = NULL;
2141 		cset->mg_dst_cset = NULL;
2142 		list_del_init(&cset->mg_preload_node);
2143 		put_css_set_locked(cset);
2144 	}
2145 
2146 	spin_unlock_irq(&css_set_lock);
2147 }
2148 
2149 /**
2150  * cgroup_migrate_add_src - add a migration source css_set
2151  * @src_cset: the source css_set to add
2152  * @dst_cgrp: the destination cgroup
2153  * @mgctx: migration context
2154  *
2155  * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
2156  * @src_cset and add it to @mgctx->src_csets, which should later be cleaned
2157  * up by cgroup_migrate_finish().
2158  *
2159  * This function may be called without holding cgroup_threadgroup_rwsem
2160  * even if the target is a process.  Threads may be created and destroyed,
2161  * but as long as cgroup_mutex is not dropped, no new css_set can be put
2162  * into play and the preloaded css_sets are guaranteed to cover all
2163  * migrations.
2164  */
2165 void cgroup_migrate_add_src(struct css_set *src_cset,
2166 			    struct cgroup *dst_cgrp,
2167 			    struct cgroup_mgctx *mgctx)
2168 {
2169 	struct cgroup *src_cgrp;
2170 
2171 	lockdep_assert_held(&cgroup_mutex);
2172 	lockdep_assert_held(&css_set_lock);
2173 
2174 	/*
2175 	 * If ->dead, @src_cset is associated with one or more dead cgroups
2176 	 * and doesn't contain any migratable tasks.  Ignore it early so
2177 	 * that the rest of the migration path doesn't get confused by it.
2178 	 */
2179 	if (src_cset->dead)
2180 		return;
2181 
2182 	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
2183 
2184 	if (!list_empty(&src_cset->mg_preload_node))
2185 		return;
2186 
2187 	WARN_ON(src_cset->mg_src_cgrp);
2188 	WARN_ON(src_cset->mg_dst_cgrp);
2189 	WARN_ON(!list_empty(&src_cset->mg_tasks));
2190 	WARN_ON(!list_empty(&src_cset->mg_node));
2191 
2192 	src_cset->mg_src_cgrp = src_cgrp;
2193 	src_cset->mg_dst_cgrp = dst_cgrp;
2194 	get_css_set(src_cset);
2195 	list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets);
2196 }
2197 
2198 /**
2199  * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
2200  * @mgctx: migration context
2201  *
2202  * Tasks are about to be moved and all the source css_sets have been
2203  * preloaded to @mgctx->preloaded_src_csets.  This function looks up and
2204  * pins all destination css_sets, links each to its source, and appends them
2205  * to @mgctx->preloaded_dst_csets.
2206  *
2207  * This function must be called after cgroup_migrate_add_src() has been
2208  * called on each migration source css_set.  After migration is performed
2209  * using cgroup_migrate(), cgroup_migrate_finish() must be called on
2210  * @mgctx.
2211  */
2212 int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx)
2213 {
2214 	struct css_set *src_cset, *tmp_cset;
2215 
2216 	lockdep_assert_held(&cgroup_mutex);
2217 
2218 	/* look up the dst cset for each src cset and link it to src */
2219 	list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets,
2220 				 mg_preload_node) {
2221 		struct css_set *dst_cset;
2222 		struct cgroup_subsys *ss;
2223 		int ssid;
2224 
2225 		dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
2226 		if (!dst_cset)
2227 			goto err;
2228 
2229 		WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
2230 
2231 		/*
2232 		 * If src cset equals dst, it's a noop.  Drop the src.
2233 		 * cgroup_migrate() will skip the cset too.  Note that we
2234 		 * can't handle src == dst as some nodes are used by both.
2235 		 */
2236 		if (src_cset == dst_cset) {
2237 			src_cset->mg_src_cgrp = NULL;
2238 			src_cset->mg_dst_cgrp = NULL;
2239 			list_del_init(&src_cset->mg_preload_node);
2240 			put_css_set(src_cset);
2241 			put_css_set(dst_cset);
2242 			continue;
2243 		}
2244 
2245 		src_cset->mg_dst_cset = dst_cset;
2246 
2247 		if (list_empty(&dst_cset->mg_preload_node))
2248 			list_add_tail(&dst_cset->mg_preload_node,
2249 				      &mgctx->preloaded_dst_csets);
2250 		else
2251 			put_css_set(dst_cset);
2252 
2253 		for_each_subsys(ss, ssid)
2254 			if (src_cset->subsys[ssid] != dst_cset->subsys[ssid])
2255 				mgctx->ss_mask |= 1 << ssid;
2256 	}
2257 
2258 	return 0;
2259 err:
2260 	cgroup_migrate_finish(mgctx);
2261 	return -ENOMEM;
2262 }
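
/*
 * Sketch of the full migration sequence as cgroup_attach_task() below
 * performs it (error handling elided):
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *
 *	spin_lock_irq(&css_set_lock);
 *	cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
 *	spin_unlock_irq(&css_set_lock);
 *
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(task, threadgroup, &mgctx);
 *	cgroup_migrate_finish(&mgctx);
 */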
2263 
2264 /**
2265  * cgroup_migrate - migrate a process or task to a cgroup
2266  * @leader: the leader of the process or the task to migrate
2267  * @threadgroup: whether @leader points to the whole process or a single task
2268  * @mgctx: migration context
2269  *
2270  * Migrate a process or task denoted by @leader.  If migrating a process,
2271  * the caller must be holding cgroup_threadgroup_rwsem.  The caller is also
2272  * responsible for invoking cgroup_migrate_add_src() and
2273  * cgroup_migrate_prepare_dst() on the targets before invoking this
2274  * function and following up with cgroup_migrate_finish().
2275  *
2276  * As long as a controller's ->can_attach() doesn't fail, this function is
2277  * guaranteed to succeed.  This means that, excluding ->can_attach()
2278  * failure, when migrating multiple targets, the success or failure can be
2279  * decided for all targets by invoking cgroup_migrate_prepare_dst() before
2280  * actually starting the migration.
2281  */
2282 int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2283 		   struct cgroup_mgctx *mgctx)
2284 {
2285 	struct task_struct *task;
2286 
2287 	/*
2288 	 * Prevent freeing of tasks while we take a snapshot. Tasks that are
2289 	 * already PF_EXITING could be freed from underneath us unless we
2290 	 * take an rcu_read_lock.
2291 	 */
2292 	spin_lock_irq(&css_set_lock);
2293 	rcu_read_lock();
2294 	task = leader;
2295 	do {
2296 		cgroup_migrate_add_task(task, mgctx);
2297 		if (!threadgroup)
2298 			break;
2299 	} while_each_thread(leader, task);
2300 	rcu_read_unlock();
2301 	spin_unlock_irq(&css_set_lock);
2302 
2303 	return cgroup_migrate_execute(mgctx);
2304 }
2305 
2306 /**
2307  * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
2308  * @dst_cgrp: the cgroup to attach to
2309  * @leader: the task or the leader of the threadgroup to be attached
2310  * @threadgroup: attach the whole threadgroup?
2311  *
2312  * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
2313  */
2314 int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
2315 		       bool threadgroup)
2316 {
2317 	DEFINE_CGROUP_MGCTX(mgctx);
2318 	struct task_struct *task;
2319 	int ret;
2320 
2321 	if (!cgroup_may_migrate_to(dst_cgrp))
2322 		return -EBUSY;
2323 
2324 	/* look up all src csets */
2325 	spin_lock_irq(&css_set_lock);
2326 	rcu_read_lock();
2327 	task = leader;
2328 	do {
2329 		cgroup_migrate_add_src(task_css_set(task), dst_cgrp, &mgctx);
2330 		if (!threadgroup)
2331 			break;
2332 	} while_each_thread(leader, task);
2333 	rcu_read_unlock();
2334 	spin_unlock_irq(&css_set_lock);
2335 
2336 	/* prepare dst csets and commit */
2337 	ret = cgroup_migrate_prepare_dst(&mgctx);
2338 	if (!ret)
2339 		ret = cgroup_migrate(leader, threadgroup, &mgctx);
2340 
2341 	cgroup_migrate_finish(&mgctx);
2342 
2343 	if (!ret)
2344 		trace_cgroup_attach_task(dst_cgrp, leader, threadgroup);
2345 
2346 	return ret;
2347 }
2348 
2349 static int cgroup_procs_write_permission(struct task_struct *task,
2350 					 struct cgroup *dst_cgrp,
2351 					 struct kernfs_open_file *of)
2352 {
2353 	int ret = 0;
2354 
2355 	if (cgroup_on_dfl(dst_cgrp)) {
2356 		struct super_block *sb = of->file->f_path.dentry->d_sb;
2357 		struct cgroup *cgrp;
2358 		struct inode *inode;
2359 
2360 		spin_lock_irq(&css_set_lock);
2361 		cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
2362 		spin_unlock_irq(&css_set_lock);
2363 
2364 		while (!cgroup_is_descendant(dst_cgrp, cgrp))
2365 			cgrp = cgroup_parent(cgrp);
2366 
2367 		ret = -ENOMEM;
2368 		inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
2369 		if (inode) {
2370 			ret = inode_permission(inode, MAY_WRITE);
2371 			iput(inode);
2372 		}
2373 	} else {
2374 		const struct cred *cred = current_cred();
2375 		const struct cred *tcred = get_task_cred(task);
2376 
2377 		/*
2378 		 * even if we're attaching all tasks in the thread group,
2379 		 * we only need to check permissions on one of them.
2380 		 */
2381 		if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
2382 		    !uid_eq(cred->euid, tcred->uid) &&
2383 		    !uid_eq(cred->euid, tcred->suid))
2384 			ret = -EACCES;
2385 		put_cred(tcred);
2386 	}
2387 
2388 	return ret;
2389 }
2390 
2391 /*
2392  * Find the task_struct of the task to attach by vpid and pass it along to
2393  * cgroup_attach_task(), attaching either the task itself or all tasks in
2394  * its threadgroup.  Locks cgroup_mutex and the threadgroup rwsem.
2395  */
2396 ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2397 			     size_t nbytes, loff_t off, bool threadgroup)
2398 {
2399 	struct task_struct *tsk;
2400 	struct cgroup_subsys *ss;
2401 	struct cgroup *cgrp;
2402 	pid_t pid;
2403 	int ssid, ret;
2404 
2405 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2406 		return -EINVAL;
2407 
2408 	cgrp = cgroup_kn_lock_live(of->kn, false);
2409 	if (!cgrp)
2410 		return -ENODEV;
2411 
2412 	percpu_down_write(&cgroup_threadgroup_rwsem);
2413 	rcu_read_lock();
2414 	if (pid) {
2415 		tsk = find_task_by_vpid(pid);
2416 		if (!tsk) {
2417 			ret = -ESRCH;
2418 			goto out_unlock_rcu;
2419 		}
2420 	} else {
2421 		tsk = current;
2422 	}
2423 
2424 	if (threadgroup)
2425 		tsk = tsk->group_leader;
2426 
2427 	/*
2428 	 * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2429 	 * If userland migrates such a kthread to a non-root cgroup, it can
2430 	 * become trapped in a cpuset, or an RT kthread may be born in a
2431 	 * cgroup with no rt_runtime allocated.  Just say no.
2432 	 */
2433 	if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
2434 		ret = -EINVAL;
2435 		goto out_unlock_rcu;
2436 	}
2437 
2438 	get_task_struct(tsk);
2439 	rcu_read_unlock();
2440 
2441 	ret = cgroup_procs_write_permission(tsk, cgrp, of);
2442 	if (!ret)
2443 		ret = cgroup_attach_task(cgrp, tsk, threadgroup);
2444 
2445 	put_task_struct(tsk);
2446 	goto out_unlock_threadgroup;
2447 
2448 out_unlock_rcu:
2449 	rcu_read_unlock();
2450 out_unlock_threadgroup:
2451 	percpu_up_write(&cgroup_threadgroup_rwsem);
2452 	for_each_subsys(ss, ssid)
2453 		if (ss->post_attach)
2454 			ss->post_attach();
2455 	cgroup_kn_unlock(of->kn);
2456 	return ret ?: nbytes;
2457 }
2458 
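/*
 * Handler for "cgroup.procs" writes.  For example, writing "1234"
 * migrates pid 1234's whole thread group into the cgroup, and writing
 * "0" migrates the writing task's own thread group (pid 0 maps to
 * current in __cgroup_procs_write() above).
 */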
2459 ssize_t cgroup_procs_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
2460 			   loff_t off)
2461 {
2462 	return __cgroup_procs_write(of, buf, nbytes, off, true);
2463 }
2464 
2465 static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask)
2466 {
2467 	struct cgroup_subsys *ss;
2468 	bool printed = false;
2469 	int ssid;
2470 
2471 	do_each_subsys_mask(ss, ssid, ss_mask) {
2472 		if (printed)
2473 			seq_putc(seq, ' ');
2474 		seq_printf(seq, "%s", ss->name);
2475 		printed = true;
2476 	} while_each_subsys_mask();
2477 	if (printed)
2478 		seq_putc(seq, '\n');
2479 }
2480 
2481 /* show controllers which are enabled from the parent */
2482 static int cgroup_controllers_show(struct seq_file *seq, void *v)
2483 {
2484 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2485 
2486 	cgroup_print_ss_mask(seq, cgroup_control(cgrp));
2487 	return 0;
2488 }
2489 
2490 /* show controllers which are enabled for a given cgroup's children */
2491 static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
2492 {
2493 	struct cgroup *cgrp = seq_css(seq)->cgroup;
2494 
2495 	cgroup_print_ss_mask(seq, cgrp->subtree_control);
2496 	return 0;
2497 }
2498 
2499 /**
2500  * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
2501  * @cgrp: root of the subtree to update csses for
2502  *
2503  * @cgrp's control masks have changed and its subtree's css associations
2504  * need to be updated accordingly.  This function looks up all css_sets
2505  * which are attached to the subtree, creates the matching updated css_sets
2506  * and migrates the tasks to the new ones.
2507  */
2508 static int cgroup_update_dfl_csses(struct cgroup *cgrp)
2509 {
2510 	DEFINE_CGROUP_MGCTX(mgctx);
2511 	struct cgroup_subsys_state *d_css;
2512 	struct cgroup *dsct;
2513 	struct css_set *src_cset;
2514 	int ret;
2515 
2516 	lockdep_assert_held(&cgroup_mutex);
2517 
2518 	percpu_down_write(&cgroup_threadgroup_rwsem);
2519 
2520 	/* look up all csses currently attached to @cgrp's subtree */
2521 	spin_lock_irq(&css_set_lock);
2522 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2523 		struct cgrp_cset_link *link;
2524 
2525 		list_for_each_entry(link, &dsct->cset_links, cset_link)
2526 			cgroup_migrate_add_src(link->cset, dsct, &mgctx);
2527 	}
2528 	spin_unlock_irq(&css_set_lock);
2529 
2530 	/* each src cset's dst was recorded by cgroup_migrate_add_src() above */
2531 	ret = cgroup_migrate_prepare_dst(&mgctx);
2532 	if (ret)
2533 		goto out_finish;
2534 
2535 	spin_lock_irq(&css_set_lock);
2536 	list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) {
2537 		struct task_struct *task, *ntask;
2538 
2539 		/* all tasks in src_csets need to be migrated */
2540 		list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
2541 			cgroup_migrate_add_task(task, &mgctx);
2542 	}
2543 	spin_unlock_irq(&css_set_lock);
2544 
2545 	ret = cgroup_migrate_execute(&mgctx);
2546 out_finish:
2547 	cgroup_migrate_finish(&mgctx);
2548 	percpu_up_write(&cgroup_threadgroup_rwsem);
2549 	return ret;
2550 }
2551 
2552 /**
2553  * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses
2554  * @cgrp: root of the target subtree
2555  *
2556  * Because css offlining is asynchronous, userland may try to re-enable a
2557  * controller while the previous css is still around.  This function grabs
2558  * cgroup_mutex and drains the previous css instances of @cgrp's subtree.
2559  */
2560 void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
2561 	__acquires(&cgroup_mutex)
2562 {
2563 	struct cgroup *dsct;
2564 	struct cgroup_subsys_state *d_css;
2565 	struct cgroup_subsys *ss;
2566 	int ssid;
2567 
2568 restart:
2569 	mutex_lock(&cgroup_mutex);
2570 
2571 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
2572 		for_each_subsys(ss, ssid) {
2573 			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
2574 			DEFINE_WAIT(wait);
2575 
2576 			if (!css || !percpu_ref_is_dying(&css->refcnt))
2577 				continue;
2578 
2579 			cgroup_get(dsct);
2580 			prepare_to_wait(&dsct->offline_waitq, &wait,
2581 					TASK_UNINTERRUPTIBLE);
2582 
2583 			mutex_unlock(&cgroup_mutex);
2584 			schedule();
2585 			finish_wait(&dsct->offline_waitq, &wait);
2586 
2587 			cgroup_put(dsct);
2588 			goto restart;
2589 		}
2590 	}
2591 }
2592 
2593 /**
2594  * cgroup_save_control - save control masks of a subtree
2595  * @cgrp: root of the target subtree
2596  *
2597  * Save ->subtree_control and ->subtree_ss_mask to the respective old_
2598  * prefixed fields for @cgrp's subtree including @cgrp itself.
2599  */
2600 static void cgroup_save_control(struct cgroup *cgrp)
2601 {
2602 	struct cgroup *dsct;
2603 	struct cgroup_subsys_state *d_css;
2604 
2605 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2606 		dsct->old_subtree_control = dsct->subtree_control;
2607 		dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
2608 	}
2609 }
2610 
2611 /**
2612  * cgroup_propagate_control - refresh control masks of a subtree
2613  * @cgrp: root of the target subtree
2614  *
2615  * For @cgrp and its subtree, ensure ->subtree_ss_mask matches
2616  * ->subtree_control and propagate controller availability through the
2617  * subtree so that descendants don't have unavailable controllers enabled.
2618  */
2619 static void cgroup_propagate_control(struct cgroup *cgrp)
2620 {
2621 	struct cgroup *dsct;
2622 	struct cgroup_subsys_state *d_css;
2623 
2624 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2625 		dsct->subtree_control &= cgroup_control(dsct);
2626 		dsct->subtree_ss_mask =
2627 			cgroup_calc_subtree_ss_mask(dsct->subtree_control,
2628 						    cgroup_ss_mask(dsct));
2629 	}
2630 }
2631 
2632 /**
2633  * cgroup_restore_control - restore control masks of a subtree
2634  * @cgrp: root of the target subtree
2635  *
2636  * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
2637  * prefixed fields for @cgrp's subtree including @cgrp itself.
2638  */
2639 static void cgroup_restore_control(struct cgroup *cgrp)
2640 {
2641 	struct cgroup *dsct;
2642 	struct cgroup_subsys_state *d_css;
2643 
2644 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
2645 		dsct->subtree_control = dsct->old_subtree_control;
2646 		dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
2647 	}
2648 }
2649 
2650 static bool css_visible(struct cgroup_subsys_state *css)
2651 {
2652 	struct cgroup_subsys *ss = css->ss;
2653 	struct cgroup *cgrp = css->cgroup;
2654 
2655 	if (cgroup_control(cgrp) & (1 << ss->id))
2656 		return true;
2657 	if (!(cgroup_ss_mask(cgrp) & (1 << ss->id)))
2658 		return false;
2659 	return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl;
2660 }
2661 
2662 /**
2663  * cgroup_apply_control_enable - enable or show csses according to control
2664  * @cgrp: root of the target subtree
2665  *
2666  * Walk @cgrp's subtree and create new csses or make the existing ones
2667  * visible.  A css is created invisible if it's being implicitly enabled
2668  * through dependency.  An invisible css is made visible when the userland
2669  * explicitly enables it.
2670  *
2671  * Returns 0 on success, -errno on failure.  On failure, csses which have
2672  * been processed already aren't cleaned up.  The caller is responsible for
2673  * cleaning up with cgroup_apply_control_disable().
2674  */
2675 static int cgroup_apply_control_enable(struct cgroup *cgrp)
2676 {
2677 	struct cgroup *dsct;
2678 	struct cgroup_subsys_state *d_css;
2679 	struct cgroup_subsys *ss;
2680 	int ssid, ret;
2681 
2682 	cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
2683 		for_each_subsys(ss, ssid) {
2684 			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
2685 
2686 			WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
2687 
2688 			if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
2689 				continue;
2690 
2691 			if (!css) {
2692 				css = css_create(dsct, ss);
2693 				if (IS_ERR(css))
2694 					return PTR_ERR(css);
2695 			}
2696 
2697 			if (css_visible(css)) {
2698 				ret = css_populate_dir(css);
2699 				if (ret)
2700 					return ret;
2701 			}
2702 		}
2703 	}
2704 
2705 	return 0;
2706 }
2707 
2708 /**
2709  * cgroup_apply_control_disable - kill or hide csses according to control
2710  * @cgrp: root of the target subtree
2711  *
2712  * Walk @cgrp's subtree and kill and hide csses so that they match
2713  * cgroup_ss_mask() and cgroup_visible_mask().
2714  *
2715  * A css is hidden when the userland requests it to be disabled while other
2716  * subsystems still depend on it.  A hidden css must not actively control
2717  * resources and must be in its vanilla state when made visible again later.
2718  * Controllers which may be depended upon should provide ->css_reset() for
2719  * this purpose.
2720  */
2721 static void cgroup_apply_control_disable(struct cgroup *cgrp)
2722 {
2723 	struct cgroup *dsct;
2724 	struct cgroup_subsys_state *d_css;
2725 	struct cgroup_subsys *ss;
2726 	int ssid;
2727 
2728 	cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
2729 		for_each_subsys(ss, ssid) {
2730 			struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
2731 
2732 			WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
2733 
2734 			if (!css)
2735 				continue;
2736 
2737 			if (css->parent &&
2738 			    !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
2739 				kill_css(css);
2740 			} else if (!css_visible(css)) {
2741 				css_clear_dir(css);
2742 				if (ss->css_reset)
2743 					ss->css_reset(css);
2744 			}
2745 		}
2746 	}
2747 }
2748 
2749 /**
2750  * cgroup_apply_control - apply control mask updates to the subtree
2751  * @cgrp: root of the target subtree
2752  *
2753  * Subsystems can be enabled and disabled in a subtree using the following
2754  * steps:
2755  *
2756  * 1. Call cgroup_save_control() to stash the current state.
2757  * 2. Update ->subtree_control masks in the subtree as desired.
2758  * 3. Call cgroup_apply_control() to apply the changes.
2759  * 4. Optionally perform other related operations.
2760  * 5. Call cgroup_finalize_control() to finish up.
2761  *
2762  * This function implements step 3 and propagates the mask changes
2763  * throughout @cgrp's subtree, updates csses accordingly and performs
2764  * process migrations.  See the usage sketch after this function.
2765  */
2766 static int cgroup_apply_control(struct cgroup *cgrp)
2767 {
2768 	int ret;
2769 
2770 	cgroup_propagate_control(cgrp);
2771 
2772 	ret = cgroup_apply_control_enable(cgrp);
2773 	if (ret)
2774 		return ret;
2775 
2776 	/*
2777 	 * At this point, cgroup_e_css() results reflect the new csses
2778 	 * making the following cgroup_update_dfl_csses() properly update
2779 	 * css associations of all tasks in the subtree.
2780 	 */
2781 	ret = cgroup_update_dfl_csses(cgrp);
2782 	if (ret)
2783 		return ret;
2784 
2785 	return 0;
2786 }
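
/*
 * Usage sketch of the whole save/apply/finalize sequence; this is what
 * cgroup_subtree_control_write() below does:
 *
 *	cgroup_save_control(cgrp);
 *
 *	cgrp->subtree_control |= enable;
 *	cgrp->subtree_control &= ~disable;
 *
 *	ret = cgroup_apply_control(cgrp);
 *	cgroup_finalize_control(cgrp, ret);
 */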
2787 
2788 /**
2789  * cgroup_finalize_control - finalize control mask update
2790  * @cgrp: root of the target subtree
2791  * @ret: the result of the update
2792  *
2793  * Finalize control mask update.  See cgroup_apply_control() for more info.
2794  */
2795 static void cgroup_finalize_control(struct cgroup *cgrp, int ret)
2796 {
2797 	if (ret) {
2798 		cgroup_restore_control(cgrp);
2799 		cgroup_propagate_control(cgrp);
2800 	}
2801 
2802 	cgroup_apply_control_disable(cgrp);
2803 }
2804 
2805 /* change the enabled child controllers for a cgroup in the default hierarchy */
2806 static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
2807 					    char *buf, size_t nbytes,
2808 					    loff_t off)
2809 {
2810 	u16 enable = 0, disable = 0;
2811 	struct cgroup *cgrp, *child;
2812 	struct cgroup_subsys *ss;
2813 	char *tok;
2814 	int ssid, ret;
2815 
2816 	/*
2817 	 * Parse input - space separated list of subsystem names prefixed
2818 	 * with either + or -.
2819 	 */
2820 	buf = strstrip(buf);
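	/* e.g. writing "+memory -pids" enables memory and disables pids */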
2821 	while ((tok = strsep(&buf, " "))) {
2822 		if (tok[0] == '\0')
2823 			continue;
2824 		do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) {
2825 			if (!cgroup_ssid_enabled(ssid) ||
2826 			    strcmp(tok + 1, ss->name))
2827 				continue;
2828 
2829 			if (*tok == '+') {
2830 				enable |= 1 << ssid;
2831 				disable &= ~(1 << ssid);
2832 			} else if (*tok == '-') {
2833 				disable |= 1 << ssid;
2834 				enable &= ~(1 << ssid);
2835 			} else {
2836 				return -EINVAL;
2837 			}
2838 			break;
2839 		} while_each_subsys_mask();
2840 		if (ssid == CGROUP_SUBSYS_COUNT)
2841 			return -EINVAL;
2842 	}
2843 
2844 	cgrp = cgroup_kn_lock_live(of->kn, true);
2845 	if (!cgrp)
2846 		return -ENODEV;
2847 
2848 	for_each_subsys(ss, ssid) {
2849 		if (enable & (1 << ssid)) {
2850 			if (cgrp->subtree_control & (1 << ssid)) {
2851 				enable &= ~(1 << ssid);
2852 				continue;
2853 			}
2854 
2855 			if (!(cgroup_control(cgrp) & (1 << ssid))) {
2856 				ret = -ENOENT;
2857 				goto out_unlock;
2858 			}
2859 		} else if (disable & (1 << ssid)) {
2860 			if (!(cgrp->subtree_control & (1 << ssid))) {
2861 				disable &= ~(1 << ssid);
2862 				continue;
2863 			}
2864 
2865 			/* a child has it enabled? */
2866 			cgroup_for_each_live_child(child, cgrp) {
2867 				if (child->subtree_control & (1 << ssid)) {
2868 					ret = -EBUSY;
2869 					goto out_unlock;
2870 				}
2871 			}
2872 		}
2873 	}
2874 
2875 	if (!enable && !disable) {
2876 		ret = 0;
2877 		goto out_unlock;
2878 	}
2879 
2880 	/*
2881 	 * Except for the root, subtree_control must be zero for a cgroup
2882 	 * with tasks so that child cgroups don't compete against tasks.
2883 	 */
2884 	if (enable && cgroup_parent(cgrp)) {
2885 		struct cgrp_cset_link *link;
2886 
2887 		/*
2888 		 * Because namespaces pin csets too, @cgrp->cset_links
2889 		 * might not be empty even when @cgrp is empty.  Walk and
2890 		 * verify each cset.
2891 		 */
2892 		spin_lock_irq(&css_set_lock);
2893 
2894 		ret = 0;
2895 		list_for_each_entry(link, &cgrp->cset_links, cset_link) {
2896 			if (css_set_populated(link->cset)) {
2897 				ret = -EBUSY;
2898 				break;
2899 			}
2900 		}
2901 
2902 		spin_unlock_irq(&css_set_lock);
2903 
2904 		if (ret)
2905 			goto out_unlock;
2906 	}
2907 
2908 	/* save and update control masks and prepare csses */
2909 	cgroup_save_control(cgrp);
2910 
2911 	cgrp->subtree_control |= enable;
2912 	cgrp->subtree_control &= ~disable;
2913 
2914 	ret = cgroup_apply_control(cgrp);
2915 
2916 	cgroup_finalize_control(cgrp, ret);
2917 
2918 	kernfs_activate(cgrp->kn);
2919 	ret = 0;
2920 out_unlock:
2921 	cgroup_kn_unlock(of->kn);
2922 	return ret ?: nbytes;
2923 }
2924 
2925 static int cgroup_events_show(struct seq_file *seq, void *v)
2926 {
2927 	seq_printf(seq, "populated %d\n",
2928 		   cgroup_is_populated(seq_css(seq)->cgroup));
2929 	return 0;
2930 }
2931 
2932 static int cgroup_file_open(struct kernfs_open_file *of)
2933 {
2934 	struct cftype *cft = of->kn->priv;
2935 
2936 	if (cft->open)
2937 		return cft->open(of);
2938 	return 0;
2939 }
2940 
2941 static void cgroup_file_release(struct kernfs_open_file *of)
2942 {
2943 	struct cftype *cft = of->kn->priv;
2944 
2945 	if (cft->release)
2946 		cft->release(of);
2947 }
2948 
2949 static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
2950 				 size_t nbytes, loff_t off)
2951 {
2952 	struct cgroup *cgrp = of->kn->parent->priv;
2953 	struct cftype *cft = of->kn->priv;
2954 	struct cgroup_subsys_state *css;
2955 	int ret;
2956 
2957 	if (cft->write)
2958 		return cft->write(of, buf, nbytes, off);
2959 
2960 	/*
2961 	 * kernfs guarantees that a file isn't deleted with operations in
2962 	 * flight, which means that the matching css remains alive and
2963 	 * doesn't need to be pinned.  The RCU locking is not necessary
2964 	 * either.  It's just for the convenience of using cgroup_css().
2965 	 */
2966 	rcu_read_lock();
2967 	css = cgroup_css(cgrp, cft->ss);
2968 	rcu_read_unlock();
2969 
2970 	if (cft->write_u64) {
2971 		unsigned long long v;
2972 		ret = kstrtoull(buf, 0, &v);
2973 		if (!ret)
2974 			ret = cft->write_u64(css, cft, v);
2975 	} else if (cft->write_s64) {
2976 		long long v;
2977 		ret = kstrtoll(buf, 0, &v);
2978 		if (!ret)
2979 			ret = cft->write_s64(css, cft, v);
2980 	} else {
2981 		ret = -EINVAL;
2982 	}
2983 
2984 	return ret ?: nbytes;
2985 }
2986 
2987 static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
2988 {
2989 	return seq_cft(seq)->seq_start(seq, ppos);
2990 }
2991 
2992 static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
2993 {
2994 	return seq_cft(seq)->seq_next(seq, v, ppos);
2995 }
2996 
2997 static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
2998 {
2999 	if (seq_cft(seq)->seq_stop)
3000 		seq_cft(seq)->seq_stop(seq, v);
3001 }
3002 
3003 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
3004 {
3005 	struct cftype *cft = seq_cft(m);
3006 	struct cgroup_subsys_state *css = seq_css(m);
3007 
3008 	if (cft->seq_show)
3009 		return cft->seq_show(m, arg);
3010 
3011 	if (cft->read_u64)
3012 		seq_printf(m, "%llu\n", cft->read_u64(css, cft));
3013 	else if (cft->read_s64)
3014 		seq_printf(m, "%lld\n", cft->read_s64(css, cft));
3015 	else
3016 		return -EINVAL;
3017 	return 0;
3018 }
3019 
3020 static struct kernfs_ops cgroup_kf_single_ops = {
3021 	.atomic_write_len	= PAGE_SIZE,
3022 	.open			= cgroup_file_open,
3023 	.release		= cgroup_file_release,
3024 	.write			= cgroup_file_write,
3025 	.seq_show		= cgroup_seqfile_show,
3026 };
3027 
3028 static struct kernfs_ops cgroup_kf_ops = {
3029 	.atomic_write_len	= PAGE_SIZE,
3030 	.open			= cgroup_file_open,
3031 	.release		= cgroup_file_release,
3032 	.write			= cgroup_file_write,
3033 	.seq_start		= cgroup_seqfile_start,
3034 	.seq_next		= cgroup_seqfile_next,
3035 	.seq_stop		= cgroup_seqfile_stop,
3036 	.seq_show		= cgroup_seqfile_show,
3037 };
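
/*
 * Sketch of a cftype wired to the helpers above (illustrative only;
 * "example.weight" and both callbacks are hypothetical):
 *
 *	static struct cftype example_files[] = {
 *		{
 *			.name = "example.weight",
 *			.read_u64 = example_weight_read,
 *			.write_u64 = example_weight_write,
 *		},
 *		{ }	// zero-length name terminates the array
 *	};
 *
 * Reads are then served by cgroup_seqfile_show() through ->read_u64
 * and writes by cgroup_file_write() through ->write_u64.
 */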
3038 
3039 /* set uid and gid of cgroup dirs and files to that of the creator */
3040 static int cgroup_kn_set_ugid(struct kernfs_node *kn)
3041 {
3042 	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
3043 			       .ia_uid = current_fsuid(),
3044 			       .ia_gid = current_fsgid(), };
3045 
3046 	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
3047 	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
3048 		return 0;
3049 
3050 	return kernfs_setattr(kn, &iattr);
3051 }
3052 
3053 static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
3054 			   struct cftype *cft)
3055 {
3056 	char name[CGROUP_FILE_NAME_MAX];
3057 	struct kernfs_node *kn;
3058 	struct lock_class_key *key = NULL;
3059 	int ret;
3060 
3061 #ifdef CONFIG_DEBUG_LOCK_ALLOC
3062 	key = &cft->lockdep_key;
3063 #endif
3064 	kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
3065 				  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
3066 				  NULL, key);
3067 	if (IS_ERR(kn))
3068 		return PTR_ERR(kn);
3069 
3070 	ret = cgroup_kn_set_ugid(kn);
3071 	if (ret) {
3072 		kernfs_remove(kn);
3073 		return ret;
3074 	}
3075 
3076 	if (cft->file_offset) {
3077 		struct cgroup_file *cfile = (void *)css + cft->file_offset;
3078 
3079 		spin_lock_irq(&cgroup_file_kn_lock);
3080 		cfile->kn = kn;
3081 		spin_unlock_irq(&cgroup_file_kn_lock);
3082 	}
3083 
3084 	return 0;
3085 }
3086 
3087 /**
3088  * cgroup_addrm_files - add or remove files to a cgroup directory
3089  * @css: the target css
3090  * @cgrp: the target cgroup (usually css->cgroup)
3091  * @cfts: array of cftypes to be added
3092  * @is_add: whether to add or remove
3093  *
3094  * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
3095  * For removals, this function never fails.
3096  */
3097 static int cgroup_addrm_files(struct cgroup_subsys_state *css,
3098 			      struct cgroup *cgrp, struct cftype cfts[],
3099 			      bool is_add)
3100 {
3101 	struct cftype *cft, *cft_end = NULL;
3102 	int ret = 0;
3103 
3104 	lockdep_assert_held(&cgroup_mutex);
3105 
3106 restart:
3107 	for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
3108 		/* does cft->flags tell us to skip this file on @cgrp? */
3109 		if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
3110 			continue;
3111 		if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
3112 			continue;
3113 		if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
3114 			continue;
3115 		if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
3116 			continue;
3117 
3118 		if (is_add) {
3119 			ret = cgroup_add_file(css, cgrp, cft);
3120 			if (ret) {
3121 				pr_warn("%s: failed to add %s, err=%d\n",
3122 					__func__, cft->name, ret);
3123 				cft_end = cft;
3124 				is_add = false;
3125 				goto restart;
3126 			}
3127 		} else {
3128 			cgroup_rm_file(cgrp, cft);
3129 		}
3130 	}
3131 	return ret;
3132 }
3133 
3134 static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
3135 {
3136 	LIST_HEAD(pending);
3137 	struct cgroup_subsys *ss = cfts[0].ss;
3138 	struct cgroup *root = &ss->root->cgrp;
3139 	struct cgroup_subsys_state *css;
3140 	int ret = 0;
3141 
3142 	lockdep_assert_held(&cgroup_mutex);
3143 
3144 	/* add/rm files for all cgroups created before */
3145 	css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
3146 		struct cgroup *cgrp = css->cgroup;
3147 
3148 		if (!(css->flags & CSS_VISIBLE))
3149 			continue;
3150 
3151 		ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
3152 		if (ret)
3153 			break;
3154 	}
3155 
3156 	if (is_add && !ret)
3157 		kernfs_activate(root->kn);
3158 	return ret;
3159 }
3160 
3161 static void cgroup_exit_cftypes(struct cftype *cfts)
3162 {
3163 	struct cftype *cft;
3164 
3165 	for (cft = cfts; cft->name[0] != '\0'; cft++) {
3166 		/* free copy for custom atomic_write_len, see init_cftypes() */
3167 		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
3168 			kfree(cft->kf_ops);
3169 		cft->kf_ops = NULL;
3170 		cft->ss = NULL;
3171 
3172 		/* revert flags set by cgroup core while adding @cfts */
3173 		cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
3174 	}
3175 }
3176 
3177 static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3178 {
3179 	struct cftype *cft;
3180 
3181 	for (cft = cfts; cft->name[0] != '\0'; cft++) {
3182 		struct kernfs_ops *kf_ops;
3183 
3184 		WARN_ON(cft->ss || cft->kf_ops);
3185 
3186 		if (cft->seq_start)
3187 			kf_ops = &cgroup_kf_ops;
3188 		else
3189 			kf_ops = &cgroup_kf_single_ops;
3190 
3191 		/*
3192 		 * Ugh... if @cft wants a custom max_write_len, we need to
3193 		 * make a copy of kf_ops to set its atomic_write_len.
3194 		 */
3195 		if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
3196 			kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
3197 			if (!kf_ops) {
3198 				cgroup_exit_cftypes(cfts);
3199 				return -ENOMEM;
3200 			}
3201 			kf_ops->atomic_write_len = cft->max_write_len;
3202 		}
3203 
3204 		cft->kf_ops = kf_ops;
3205 		cft->ss = ss;
3206 	}
3207 
3208 	return 0;
3209 }
3210 
3211 static int cgroup_rm_cftypes_locked(struct cftype *cfts)
3212 {
3213 	lockdep_assert_held(&cgroup_mutex);
3214 
3215 	if (!cfts || !cfts[0].ss)
3216 		return -ENOENT;
3217 
3218 	list_del(&cfts->node);
3219 	cgroup_apply_cftypes(cfts, false);
3220 	cgroup_exit_cftypes(cfts);
3221 	return 0;
3222 }
3223 
3224 /**
3225  * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
3226  * @cfts: zero-length name terminated array of cftypes
3227  *
3228  * Unregister @cfts.  Files described by @cfts are removed from all
3229  * existing cgroups and all future cgroups won't have them either.  This
3230  * function can be called anytime whether @cfts' subsys is attached or not.
3231  *
3232  * Returns 0 on successful unregistration, -ENOENT if @cfts is not
3233  * registered.
3234  */
3235 int cgroup_rm_cftypes(struct cftype *cfts)
3236 {
3237 	int ret;
3238 
3239 	mutex_lock(&cgroup_mutex);
3240 	ret = cgroup_rm_cftypes_locked(cfts);
3241 	mutex_unlock(&cgroup_mutex);
3242 	return ret;
3243 }
3244 
3245 /**
3246  * cgroup_add_cftypes - add an array of cftypes to a subsystem
3247  * @ss: target cgroup subsystem
3248  * @cfts: zero-length name terminated array of cftypes
3249  *
3250  * Register @cfts to @ss.  Files described by @cfts are created for all
3251  * existing cgroups to which @ss is attached and all future cgroups will
3252  * have them too.  This function can be called anytime whether @ss is
3253  * attached or not.
3254  *
3255  * Returns 0 on successful registration, -errno on failure.  Note that this
3256  * function currently returns 0 as long as @cfts registration is successful
3257  * even if some file creation attempts on existing cgroups fail.
3258  */
3259 static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3260 {
3261 	int ret;
3262 
3263 	if (!cgroup_ssid_enabled(ss->id))
3264 		return 0;
3265 
3266 	if (!cfts || cfts[0].name[0] == '\0')
3267 		return 0;
3268 
3269 	ret = cgroup_init_cftypes(ss, cfts);
3270 	if (ret)
3271 		return ret;
3272 
3273 	mutex_lock(&cgroup_mutex);
3274 
3275 	list_add_tail(&cfts->node, &ss->cfts);
3276 	ret = cgroup_apply_cftypes(cfts, true);
3277 	if (ret)
3278 		cgroup_rm_cftypes_locked(cfts);
3279 
3280 	mutex_unlock(&cgroup_mutex);
3281 	return ret;
3282 }
3283 
3284 /**
3285  * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
3286  * @ss: target cgroup subsystem
3287  * @cfts: zero-length name terminated array of cftypes
3288  *
3289  * Similar to cgroup_add_cftypes() but the added files are only used for
3290  * the default hierarchy.
3291  */
3292 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3293 {
3294 	struct cftype *cft;
3295 
3296 	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3297 		cft->flags |= __CFTYPE_ONLY_ON_DFL;
3298 	return cgroup_add_cftypes(ss, cfts);
3299 }
3300 
3301 /**
3302  * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
3303  * @ss: target cgroup subsystem
3304  * @cfts: zero-length name terminated array of cftypes
3305  *
3306  * Similar to cgroup_add_cftypes() but the added files are only used for
3307  * the legacy hierarchies.
3308  */
3309 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3310 {
3311 	struct cftype *cft;
3312 
3313 	for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3314 		cft->flags |= __CFTYPE_NOT_ON_DFL;
3315 	return cgroup_add_cftypes(ss, cfts);
3316 }
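
/*
 * Registration sketch (hypothetical subsystem and file array):
 *
 *	if (cgroup_add_legacy_cftypes(&example_cgrp_subsys, example_files))
 *		pr_warn("example: failed to register legacy cftypes\n");
 *
 * Registration may happen whether or not the subsystem is currently
 * attached; the files appear on all matching existing and future cgroups.
 */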
3317 
3318 /**
3319  * cgroup_file_notify - generate a file modified event for a cgroup_file
3320  * @cfile: target cgroup_file
3321  *
3322  * @cfile must have been obtained by setting cftype->file_offset.
3323  */
3324 void cgroup_file_notify(struct cgroup_file *cfile)
3325 {
3326 	unsigned long flags;
3327 
3328 	spin_lock_irqsave(&cgroup_file_kn_lock, flags);
3329 	if (cfile->kn)
3330 		kernfs_notify(cfile->kn);
3331 	spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
3332 }
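
/*
 * Pairing sketch for cftype->file_offset and cgroup_file_notify()
 * (hypothetical state struct and callback; this mirrors how event
 * files work):
 *
 *	struct example_state {
 *		struct cgroup_subsys_state css;	// assumed to be first
 *		struct cgroup_file events_file;
 *	};
 *
 *	static struct cftype example_files[] = {
 *		{
 *			.name = "example.events",
 *			.file_offset = offsetof(struct example_state,
 *						events_file),
 *			.seq_show = example_events_show,
 *		},
 *		{ }
 *	};
 *
 * cgroup_add_file() stores the kernfs node at @file_offset from the
 * css, and a state change is later signalled with:
 *
 *	cgroup_file_notify(&state->events_file);
 */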
3333 
3334 /**
3335  * css_next_child - find the next child of a given css
3336  * @pos: the current position (%NULL to initiate traversal)
3337  * @parent: css whose children to walk
3338  *
3339  * This function returns the next child of @parent and should be called
3340  * under either cgroup_mutex or RCU read lock.  The only requirement is
3341  * that @parent and @pos are accessible.  The next sibling is guaranteed to
3342  * be returned regardless of their states.
3343  *
3344  * If a subsystem synchronizes ->css_online() and the start of iteration, a
3345  * css which finished ->css_online() is guaranteed to be visible in the
3346  * future iterations and will stay visible until the last reference is put.
3347  * A css which hasn't finished ->css_online() or already finished
3348  * ->css_offline() may show up during traversal.  It's each subsystem's
3349  * responsibility to synchronize against on/offlining.
3350  */
3351 struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
3352 					   struct cgroup_subsys_state *parent)
3353 {
3354 	struct cgroup_subsys_state *next;
3355 
3356 	cgroup_assert_mutex_or_rcu_locked();
3357 
3358 	/*
3359 	 * @pos could already have been unlinked from the sibling list.
3360 	 * Once a cgroup is removed, its ->sibling.next is no longer
3361 	 * updated when its next sibling changes.  CSS_RELEASED is set when
3362 	 * @pos is taken off list, at which time its next pointer is valid,
3363 	 * and, as releases are serialized, the one pointed to by the next
3364 	 * pointer is guaranteed to not have started release yet.  This
3365 	 * implies that if we observe !CSS_RELEASED on @pos in this RCU
3366 	 * critical section, the one pointed to by its next pointer is
3367 	 * guaranteed to not have finished its RCU grace period even if we
3368 	 * have dropped rcu_read_lock() in between iterations.
3369 	 *
3370 	 * If @pos has CSS_RELEASED set, its next pointer can't be
3371 	 * dereferenced; however, as each css is given a monotonically
3372 	 * increasing unique serial number and always appended to the
3373 	 * sibling list, the next one can be found by walking the parent's
3374 	 * children until the first css with higher serial number than
3375 	 * @pos's.  While this path can be slower, it happens iff iteration
3376 	 * races against release and the race window is very small.
3377 	 */
3378 	if (!pos) {
3379 		next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
3380 	} else if (likely(!(pos->flags & CSS_RELEASED))) {
3381 		next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
3382 	} else {
3383 		list_for_each_entry_rcu(next, &parent->children, sibling)
3384 			if (next->serial_nr > pos->serial_nr)
3385 				break;
3386 	}
3387 
3388 	/*
3389 	 * @next, if not pointing to the head, can be dereferenced and is
3390 	 * the next sibling.
3391 	 */
3392 	if (&next->sibling != &parent->children)
3393 		return next;
3394 	return NULL;
3395 }
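
/*
 * Typical use is through the css_for_each_child() wrapper under the
 * RCU read lock, as css_has_online_children() below does:
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent_css) {
 *		...
 *	}
 *	rcu_read_unlock();
 */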
3396 
3397 /**
3398  * css_next_descendant_pre - find the next descendant for pre-order walk
3399  * @pos: the current position (%NULL to initiate traversal)
3400  * @root: css whose descendants to walk
3401  *
3402  * To be used by css_for_each_descendant_pre().  Find the next descendant
3403  * to visit for pre-order traversal of @root's descendants.  @root is
3404  * included in the iteration and the first node to be visited.
3405  *
3406  * While this function requires cgroup_mutex or RCU read locking, it
3407  * doesn't require the whole traversal to be contained in a single critical
3408  * section.  This function will return the correct next descendant as long
3409  * as both @pos and @root are accessible and @pos is a descendant of @root.
3410  *
3411  * If a subsystem synchronizes ->css_online() and the start of iteration, a
3412  * css which finished ->css_online() is guaranteed to be visible in the
3413  * future iterations and will stay visible until the last reference is put.
3414  * A css which hasn't finished ->css_online() or already finished
3415  * ->css_offline() may show up during traversal.  It's each subsystem's
3416  * responsibility to synchronize against on/offlining.
3417  */
3418 struct cgroup_subsys_state *
3419 css_next_descendant_pre(struct cgroup_subsys_state *pos,
3420 			struct cgroup_subsys_state *root)
3421 {
3422 	struct cgroup_subsys_state *next;
3423 
3424 	cgroup_assert_mutex_or_rcu_locked();
3425 
3426 	/* if first iteration, visit @root */
3427 	if (!pos)
3428 		return root;
3429 
3430 	/* visit the first child if exists */
3431 	next = css_next_child(NULL, pos);
3432 	if (next)
3433 		return next;
3434 
3435 	/* no child, visit my or the closest ancestor's next sibling */
3436 	while (pos != root) {
3437 		next = css_next_child(pos, pos->parent);
3438 		if (next)
3439 			return next;
3440 		pos = pos->parent;
3441 	}
3442 
3443 	return NULL;
3444 }
3445 
3446 /**
3447  * css_rightmost_descendant - return the rightmost descendant of a css
3448  * @pos: css of interest
3449  *
3450  * Return the rightmost descendant of @pos.  If there's no descendant, @pos
3451  * is returned.  This can be used during pre-order traversal to skip the
3452  * subtree of @pos; see the sketch after this function.
3453  *
3454  * While this function requires cgroup_mutex or RCU read locking, it
3455  * doesn't require the whole traversal to be contained in a single critical
3456  * section.  This function will return the correct rightmost descendant as
3457  * long as @pos is accessible.
3458  */
3459 struct cgroup_subsys_state *
3460 css_rightmost_descendant(struct cgroup_subsys_state *pos)
3461 {
3462 	struct cgroup_subsys_state *last, *tmp;
3463 
3464 	cgroup_assert_mutex_or_rcu_locked();
3465 
3466 	do {
3467 		last = pos;
3468 		/* ->prev isn't RCU safe, walk ->next till the end */
3469 		pos = NULL;
3470 		css_for_each_child(tmp, last)
3471 			pos = tmp;
3472 	} while (pos);
3473 
3474 	return last;
3475 }
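
/*
 * Subtree-skipping sketch for a pre-order walk; should_skip() is a
 * hypothetical predicate:
 *
 *	css_for_each_descendant_pre(pos, root) {
 *		if (should_skip(pos)) {
 *			pos = css_rightmost_descendant(pos);
 *			continue;
 *		}
 *		...
 *	}
 */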
3476 
3477 static struct cgroup_subsys_state *
3478 css_leftmost_descendant(struct cgroup_subsys_state *pos)
3479 {
3480 	struct cgroup_subsys_state *last;
3481 
3482 	do {
3483 		last = pos;
3484 		pos = css_next_child(NULL, pos);
3485 	} while (pos);
3486 
3487 	return last;
3488 }
3489 
3490 /**
3491  * css_next_descendant_post - find the next descendant for post-order walk
3492  * @pos: the current position (%NULL to initiate traversal)
3493  * @root: css whose descendants to walk
3494  *
3495  * To be used by css_for_each_descendant_post().  Find the next descendant
3496  * to visit for post-order traversal of @root's descendants.  @root is
3497  * included in the iteration and the last node to be visited.
3498  *
3499  * While this function requires cgroup_mutex or RCU read locking, it
3500  * doesn't require the whole traversal to be contained in a single critical
3501  * section.  This function will return the correct next descendant as long
3502  * as both @pos and @root are accessible and @pos is a descendant of
3503  * @root.
3504  *
3505  * If a subsystem synchronizes ->css_online() and the start of iteration, a
3506  * css which finished ->css_online() is guaranteed to be visible in the
3507  * future iterations and will stay visible until the last reference is put.
3508  * A css which hasn't finished ->css_online() or already finished
3509  * ->css_offline() may show up during traversal.  It's each subsystem's
3510  * responsibility to synchronize against on/offlining.
3511  */
3512 struct cgroup_subsys_state *
3513 css_next_descendant_post(struct cgroup_subsys_state *pos,
3514 			 struct cgroup_subsys_state *root)
3515 {
3516 	struct cgroup_subsys_state *next;
3517 
3518 	cgroup_assert_mutex_or_rcu_locked();
3519 
3520 	/* if first iteration, visit leftmost descendant which may be @root */
3521 	if (!pos)
3522 		return css_leftmost_descendant(root);
3523 
3524 	/* if we visited @root, we're done */
3525 	if (pos == root)
3526 		return NULL;
3527 
3528 	/* if there's an unvisited sibling, visit its leftmost descendant */
3529 	next = css_next_child(pos, pos->parent);
3530 	if (next)
3531 		return css_leftmost_descendant(next);
3532 
3533 	/* no sibling left, visit parent */
3534 	return pos->parent;
3535 }
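
/*
 * Example (illustrative, not part of the original file): post-order is
 * the natural shape for bottom-up teardown where children must be
 * handled before their parent.  "my_teardown()" is hypothetical.
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, root_css)
 *		my_teardown(pos);
 *	rcu_read_unlock();
 */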
3536 
3537 /**
3538  * css_has_online_children - does a css have online children
3539  * @css: the target css
3540  *
3541  * Returns %true if @css has any online children; otherwise, %false.  This
3542  * function can be called from any context but the caller is responsible
3543  * for synchronizing against on/offlining as necessary.
3544  */
3545 bool css_has_online_children(struct cgroup_subsys_state *css)
3546 {
3547 	struct cgroup_subsys_state *child;
3548 	bool ret = false;
3549 
3550 	rcu_read_lock();
3551 	css_for_each_child(child, css) {
3552 		if (child->flags & CSS_ONLINE) {
3553 			ret = true;
3554 			break;
3555 		}
3556 	}
3557 	rcu_read_unlock();
3558 	return ret;
3559 }
3560 
3561 /**
3562  * css_task_iter_advance_css_set - advance a task iterator to the next css_set
3563  * @it: the iterator to advance
3564  *
3565  * Advance @it to the next css_set to walk.
3566  */
3567 static void css_task_iter_advance_css_set(struct css_task_iter *it)
3568 {
3569 	struct list_head *l = it->cset_pos;
3570 	struct cgrp_cset_link *link;
3571 	struct css_set *cset;
3572 
3573 	lockdep_assert_held(&css_set_lock);
3574 
3575 	/* Advance to the next non-empty css_set */
3576 	do {
3577 		l = l->next;
3578 		if (l == it->cset_head) {
3579 			it->cset_pos = NULL;
3580 			it->task_pos = NULL;
3581 			return;
3582 		}
3583 
3584 		if (it->ss) {
3585 			cset = container_of(l, struct css_set,
3586 					    e_cset_node[it->ss->id]);
3587 		} else {
3588 			link = list_entry(l, struct cgrp_cset_link, cset_link);
3589 			cset = link->cset;
3590 		}
3591 	} while (!css_set_populated(cset));
3592 
3593 	it->cset_pos = l;
3594 
3595 	if (!list_empty(&cset->tasks))
3596 		it->task_pos = cset->tasks.next;
3597 	else
3598 		it->task_pos = cset->mg_tasks.next;
3599 
3600 	it->tasks_head = &cset->tasks;
3601 	it->mg_tasks_head = &cset->mg_tasks;
3602 
3603 	/*
3604 	 * We don't keep css_sets locked across iteration steps and thus
3605 	 * need to take steps to ensure that iteration can be resumed after
3606 	 * the lock is re-acquired.  Iteration is performed at two levels -
3607 	 * css_sets and tasks in them.
3608 	 *
3609 	 * Once created, a css_set never leaves its cgroup lists, so a
3610 	 * pinned css_set is guaranteed to stay put and we can resume
3611 	 * iteration afterwards.
3612 	 *
3613 	 * Tasks may leave @cset across iteration steps.  This is resolved
3614 	 * by registering each iterator with the css_set currently being
3615 	 * walked and making css_set_move_task() advance iterators whose
3616 	 * next task is leaving.
3617 	 */
3618 	if (it->cur_cset) {
3619 		list_del(&it->iters_node);
3620 		put_css_set_locked(it->cur_cset);
3621 	}
3622 	get_css_set(cset);
3623 	it->cur_cset = cset;
3624 	list_add(&it->iters_node, &cset->task_iters);
3625 }
3626 
3627 static void css_task_iter_advance(struct css_task_iter *it)
3628 {
3629 	struct list_head *l = it->task_pos;
3630 
3631 	lockdep_assert_held(&css_set_lock);
3632 	WARN_ON_ONCE(!l);
3633 
3634 	/*
3635 	 * Advance iterator to find next entry.  cset->tasks is consumed
3636 	 * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
3637 	 * next cset.
3638 	 */
3639 	l = l->next;
3640 
3641 	if (l == it->tasks_head)
3642 		l = it->mg_tasks_head->next;
3643 
3644 	if (l == it->mg_tasks_head)
3645 		css_task_iter_advance_css_set(it);
3646 	else
3647 		it->task_pos = l;
3648 }
3649 
3650 /**
3651  * css_task_iter_start - initiate task iteration
3652  * @css: the css to walk tasks of
3653  * @it: the task iterator to use
3654  *
3655  * Initiate iteration through the tasks of @css.  The caller can call
3656  * css_task_iter_next() to walk through the tasks until the function
3657  * returns NULL.  On completion of iteration, css_task_iter_end() must be
3658  * called.
3659  */
3660 void css_task_iter_start(struct cgroup_subsys_state *css,
3661 			 struct css_task_iter *it)
3662 {
3663 	/* no one should try to iterate before mounting cgroups */
3664 	WARN_ON_ONCE(!use_task_css_set_links);
3665 
3666 	memset(it, 0, sizeof(*it));
3667 
3668 	spin_lock_irq(&css_set_lock);
3669 
3670 	it->ss = css->ss;
3671 
3672 	if (it->ss)
3673 		it->cset_pos = &css->cgroup->e_csets[css->ss->id];
3674 	else
3675 		it->cset_pos = &css->cgroup->cset_links;
3676 
3677 	it->cset_head = it->cset_pos;
3678 
3679 	css_task_iter_advance_css_set(it);
3680 
3681 	spin_unlock_irq(&css_set_lock);
3682 }
3683 
3684 /**
3685  * css_task_iter_next - return the next task for the iterator
3686  * @it: the task iterator being iterated
3687  *
3688  * The "next" function for task iteration.  @it should have been
3689  * initialized via css_task_iter_start().  Returns NULL when the iteration
3690  * reaches the end.
3691  */
3692 struct task_struct *css_task_iter_next(struct css_task_iter *it)
3693 {
3694 	if (it->cur_task) {
3695 		put_task_struct(it->cur_task);
3696 		it->cur_task = NULL;
3697 	}
3698 
3699 	spin_lock_irq(&css_set_lock);
3700 
3701 	if (it->task_pos) {
3702 		it->cur_task = list_entry(it->task_pos, struct task_struct,
3703 					  cg_list);
3704 		get_task_struct(it->cur_task);
3705 		css_task_iter_advance(it);
3706 	}
3707 
3708 	spin_unlock_irq(&css_set_lock);
3709 
3710 	return it->cur_task;
3711 }
3712 
3713 /**
3714  * css_task_iter_end - finish task iteration
3715  * @it: the task iterator to finish
3716  *
3717  * Finish task iteration started by css_task_iter_start().
3718  */
3719 void css_task_iter_end(struct css_task_iter *it)
3720 {
3721 	if (it->cur_cset) {
3722 		spin_lock_irq(&css_set_lock);
3723 		list_del(&it->iters_node);
3724 		put_css_set_locked(it->cur_cset);
3725 		spin_unlock_irq(&css_set_lock);
3726 	}
3727 
3728 	if (it->cur_task)
3729 		put_task_struct(it->cur_task);
3730 }
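
/*
 * Example (illustrative, not part of the original file): the canonical
 * start/next/end pattern, here counting the tasks attached to @css.
 * css_task_iter_next() returns each task with a reference already
 * held, so no extra pinning is needed inside the loop.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *	int nr = 0;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		nr++;
 *	css_task_iter_end(&it);
 */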
3731 
3732 static void cgroup_procs_release(struct kernfs_open_file *of)
3733 {
3734 	if (of->priv) {
3735 		css_task_iter_end(of->priv);
3736 		kfree(of->priv);
3737 	}
3738 }
3739 
3740 static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
3741 {
3742 	struct kernfs_open_file *of = s->private;
3743 	struct css_task_iter *it = of->priv;
3744 	struct task_struct *task;
3745 
3746 	do {
3747 		task = css_task_iter_next(it);
3748 	} while (task && !thread_group_leader(task));
3749 
3750 	return task;
3751 }
3752 
3753 static void *cgroup_procs_start(struct seq_file *s, loff_t *pos)
3754 {
3755 	struct kernfs_open_file *of = s->private;
3756 	struct cgroup *cgrp = seq_css(s)->cgroup;
3757 	struct css_task_iter *it = of->priv;
3758 
3759 	/*
3760 	 * When a seq_file is seeked, it's always traversed sequentially
3761 	 * from position 0, so we can simply keep iterating on !0 *pos.
3762 	 */
3763 	if (!it) {
3764 		if (WARN_ON_ONCE((*pos)++))
3765 			return ERR_PTR(-EINVAL);
3766 
3767 		it = kzalloc(sizeof(*it), GFP_KERNEL);
3768 		if (!it)
3769 			return ERR_PTR(-ENOMEM);
3770 		of->priv = it;
3771 		css_task_iter_start(&cgrp->self, it);
3772 	} else if (!(*pos)++) {
3773 		css_task_iter_end(it);
3774 		css_task_iter_start(&cgrp->self, it);
3775 	}
3776 
3777 	return cgroup_procs_next(s, NULL, NULL);
3778 }
3779 
3780 static int cgroup_procs_show(struct seq_file *s, void *v)
3781 {
3782 	seq_printf(s, "%d\n", task_tgid_vnr(v));
3783 	return 0;
3784 }
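
/*
 * Example (illustrative): with the start/next/show ops above, reading
 * "cgroup.procs" yields one TGID per line in the reader's pid
 * namespace, e.g.:
 *
 *	$ cat cgroup.procs
 *	1
 *	423
 */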
3785 
3786 /* cgroup core interface files for the default hierarchy */
3787 static struct cftype cgroup_base_files[] = {
3788 	{
3789 		.name = "cgroup.procs",
3790 		.file_offset = offsetof(struct cgroup, procs_file),
3791 		.release = cgroup_procs_release,
3792 		.seq_start = cgroup_procs_start,
3793 		.seq_next = cgroup_procs_next,
3794 		.seq_show = cgroup_procs_show,
3795 		.write = cgroup_procs_write,
3796 	},
3797 	{
3798 		.name = "cgroup.controllers",
3799 		.seq_show = cgroup_controllers_show,
3800 	},
3801 	{
3802 		.name = "cgroup.subtree_control",
3803 		.seq_show = cgroup_subtree_control_show,
3804 		.write = cgroup_subtree_control_write,
3805 	},
3806 	{
3807 		.name = "cgroup.events",
3808 		.flags = CFTYPE_NOT_ON_ROOT,
3809 		.file_offset = offsetof(struct cgroup, events_file),
3810 		.seq_show = cgroup_events_show,
3811 	},
3812 	{ }	/* terminate */
3813 };
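
/*
 * Illustration (not part of the original file): the table above
 * materializes as the following files in each default-hierarchy cgroup
 * directory, with cgroup.events omitted on the root due to
 * CFTYPE_NOT_ON_ROOT:
 *
 *	cgroup.procs
 *	cgroup.controllers
 *	cgroup.subtree_control
 *	cgroup.events
 */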
3814 
3815 /*
3816  * css destruction is a four-stage process.
3817  *
3818  * 1. Destruction starts.  Killing of the percpu_ref is initiated.
3819  *    Implemented in kill_css().
3820  *
3821  * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
3822  *    and thus css_tryget_online() is guaranteed to fail, the css can be
3823  *    offlined by invoking offline_css().  After offlining, the base ref is
3824  *    put.  Implemented in css_killed_work_fn().
3825  *
3826  * 3. When the percpu_ref reaches zero, the only possible remaining
3827  *    accessors are inside RCU read sections.  css_release() schedules the
3828  *    RCU callback.
3829  *
3830  * 4. After the grace period, the css can be freed.  Implemented in
3831  *    css_free_work_fn().
3832  *
3833  * It is actually hairier because both steps 2 and 4 require process
3834  * context and thus involve punting to css->destroy_work, adding two
3835  * additional steps to the already complex sequence.
3836  */
3837 static void css_free_work_fn(struct work_struct *work)
3838 {
3839 	struct cgroup_subsys_state *css =
3840 		container_of(work, struct cgroup_subsys_state, destroy_work);
3841 	struct cgroup_subsys *ss = css->ss;
3842 	struct cgroup *cgrp = css->cgroup;
3843 
3844 	percpu_ref_exit(&css->refcnt);
3845 
3846 	if (ss) {
3847 		/* css free path */
3848 		struct cgroup_subsys_state *parent = css->parent;
3849 		int id = css->id;
3850 
3851 		ss->css_free(css);
3852 		cgroup_idr_remove(&ss->css_idr, id);
3853 		cgroup_put(cgrp);
3854 
3855 		if (parent)
3856 			css_put(parent);
3857 	} else {
3858 		/* cgroup free path */
3859 		atomic_dec(&cgrp->root->nr_cgrps);
3860 		cgroup1_pidlist_destroy_all(cgrp);
3861 		cancel_work_sync(&cgrp->release_agent_work);
3862 
3863 		if (cgroup_parent(cgrp)) {
3864 			/*
3865 			 * We get a ref to the parent, and put the ref when
3866 			 * this cgroup is being freed, so it's guaranteed
3867 			 * that the parent won't be destroyed before its
3868 			 * children.
3869 			 */
3870 			cgroup_put(cgroup_parent(cgrp));
3871 			kernfs_put(cgrp->kn);
3872 			kfree(cgrp);
3873 		} else {
3874 			/*
3875 			 * This is root cgroup's refcnt reaching zero,
3876 			 * which indicates that the root should be
3877 			 * released.
3878 			 */
3879 			cgroup_destroy_root(cgrp->root);
3880 		}
3881 	}
3882 }
3883 
3884 static void css_free_rcu_fn(struct rcu_head *rcu_head)
3885 {
3886 	struct cgroup_subsys_state *css =
3887 		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
3888 
3889 	INIT_WORK(&css->destroy_work, css_free_work_fn);
3890 	queue_work(cgroup_destroy_wq, &css->destroy_work);
3891 }
3892 
3893 static void css_release_work_fn(struct work_struct *work)
3894 {
3895 	struct cgroup_subsys_state *css =
3896 		container_of(work, struct cgroup_subsys_state, destroy_work);
3897 	struct cgroup_subsys *ss = css->ss;
3898 	struct cgroup *cgrp = css->cgroup;
3899 
3900 	mutex_lock(&cgroup_mutex);
3901 
3902 	css->flags |= CSS_RELEASED;
3903 	list_del_rcu(&css->sibling);
3904 
3905 	if (ss) {
3906 		/* css release path */
3907 		cgroup_idr_replace(&ss->css_idr, NULL, css->id);
3908 		if (ss->css_released)
3909 			ss->css_released(css);
3910 	} else {
3911 		/* cgroup release path */
3912 		trace_cgroup_release(cgrp);
3913 
3914 		cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
3915 		cgrp->id = -1;
3916 
3917 		/*
3918 		 * There are two control paths which try to determine
3919 		 * cgroup from dentry without going through kernfs -
3920 		 * cgroupstats_build() and css_tryget_online_from_dir().
3921 		 * Those are supported by RCU protecting clearing of
3922 		 * cgrp->kn->priv backpointer.
3923 		 */
3924 		if (cgrp->kn)
3925 			RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
3926 					 NULL);
3927 
3928 		cgroup_bpf_put(cgrp);
3929 	}
3930 
3931 	mutex_unlock(&cgroup_mutex);
3932 
3933 	call_rcu(&css->rcu_head, css_free_rcu_fn);
3934 }
3935 
3936 static void css_release(struct percpu_ref *ref)
3937 {
3938 	struct cgroup_subsys_state *css =
3939 		container_of(ref, struct cgroup_subsys_state, refcnt);
3940 
3941 	INIT_WORK(&css->destroy_work, css_release_work_fn);
3942 	queue_work(cgroup_destroy_wq, &css->destroy_work);
3943 }
3944 
3945 static void init_and_link_css(struct cgroup_subsys_state *css,
3946 			      struct cgroup_subsys *ss, struct cgroup *cgrp)
3947 {
3948 	lockdep_assert_held(&cgroup_mutex);
3949 
3950 	cgroup_get(cgrp);
3951 
3952 	memset(css, 0, sizeof(*css));
3953 	css->cgroup = cgrp;
3954 	css->ss = ss;
3955 	css->id = -1;
3956 	INIT_LIST_HEAD(&css->sibling);
3957 	INIT_LIST_HEAD(&css->children);
3958 	css->serial_nr = css_serial_nr_next++;
3959 	atomic_set(&css->online_cnt, 0);
3960 
3961 	if (cgroup_parent(cgrp)) {
3962 		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
3963 		css_get(css->parent);
3964 	}
3965 
3966 	BUG_ON(cgroup_css(cgrp, ss));
3967 }
3968 
3969 /* invoke ->css_online() on a new CSS and mark it online if successful */
3970 static int online_css(struct cgroup_subsys_state *css)
3971 {
3972 	struct cgroup_subsys *ss = css->ss;
3973 	int ret = 0;
3974 
3975 	lockdep_assert_held(&cgroup_mutex);
3976 
3977 	if (ss->css_online)
3978 		ret = ss->css_online(css);
3979 	if (!ret) {
3980 		css->flags |= CSS_ONLINE;
3981 		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
3982 
3983 		atomic_inc(&css->online_cnt);
3984 		if (css->parent)
3985 			atomic_inc(&css->parent->online_cnt);
3986 	}
3987 	return ret;
3988 }
3989 
3990 /* if the CSS is online, invoke ->css_offline() on it and mark it offline */
3991 static void offline_css(struct cgroup_subsys_state *css)
3992 {
3993 	struct cgroup_subsys *ss = css->ss;
3994 
3995 	lockdep_assert_held(&cgroup_mutex);
3996 
3997 	if (!(css->flags & CSS_ONLINE))
3998 		return;
3999 
4000 	if (ss->css_reset)
4001 		ss->css_reset(css);
4002 
4003 	if (ss->css_offline)
4004 		ss->css_offline(css);
4005 
4006 	css->flags &= ~CSS_ONLINE;
4007 	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
4008 
4009 	wake_up_all(&css->cgroup->offline_waitq);
4010 }
4011 
4012 /**
4013  * css_create - create a cgroup_subsys_state
4014  * @cgrp: the cgroup new css will be associated with
4015  * @ss: the subsys of new css
4016  *
4017  * Create a new css associated with @cgrp - @ss pair.  On success, the new
4018  * css is online and installed in @cgrp.  This function doesn't create the
4019  * interface files.  Returns the new css on success, ERR_PTR(-errno) on failure.
4020  */
4021 static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
4022 					      struct cgroup_subsys *ss)
4023 {
4024 	struct cgroup *parent = cgroup_parent(cgrp);
4025 	struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
4026 	struct cgroup_subsys_state *css;
4027 	int err;
4028 
4029 	lockdep_assert_held(&cgroup_mutex);
4030 
4031 	css = ss->css_alloc(parent_css);
4032 	if (!css)
4033 		css = ERR_PTR(-ENOMEM);
4034 	if (IS_ERR(css))
4035 		return css;
4036 
4037 	init_and_link_css(css, ss, cgrp);
4038 
4039 	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
4040 	if (err)
4041 		goto err_free_css;
4042 
4043 	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
4044 	if (err < 0)
4045 		goto err_free_css;
4046 	css->id = err;
4047 
4048 	/* @css is ready to be brought online now, make it visible */
4049 	list_add_tail_rcu(&css->sibling, &parent_css->children);
4050 	cgroup_idr_replace(&ss->css_idr, css, css->id);
4051 
4052 	err = online_css(css);
4053 	if (err)
4054 		goto err_list_del;
4055 
4056 	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
4057 	    cgroup_parent(parent)) {
4058 		pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
4059 			current->comm, current->pid, ss->name);
4060 		if (!strcmp(ss->name, "memory"))
4061 			pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
4062 		ss->warned_broken_hierarchy = true;
4063 	}
4064 
4065 	return css;
4066 
4067 err_list_del:
4068 	list_del_rcu(&css->sibling);
4069 err_free_css:
4070 	call_rcu(&css->rcu_head, css_free_rcu_fn);
4071 	return ERR_PTR(err);
4072 }
4073 
4074 /*
4075  * The returned cgroup is fully initialized including its control mask, but
4076  * it isn't associated with its kernfs_node and doesn't have the control
4077  * mask applied.
4078  */
4079 static struct cgroup *cgroup_create(struct cgroup *parent)
4080 {
4081 	struct cgroup_root *root = parent->root;
4082 	struct cgroup *cgrp, *tcgrp;
4083 	int level = parent->level + 1;
4084 	int ret;
4085 
4086 	/* allocate the cgroup and its ID, 0 is reserved for the root */
4087 	cgrp = kzalloc(sizeof(*cgrp) +
4088 		       sizeof(cgrp->ancestor_ids[0]) * (level + 1), GFP_KERNEL);
4089 	if (!cgrp)
4090 		return ERR_PTR(-ENOMEM);
4091 
4092 	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
4093 	if (ret)
4094 		goto out_free_cgrp;
4095 
4096 	/*
4097 	 * Temporarily set the pointer to NULL, so idr_find() won't return
4098 	 * a half-baked cgroup.
4099 	 */
4100 	cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
4101 	if (cgrp->id < 0) {
4102 		ret = -ENOMEM;
4103 		goto out_cancel_ref;
4104 	}
4105 
4106 	init_cgroup_housekeeping(cgrp);
4107 
4108 	cgrp->self.parent = &parent->self;
4109 	cgrp->root = root;
4110 	cgrp->level = level;
4111 
4112 	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp))
4113 		cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
4114 
4115 	if (notify_on_release(parent))
4116 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
4117 
4118 	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
4119 		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
4120 
4121 	cgrp->self.serial_nr = css_serial_nr_next++;
4122 
4123 	/* allocation complete, commit to creation */
4124 	list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
4125 	atomic_inc(&root->nr_cgrps);
4126 	cgroup_get(parent);
4127 
4128 	/*
4129 	 * @cgrp is now fully operational.  If something fails after this
4130 	 * point, it'll be released via the normal destruction path.
4131 	 */
4132 	cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
4133 
4134 	/*
4135 	 * On the default hierarchy, a child doesn't automatically inherit
4136 	 * subtree_control from the parent.  Each is configured manually.
4137 	 */
4138 	if (!cgroup_on_dfl(cgrp))
4139 		cgrp->subtree_control = cgroup_control(cgrp);
4140 
4141 	if (parent)
4142 		cgroup_bpf_inherit(cgrp, parent);
4143 
4144 	cgroup_propagate_control(cgrp);
4145 
4146 	return cgrp;
4147 
4148 out_cancel_ref:
4149 	percpu_ref_exit(&cgrp->self.refcnt);
4150 out_free_cgrp:
4151 	kfree(cgrp);
4152 	return ERR_PTR(ret);
4153 }
4154 
4155 int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode)
4156 {
4157 	struct cgroup *parent, *cgrp;
4158 	struct kernfs_node *kn;
4159 	int ret;
4160 
4161 	/* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
4162 	if (strchr(name, '\n'))
4163 		return -EINVAL;
4164 
4165 	parent = cgroup_kn_lock_live(parent_kn, false);
4166 	if (!parent)
4167 		return -ENODEV;
4168 
4169 	cgrp = cgroup_create(parent);
4170 	if (IS_ERR(cgrp)) {
4171 		ret = PTR_ERR(cgrp);
4172 		goto out_unlock;
4173 	}
4174 
4175 	/* create the directory */
4176 	kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
4177 	if (IS_ERR(kn)) {
4178 		ret = PTR_ERR(kn);
4179 		goto out_destroy;
4180 	}
4181 	cgrp->kn = kn;
4182 
4183 	/*
4184 	 * This extra ref will be put in css_free_work_fn() and guarantees
4185 	 * that @cgrp->kn is always accessible.
4186 	 */
4187 	kernfs_get(kn);
4188 
4189 	ret = cgroup_kn_set_ugid(kn);
4190 	if (ret)
4191 		goto out_destroy;
4192 
4193 	ret = css_populate_dir(&cgrp->self);
4194 	if (ret)
4195 		goto out_destroy;
4196 
4197 	ret = cgroup_apply_control_enable(cgrp);
4198 	if (ret)
4199 		goto out_destroy;
4200 
4201 	trace_cgroup_mkdir(cgrp);
4202 
4203 	/* let's create and online css's */
4204 	kernfs_activate(kn);
4205 
4206 	ret = 0;
4207 	goto out_unlock;
4208 
4209 out_destroy:
4210 	cgroup_destroy_locked(cgrp);
4211 out_unlock:
4212 	cgroup_kn_unlock(parent_kn);
4213 	return ret;
4214 }
4215 
4216 /*
4217  * This is called when the refcnt of a css is confirmed to be killed.
4218  * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
4219  * initiate destruction and put the css ref from kill_css().
4220  */
4221 static void css_killed_work_fn(struct work_struct *work)
4222 {
4223 	struct cgroup_subsys_state *css =
4224 		container_of(work, struct cgroup_subsys_state, destroy_work);
4225 
4226 	mutex_lock(&cgroup_mutex);
4227 
4228 	do {
4229 		offline_css(css);
4230 		css_put(css);
4231 		/* @css can't go away while we're holding cgroup_mutex */
4232 		css = css->parent;
4233 	} while (css && atomic_dec_and_test(&css->online_cnt));
4234 
4235 	mutex_unlock(&cgroup_mutex);
4236 }
4237 
4238 /* css kill confirmation processing requires process context, bounce */
4239 static void css_killed_ref_fn(struct percpu_ref *ref)
4240 {
4241 	struct cgroup_subsys_state *css =
4242 		container_of(ref, struct cgroup_subsys_state, refcnt);
4243 
4244 	if (atomic_dec_and_test(&css->online_cnt)) {
4245 		INIT_WORK(&css->destroy_work, css_killed_work_fn);
4246 		queue_work(cgroup_destroy_wq, &css->destroy_work);
4247 	}
4248 }
4249 
4250 /**
4251  * kill_css - destroy a css
4252  * @css: css to destroy
4253  *
4254  * This function initiates destruction of @css by removing cgroup interface
4255  * files and putting its base reference.  ->css_offline() will be invoked
4256  * asynchronously once css_tryget_online() is guaranteed to fail and when
4257  * the reference count reaches zero, @css will be released.
4258  */
4259 static void kill_css(struct cgroup_subsys_state *css)
4260 {
4261 	lockdep_assert_held(&cgroup_mutex);
4262 
4263 	/*
4264 	 * This must happen before css is disassociated with its cgroup.
4265 	 * See seq_css() for details.
4266 	 */
4267 	css_clear_dir(css);
4268 
4269 	/*
4270 	 * Killing would put the base ref, but we need to keep it alive
4271 	 * until after ->css_offline().
4272 	 */
4273 	css_get(css);
4274 
4275 	/*
4276 	 * cgroup core guarantees that, by the time ->css_offline() is
4277 	 * invoked, no new css reference will be given out via
4278 	 * css_tryget_online().  We can't simply call percpu_ref_kill() and
4279 	 * proceed to offlining css's because percpu_ref_kill() doesn't
4280 	 * guarantee that the ref is seen as killed on all CPUs on return.
4281 	 *
4282 	 * Use percpu_ref_kill_and_confirm() to get notifications as each
4283 	 * css is confirmed to be seen as killed on all CPUs.
4284 	 */
4285 	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
4286 }
4287 
4288 /**
4289  * cgroup_destroy_locked - the first stage of cgroup destruction
4290  * @cgrp: cgroup to be destroyed
4291  *
4292  * css's make use of percpu refcnts whose killing latency shouldn't be
4293  * exposed to userland and are RCU protected.  Also, cgroup core needs to
4294  * guarantee that css_tryget_online() won't succeed by the time
4295  * ->css_offline() is invoked.  To satisfy all the requirements,
4296  * destruction is implemented in the following two steps.
4297  *
4298  * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
4299  *     userland visible parts and start killing the percpu refcnts of
4300  *     css's.  Set up so that the next stage will be kicked off once all
4301  *     the percpu refcnts are confirmed to be killed.
4302  *
4303  * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
4304  *     rest of destruction.  Once all cgroup references are gone, the
4305  *     cgroup is RCU-freed.
4306  *
4307  * This function implements s1.  After this step, @cgrp is gone as far as
4308  * the userland is concerned and a new cgroup with the same name may be
4309  * created.  As cgroup doesn't care about the names internally, this
4310  * doesn't cause any problem.
4311  */
4312 static int cgroup_destroy_locked(struct cgroup *cgrp)
4313 	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
4314 {
4315 	struct cgroup_subsys_state *css;
4316 	struct cgrp_cset_link *link;
4317 	int ssid;
4318 
4319 	lockdep_assert_held(&cgroup_mutex);
4320 
4321 	/*
4322 	 * Only migration can raise populated from zero and we're already
4323 	 * holding cgroup_mutex.
4324 	 */
4325 	if (cgroup_is_populated(cgrp))
4326 		return -EBUSY;
4327 
4328 	/*
4329 	 * Make sure there are no live children.  We can't test emptiness of
4330 	 * ->self.children as dead children linger on it while being
4331 	 * drained; otherwise, "rmdir parent/child parent" may fail.
4332 	 */
4333 	if (css_has_online_children(&cgrp->self))
4334 		return -EBUSY;
4335 
4336 	/*
4337 	 * Mark @cgrp and the associated csets dead.  The former prevents
4338 	 * further task migration and child creation by disabling
4339 	 * cgroup_lock_live_group().  The latter makes the csets ignored by
4340 	 * the migration path.
4341 	 */
4342 	cgrp->self.flags &= ~CSS_ONLINE;
4343 
4344 	spin_lock_irq(&css_set_lock);
4345 	list_for_each_entry(link, &cgrp->cset_links, cset_link)
4346 		link->cset->dead = true;
4347 	spin_unlock_irq(&css_set_lock);
4348 
4349 	/* initiate massacre of all css's */
4350 	for_each_css(css, ssid, cgrp)
4351 		kill_css(css);
4352 
4353 	/*
4354 	 * Remove @cgrp directory along with the base files.  @cgrp has an
4355 	 * extra ref on its kn.
4356 	 */
4357 	kernfs_remove(cgrp->kn);
4358 
4359 	cgroup1_check_for_release(cgroup_parent(cgrp));
4360 
4361 	/* put the base reference */
4362 	percpu_ref_kill(&cgrp->self.refcnt);
4363 
4364 	return 0;
4365 }
4366 
4367 int cgroup_rmdir(struct kernfs_node *kn)
4368 {
4369 	struct cgroup *cgrp;
4370 	int ret = 0;
4371 
4372 	cgrp = cgroup_kn_lock_live(kn, false);
4373 	if (!cgrp)
4374 		return 0;
4375 
4376 	ret = cgroup_destroy_locked(cgrp);
4377 
4378 	if (!ret)
4379 		trace_cgroup_rmdir(cgrp);
4380 
4381 	cgroup_kn_unlock(kn);
4382 	return ret;
4383 }
4384 
4385 static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
4386 	.remount_fs		= cgroup_remount,
4387 	.mkdir			= cgroup_mkdir,
4388 	.rmdir			= cgroup_rmdir,
4389 	.show_path		= cgroup_show_path,
4390 };
4391 
4392 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
4393 {
4394 	struct cgroup_subsys_state *css;
4395 
4396 	pr_debug("Initializing cgroup subsys %s\n", ss->name);
4397 
4398 	mutex_lock(&cgroup_mutex);
4399 
4400 	idr_init(&ss->css_idr);
4401 	INIT_LIST_HEAD(&ss->cfts);
4402 
4403 	/* Create the root cgroup state for this subsystem */
4404 	ss->root = &cgrp_dfl_root;
4405 	css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
4406 	/* We don't handle early failures gracefully */
4407 	BUG_ON(IS_ERR(css));
4408 	init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
4409 
4410 	/*
4411 	 * Root csses are never destroyed and we can't initialize
4412 	 * percpu_ref during early init.  Disable refcnting.
4413 	 */
4414 	css->flags |= CSS_NO_REF;
4415 
4416 	if (early) {
4417 		/* allocation can't be done safely during early init */
4418 		css->id = 1;
4419 	} else {
4420 		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
4421 		BUG_ON(css->id < 0);
4422 	}
4423 
4424 	/* Update the init_css_set to contain a subsys
4425 	 * pointer to this state - since the subsystem is
4426 	 * newly registered, all tasks and hence the
4427 	 * init_css_set is in the subsystem's root cgroup. */
4428 	init_css_set.subsys[ss->id] = css;
4429 
4430 	have_fork_callback |= (bool)ss->fork << ss->id;
4431 	have_exit_callback |= (bool)ss->exit << ss->id;
4432 	have_free_callback |= (bool)ss->free << ss->id;
4433 	have_canfork_callback |= (bool)ss->can_fork << ss->id;
4434 
4435 	/* At system boot, before all subsystems have been
4436 	 * registered, no tasks have been forked, so we don't
4437 	 * need to invoke fork callbacks here. */
4438 	BUG_ON(!list_empty(&init_task.tasks));
4439 
4440 	BUG_ON(online_css(css));
4441 
4442 	mutex_unlock(&cgroup_mutex);
4443 }
4444 
4445 /**
4446  * cgroup_init_early - cgroup initialization at system boot
4447  *
4448  * Initialize cgroups at system boot, and initialize any
4449  * subsystems that request early init.
4450  */
4451 int __init cgroup_init_early(void)
4452 {
4453 	static struct cgroup_sb_opts __initdata opts;
4454 	struct cgroup_subsys *ss;
4455 	int i;
4456 
4457 	init_cgroup_root(&cgrp_dfl_root, &opts);
4458 	cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
4459 
4460 	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
4461 
4462 	for_each_subsys(ss, i) {
4463 		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
4464 		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
4465 		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
4466 		     ss->id, ss->name);
4467 		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
4468 		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
4469 
4470 		ss->id = i;
4471 		ss->name = cgroup_subsys_name[i];
4472 		if (!ss->legacy_name)
4473 			ss->legacy_name = cgroup_subsys_name[i];
4474 
4475 		if (ss->early_init)
4476 			cgroup_init_subsys(ss, true);
4477 	}
4478 	return 0;
4479 }
4480 
4481 static u16 cgroup_disable_mask __initdata;
4482 
4483 /**
4484  * cgroup_init - cgroup initialization
4485  *
4486  * Register cgroup filesystem and /proc file, and initialize
4487  * any subsystems that didn't request early init.
4488  */
4489 int __init cgroup_init(void)
4490 {
4491 	struct cgroup_subsys *ss;
4492 	int ssid;
4493 
4494 	BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16);
4495 	BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
4496 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
4497 	BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
4498 
4499 	/*
4500 	 * The latency of the synchronize_sched() is too high for cgroups,
4501 	 * avoid it at the cost of forcing all readers into the slow path.
4502 	 */
4503 	rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
4504 
4505 	get_user_ns(init_cgroup_ns.user_ns);
4506 
4507 	mutex_lock(&cgroup_mutex);
4508 
4509 	/*
4510 	 * Add init_css_set to the hash table so that dfl_root can link to
4511 	 * it during init.
4512 	 */
4513 	hash_add(css_set_table, &init_css_set.hlist,
4514 		 css_set_hash(init_css_set.subsys));
4515 
4516 	BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
4517 
4518 	mutex_unlock(&cgroup_mutex);
4519 
4520 	for_each_subsys(ss, ssid) {
4521 		if (ss->early_init) {
4522 			struct cgroup_subsys_state *css =
4523 				init_css_set.subsys[ss->id];
4524 
4525 			css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
4526 						   GFP_KERNEL);
4527 			BUG_ON(css->id < 0);
4528 		} else {
4529 			cgroup_init_subsys(ss, false);
4530 		}
4531 
4532 		list_add_tail(&init_css_set.e_cset_node[ssid],
4533 			      &cgrp_dfl_root.cgrp.e_csets[ssid]);
4534 
4535 		/*
4536 		 * Setting dfl_root subsys_mask needs to consider the
4537 		 * disabled flag and cftype registration needs kmalloc,
4538 		 * both of which aren't available during early_init.
4539 		 */
4540 		if (cgroup_disable_mask & (1 << ssid)) {
4541 			static_branch_disable(cgroup_subsys_enabled_key[ssid]);
4542 			printk(KERN_INFO "Disabling %s control group subsystem\n",
4543 			       ss->name);
4544 			continue;
4545 		}
4546 
4547 		if (cgroup1_ssid_disabled(ssid))
4548 			printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
4549 			       ss->name);
4550 
4551 		cgrp_dfl_root.subsys_mask |= 1 << ss->id;
4552 
4553 		if (ss->implicit_on_dfl)
4554 			cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
4555 		else if (!ss->dfl_cftypes)
4556 			cgrp_dfl_inhibit_ss_mask |= 1 << ss->id;
4557 
4558 		if (ss->dfl_cftypes == ss->legacy_cftypes) {
4559 			WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
4560 		} else {
4561 			WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
4562 			WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
4563 		}
4564 
4565 		if (ss->bind)
4566 			ss->bind(init_css_set.subsys[ssid]);
4567 	}
4568 
4569 	/* init_css_set.subsys[] has been updated, re-hash */
4570 	hash_del(&init_css_set.hlist);
4571 	hash_add(css_set_table, &init_css_set.hlist,
4572 		 css_set_hash(init_css_set.subsys));
4573 
4574 	WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
4575 	WARN_ON(register_filesystem(&cgroup_fs_type));
4576 	WARN_ON(register_filesystem(&cgroup2_fs_type));
4577 	WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));
4578 
4579 	return 0;
4580 }
4581 
4582 static int __init cgroup_wq_init(void)
4583 {
4584 	/*
4585 	 * There isn't much point in executing the destruction path in
4586 	 * parallel.  A good chunk is serialized with cgroup_mutex anyway.
4587 	 * Use 1 for @max_active.
4588 	 *
4589 	 * We would prefer to do this in cgroup_init() above, but that
4590 	 * is called before init_workqueues(), so leave this until after.
4591 	 */
4592 	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
4593 	BUG_ON(!cgroup_destroy_wq);
4594 	return 0;
4595 }
4596 core_initcall(cgroup_wq_init);
4597 
4598 /*
4599  * proc_cgroup_show()
4600  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
4601  *  - Used for /proc/<pid>/cgroup.
4602  */
4603 int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
4604 		     struct pid *pid, struct task_struct *tsk)
4605 {
4606 	char *buf;
4607 	int retval;
4608 	struct cgroup_root *root;
4609 
4610 	retval = -ENOMEM;
4611 	buf = kmalloc(PATH_MAX, GFP_KERNEL);
4612 	if (!buf)
4613 		goto out;
4614 
4615 	mutex_lock(&cgroup_mutex);
4616 	spin_lock_irq(&css_set_lock);
4617 
4618 	for_each_root(root) {
4619 		struct cgroup_subsys *ss;
4620 		struct cgroup *cgrp;
4621 		int ssid, count = 0;
4622 
4623 		if (root == &cgrp_dfl_root && !cgrp_dfl_visible)
4624 			continue;
4625 
4626 		seq_printf(m, "%d:", root->hierarchy_id);
4627 		if (root != &cgrp_dfl_root)
4628 			for_each_subsys(ss, ssid)
4629 				if (root->subsys_mask & (1 << ssid))
4630 					seq_printf(m, "%s%s", count++ ? "," : "",
4631 						   ss->legacy_name);
4632 		if (strlen(root->name))
4633 			seq_printf(m, "%sname=%s", count ? "," : "",
4634 				   root->name);
4635 		seq_putc(m, ':');
4636 
4637 		cgrp = task_cgroup_from_root(tsk, root);
4638 
4639 		/*
4640 		 * On traditional hierarchies, all zombie tasks show up as
4641 		 * belonging to the root cgroup.  On the default hierarchy,
4642 		 * while a zombie doesn't show up in "cgroup.procs" and
4643 		 * thus can't be migrated, its /proc/PID/cgroup keeps
4644 		 * reporting the cgroup it belonged to before exiting.  If
4645 		 * the cgroup is removed before the zombie is reaped,
4646 		 * " (deleted)" is appended to the cgroup path.
4647 		 */
4648 		if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
4649 			retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
4650 						current->nsproxy->cgroup_ns);
4651 			if (retval >= PATH_MAX)
4652 				retval = -ENAMETOOLONG;
4653 			if (retval < 0)
4654 				goto out_unlock;
4655 
4656 			seq_puts(m, buf);
4657 		} else {
4658 			seq_puts(m, "/");
4659 		}
4660 
4661 		if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
4662 			seq_puts(m, " (deleted)\n");
4663 		else
4664 			seq_putc(m, '\n');
4665 	}
4666 
4667 	retval = 0;
4668 out_unlock:
4669 	spin_unlock_irq(&css_set_lock);
4670 	mutex_unlock(&cgroup_mutex);
4671 	kfree(buf);
4672 out:
4673 	return retval;
4674 }
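
/*
 * Example output (illustrative): for a task in "/mygrp" on a v1
 * hierarchy with id 4 carrying the cpu controller, plus the default
 * hierarchy (id 0), /proc/<pid>/cgroup would read:
 *
 *	4:cpu:/mygrp
 *	0::/mygrp
 */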
4675 
4676 /**
4677  * cgroup_fork - initialize cgroup related fields during copy_process()
4678  * @child: pointer to task_struct of the child process.
4679  *
4680  * A task is associated with the init_css_set until cgroup_post_fork()
4681  * attaches it to the parent's css_set.  Empty cg_list indicates that
4682  * @child isn't holding reference to its css_set.
4683  * @child isn't holding a reference to its css_set.
4684 void cgroup_fork(struct task_struct *child)
4685 {
4686 	RCU_INIT_POINTER(child->cgroups, &init_css_set);
4687 	INIT_LIST_HEAD(&child->cg_list);
4688 }
4689 
4690 /**
4691  * cgroup_can_fork - called on a new task before the process is exposed
4692  * @child: the task in question.
4693  *
4694  * This calls the subsystem can_fork() callbacks. If the can_fork() callback
4695  * returns an error, the fork aborts with that error code.  This allows
4696  * a cgroup subsystem to conditionally allow or deny new forks.
4697  */
4698 int cgroup_can_fork(struct task_struct *child)
4699 {
4700 	struct cgroup_subsys *ss;
4701 	int i, j, ret;
4702 
4703 	do_each_subsys_mask(ss, i, have_canfork_callback) {
4704 		ret = ss->can_fork(child);
4705 		if (ret)
4706 			goto out_revert;
4707 	} while_each_subsys_mask();
4708 
4709 	return 0;
4710 
4711 out_revert:
4712 	for_each_subsys(ss, j) {
4713 		if (j >= i)
4714 			break;
4715 		if (ss->cancel_fork)
4716 			ss->cancel_fork(child);
4717 	}
4718 
4719 	return ret;
4720 }
4721 
4722 /**
4723  * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
4724  * @child: the task in question
4725  *
4726  * This calls the cancel_fork() callbacks if a fork failed *after*
4727  * cgroup_can_fork() succeeded.
4728  */
4729 void cgroup_cancel_fork(struct task_struct *child)
4730 {
4731 	struct cgroup_subsys *ss;
4732 	int i;
4733 
4734 	for_each_subsys(ss, i)
4735 		if (ss->cancel_fork)
4736 			ss->cancel_fork(child);
4737 }
4738 
4739 /**
4740  * cgroup_post_fork - called on a new task after adding it to the task list
4741  * @child: the task in question
4742  *
4743  * Adds the task to the list running through its css_set if necessary and
4744  * calls the subsystem fork() callbacks.  Has to be after the task is
4745  * visible on the task list in case we race with the first call to
4746  * css_task_iter_start() - to guarantee that the new task ends up on its
4747  * css_set's task list.
4748  */
4749 void cgroup_post_fork(struct task_struct *child)
4750 {
4751 	struct cgroup_subsys *ss;
4752 	int i;
4753 
4754 	/*
4755 	 * This may race against cgroup_enable_task_cg_lists().  As that
4756 	 * function sets use_task_css_set_links before grabbing
4757 	 * tasklist_lock and we just went through tasklist_lock to add
4758 	 * @child, it's guaranteed that either we see the set
4759 	 * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
4760 	 * @child during its iteration.
4761 	 *
4762 	 * If we won the race, @child is associated with %current's
4763 	 * css_set.  Grabbing css_set_lock guarantees both that the
4764 	 * association is stable, and, on completion of the parent's
4765 	 * migration, @child is visible in the source of migration or
4766 	 * already in the destination cgroup.  This guarantee is necessary
4767 	 * when implementing operations which need to migrate all tasks of
4768 	 * a cgroup to another.
4769 	 *
4770 	 * Note that if we lose to cgroup_enable_task_cg_lists(), @child
4771 	 * will remain in init_css_set.  This is safe because all tasks are
4772 	 * in the init_css_set before cg_links is enabled and there's no
4773 	 * operation which transfers all tasks out of init_css_set.
4774 	 */
4775 	if (use_task_css_set_links) {
4776 		struct css_set *cset;
4777 
4778 		spin_lock_irq(&css_set_lock);
4779 		cset = task_css_set(current);
4780 		if (list_empty(&child->cg_list)) {
4781 			get_css_set(cset);
4782 			css_set_move_task(child, NULL, cset, false);
4783 		}
4784 		spin_unlock_irq(&css_set_lock);
4785 	}
4786 
4787 	/*
4788 	 * Call ss->fork().  This must happen after @child is linked on
4789 	 * css_set; otherwise, @child might change state between ->fork()
4790 	 * and addition to css_set.
4791 	 */
4792 	do_each_subsys_mask(ss, i, have_fork_callback) {
4793 		ss->fork(child);
4794 	} while_each_subsys_mask();
4795 }
4796 
4797 /**
4798  * cgroup_exit - detach cgroup from exiting task
4799  * @tsk: pointer to task_struct of exiting process
4800  *
4801  * Description: Detach cgroup from @tsk and release it.
4802  *
4803  * Note that cgroups marked notify_on_release force every task in
4804  * them to take the global cgroup_mutex mutex when exiting.
4805  * This could impact scaling on very large systems.  Be reluctant to
4806  * use notify_on_release cgroups where very high task exit scaling
4807  * is required on large systems.
4808  *
4809  * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
4810  * call cgroup_exit() while the task is still competent to handle
4811  * notify_on_release(), then leave the task attached to the root cgroup in
4812  * each hierarchy for the remainder of its exit.  No need to bother with
4813  * init_css_set refcnting.  init_css_set never goes away and we can't race
4814  * with migration path - PF_EXITING is visible to migration path.
4815  */
4816 void cgroup_exit(struct task_struct *tsk)
4817 {
4818 	struct cgroup_subsys *ss;
4819 	struct css_set *cset;
4820 	int i;
4821 
4822 	/*
4823 	 * Unlink @tsk from its css_set.  As the migration path can't race
4824 	 * with us, we can check css_set and cg_list without synchronization.
4825 	 */
4826 	cset = task_css_set(tsk);
4827 
4828 	if (!list_empty(&tsk->cg_list)) {
4829 		spin_lock_irq(&css_set_lock);
4830 		css_set_move_task(tsk, cset, NULL, false);
4831 		spin_unlock_irq(&css_set_lock);
4832 	} else {
4833 		get_css_set(cset);
4834 	}
4835 
4836 	/* see cgroup_post_fork() for details */
4837 	do_each_subsys_mask(ss, i, have_exit_callback) {
4838 		ss->exit(tsk);
4839 	} while_each_subsys_mask();
4840 }
4841 
4842 void cgroup_free(struct task_struct *task)
4843 {
4844 	struct css_set *cset = task_css_set(task);
4845 	struct cgroup_subsys *ss;
4846 	int ssid;
4847 
4848 	do_each_subsys_mask(ss, ssid, have_free_callback) {
4849 		ss->free(task);
4850 	} while_each_subsys_mask();
4851 
4852 	put_css_set(cset);
4853 }
4854 
4855 static int __init cgroup_disable(char *str)
4856 {
4857 	struct cgroup_subsys *ss;
4858 	char *token;
4859 	int i;
4860 
4861 	while ((token = strsep(&str, ",")) != NULL) {
4862 		if (!*token)
4863 			continue;
4864 
4865 		for_each_subsys(ss, i) {
4866 			if (strcmp(token, ss->name) &&
4867 			    strcmp(token, ss->legacy_name))
4868 				continue;
4869 			cgroup_disable_mask |= 1 << i;
4870 		}
4871 	}
4872 	return 1;
4873 }
4874 __setup("cgroup_disable=", cgroup_disable);
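
/*
 * Example (illustrative): controllers can be turned off from the
 * kernel command line; both current and legacy controller names are
 * matched by the loop above:
 *
 *	cgroup_disable=memory,cpuset
 */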
4875 
4876 /**
4877  * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
4878  * @dentry: directory dentry of interest
4879  * @ss: subsystem of interest
4880  *
4881  * If @dentry is a directory for a cgroup which has @ss enabled on it, try
4882  * to get the corresponding css and return it.  If such css doesn't exist
4883  * or can't be pinned, an ERR_PTR value is returned.
4884  */
4885 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
4886 						       struct cgroup_subsys *ss)
4887 {
4888 	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
4889 	struct file_system_type *s_type = dentry->d_sb->s_type;
4890 	struct cgroup_subsys_state *css = NULL;
4891 	struct cgroup *cgrp;
4892 
4893 	/* is @dentry a cgroup dir? */
4894 	if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) ||
4895 	    !kn || kernfs_type(kn) != KERNFS_DIR)
4896 		return ERR_PTR(-EBADF);
4897 
4898 	rcu_read_lock();
4899 
4900 	/*
4901 	 * This path doesn't originate from kernfs and @kn could already
4902 	 * have been or be removed at any point.  @kn->priv is RCU
4903 	 * protected for this access.  See css_release_work_fn() for details.
4904 	 */
4905 	cgrp = rcu_dereference(*(void __rcu __force **)&kn->priv);
4906 	if (cgrp)
4907 		css = cgroup_css(cgrp, ss);
4908 
4909 	if (!css || !css_tryget_online(css))
4910 		css = ERR_PTR(-ENOENT);
4911 
4912 	rcu_read_unlock();
4913 	return css;
4914 }
4915 
4916 /**
4917  * css_from_id - lookup css by id
4918  * @id: the cgroup id
4919  * @ss: cgroup subsys to be looked into
4920  *
4921  * Returns the css if there's valid one with @id, otherwise returns NULL.
4922  * Should be called under rcu_read_lock().
4923  */
4924 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
4925 {
4926 	WARN_ON_ONCE(!rcu_read_lock_held());
4927 	return idr_find(&ss->css_idr, id);
4928 }
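
/*
 * Example (illustrative, not part of the original file): resolving an
 * id and pinning the result before leaving the RCU read section.
 * "my_subsys" stands in for a real cgroup_subsys.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = css_from_id(id, &my_subsys);
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 */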
4929 
4930 /**
4931  * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
4932  * @path: path on the default hierarchy
4933  *
4934  * Find the cgroup at @path on the default hierarchy, increment its
4935  * reference count and return it.  Returns pointer to the found cgroup on
4936  * success, ERR_PTR(-ENOENT) if @path doesn't exist and ERR_PTR(-ENOTDIR)
4937  * if @path points to a non-directory.
4938  */
4939 struct cgroup *cgroup_get_from_path(const char *path)
4940 {
4941 	struct kernfs_node *kn;
4942 	struct cgroup *cgrp;
4943 
4944 	mutex_lock(&cgroup_mutex);
4945 
4946 	kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
4947 	if (kn) {
4948 		if (kernfs_type(kn) == KERNFS_DIR) {
4949 			cgrp = kn->priv;
4950 			cgroup_get(cgrp);
4951 		} else {
4952 			cgrp = ERR_PTR(-ENOTDIR);
4953 		}
4954 		kernfs_put(kn);
4955 	} else {
4956 		cgrp = ERR_PTR(-ENOENT);
4957 	}
4958 
4959 	mutex_unlock(&cgroup_mutex);
4960 	return cgrp;
4961 }
4962 EXPORT_SYMBOL_GPL(cgroup_get_from_path);
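
/*
 * Example (illustrative, not part of the original file): looking up a
 * cgroup by its path on the default hierarchy and dropping the
 * reference when done.
 *
 *	struct cgroup *cgrp;
 *
 *	cgrp = cgroup_get_from_path("/my/cgroup");
 *	if (IS_ERR(cgrp))
 *		return PTR_ERR(cgrp);
 *	...
 *	cgroup_put(cgrp);
 */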
4963 
4964 /**
4965  * cgroup_get_from_fd - get a cgroup pointer from a fd
4966  * @fd: fd obtained by open(cgroup2_dir)
4967  *
4968  * Find the cgroup from an fd which should be obtained
4969  * by opening a cgroup directory.  Returns a pointer to
4970  * the cgroup on success and an ERR_PTR value if the
4971  * cgroup cannot be found.
4972  */
4973 struct cgroup *cgroup_get_from_fd(int fd)
4974 {
4975 	struct cgroup_subsys_state *css;
4976 	struct cgroup *cgrp;
4977 	struct file *f;
4978 
4979 	f = fget_raw(fd);
4980 	if (!f)
4981 		return ERR_PTR(-EBADF);
4982 
4983 	css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
4984 	fput(f);
4985 	if (IS_ERR(css))
4986 		return ERR_CAST(css);
4987 
4988 	cgrp = css->cgroup;
4989 	if (!cgroup_on_dfl(cgrp)) {
4990 		cgroup_put(cgrp);
4991 		return ERR_PTR(-EBADF);
4992 	}
4993 
4994 	return cgrp;
4995 }
4996 EXPORT_SYMBOL_GPL(cgroup_get_from_fd);
4997 
4998 /*
4999  * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
5000  * definition in cgroup-defs.h.
5001  */
5002 #ifdef CONFIG_SOCK_CGROUP_DATA
5003 
5004 #if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
5005 
5006 DEFINE_SPINLOCK(cgroup_sk_update_lock);
5007 static bool cgroup_sk_alloc_disabled __read_mostly;
5008 
5009 void cgroup_sk_alloc_disable(void)
5010 {
5011 	if (cgroup_sk_alloc_disabled)
5012 		return;
5013 	pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
5014 	cgroup_sk_alloc_disabled = true;
5015 }
5016 
5017 #else
5018 
5019 #define cgroup_sk_alloc_disabled	false
5020 
5021 #endif
5022 
5023 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
5024 {
5025 	if (cgroup_sk_alloc_disabled)
5026 		return;
5027 
5028 	/* Socket clone path */
5029 	if (skcd->val) {
5030 		cgroup_get(sock_cgroup_ptr(skcd));
5031 		return;
5032 	}
5033 
5034 	rcu_read_lock();
5035 
5036 	while (true) {
5037 		struct css_set *cset;
5038 
5039 		cset = task_css_set(current);
5040 		if (likely(cgroup_tryget(cset->dfl_cgrp))) {
5041 			skcd->val = (unsigned long)cset->dfl_cgrp;
5042 			break;
5043 		}
5044 		cpu_relax();
5045 	}
5046 
5047 	rcu_read_unlock();
5048 }
5049 
5050 void cgroup_sk_free(struct sock_cgroup_data *skcd)
5051 {
5052 	cgroup_put(sock_cgroup_ptr(skcd));
5053 }
5054 
5055 #endif	/* CONFIG_SOCK_CGROUP_DATA */
5056 
5057 #ifdef CONFIG_CGROUP_BPF
5058 int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
5059 		      enum bpf_attach_type type, bool overridable)
5060 {
5061 	struct cgroup *parent = cgroup_parent(cgrp);
5062 	int ret;
5063 
5064 	mutex_lock(&cgroup_mutex);
5065 	ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
5066 	mutex_unlock(&cgroup_mutex);
5067 	return ret;
5068 }
5069 #endif /* CONFIG_CGROUP_BPF */
5070