/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CGROUP_INTERNAL_H
#define __CGROUP_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/fs_parser.h>

#define TRACE_CGROUP_PATH_LEN 1024
extern spinlock_t trace_cgroup_path_lock;
extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
extern bool cgroup_debug;
extern void __init enable_debug_cgroup(void);

/*
 * cgroup_path() takes a spin lock.  It is good practice not to take
 * spin locks within tracepoint handlers, as such locking is mostly
 * hidden from normal view.  As cgroup_path() can take the
 * kernfs_rename_lock spin lock, it is best not to call it from a
 * trace event handler.
 *
 * Note: trace_cgroup_##type##_enabled() is a static branch that is
 *       true only when the trace event is enabled.
 */
#define TRACE_CGROUP_PATH(type, cgrp, ...)				\
	do {								\
		if (trace_cgroup_##type##_enabled()) {			\
			unsigned long flags;				\
			spin_lock_irqsave(&trace_cgroup_path_lock,	\
					  flags);			\
			cgroup_path(cgrp, trace_cgroup_path,		\
				    TRACE_CGROUP_PATH_LEN);		\
			trace_cgroup_##type(cgrp, trace_cgroup_path,	\
					    ##__VA_ARGS__);		\
			spin_unlock_irqrestore(&trace_cgroup_path_lock, \
					       flags);			\
		}							\
	} while (0)
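/*
 * Illustrative invocation (a sketch; see the call sites in
 * kernel/cgroup/cgroup.c for the real usage):
 *
 *	TRACE_CGROUP_PATH(mkdir, cgrp);
 *
 * trace_cgroup_path is a single shared buffer, so the lock above
 * serializes concurrent emitters; the static branch keeps the
 * disabled case nearly free.
 */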

/*
 * The cgroup filesystem superblock creation/mount context.
 */
struct cgroup_fs_context {
	struct kernfs_fs_context kfc;
	struct cgroup_root	*root;
	struct cgroup_namespace	*ns;
	unsigned int	flags;			/* CGRP_ROOT_* flags */

	/* cgroup1 bits */
	bool		cpuset_clone_children;
	bool		none;			/* User explicitly requested empty subsystem */
	bool		all_ss;			/* Seen 'all' option */
	u16		subsys_mask;		/* Selected subsystems */
	char		*name;			/* Hierarchy name */
	char		*release_agent;		/* Path for release notifications */
};

static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;

	return container_of(kfc, struct cgroup_fs_context, kfc);
}
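/*
 * Typical retrieval of the mount context from inside an fs_context
 * operation (a sketch modeled on the cgroup1 mount paths;
 * example_get_tree() is a hypothetical name used only for
 * illustration):
 *
 *	static int example_get_tree(struct fs_context *fc)
 *	{
 *		struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
 *
 *		if (!ctx->ns)
 *			return -EINVAL;
 *		...
 *	}
 */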

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};
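/*
 * Walking one side of the M:N relationship, e.g. every css_set linked
 * to a cgroup (a sketch; css_set_lock must be held, and visit_cset()
 * is a placeholder, not a real helper):
 *
 *	struct cgrp_cset_link *link;
 *
 *	lockdep_assert_held(&css_set_lock);
 *	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 *		visit_cset(link->cset);
 */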

/* used to track tasks and csets during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/* the number of tasks in the set */
	int			nr_tasks;

	/* the subsys currently being processed */
	int			ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current position during
	 * iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};
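/*
 * Subsystems normally consume a taskset from a migration callback via
 * the cgroup_taskset_for_each() iterator declared in
 * include/linux/cgroup.h.  A minimal sketch, assuming a hypothetical
 * ->attach() implementation (example_attach() and do_per_task_work()
 * are illustrative names only):
 *
 *	static void example_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			do_per_task_work(task, css);
 *	}
 */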

/* migration context also tracks preloading */
struct cgroup_mgctx {
	/*
	 * Preloaded source and destination csets.  Used to guarantee
	 * atomic success or failure on actual migration.
	 */
	struct list_head	preloaded_src_csets;
	struct list_head	preloaded_dst_csets;

	/* tasks and csets to migrate */
	struct cgroup_taskset	tset;

	/* subsystems affected by migration */
	u16			ss_mask;
};

#define CGROUP_TASKSET_INIT(tset)						\
{										\
	.src_csets		= LIST_HEAD_INIT(tset.src_csets),		\
	.dst_csets		= LIST_HEAD_INIT(tset.dst_csets),		\
	.csets			= &tset.src_csets,				\
}

#define CGROUP_MGCTX_INIT(name)							\
{										\
	LIST_HEAD_INIT(name.preloaded_src_csets),				\
	LIST_HEAD_INIT(name.preloaded_dst_csets),				\
	CGROUP_TASKSET_INIT(name.tset),						\
}

#define DEFINE_CGROUP_MGCTX(name)						\
	struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
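/*
 * The migration helpers declared later in this file are meant to be
 * used in this order (a condensed sketch of the flow in
 * cgroup_attach_task(); locking and error handling trimmed):
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *	int ret;
 *
 *	cgroup_migrate_add_src(src_cset, dst_cgrp, &mgctx);
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(leader, threadgroup, &mgctx);
 *	cgroup_migrate_finish(&mgctx);
 */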

extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
extern struct file_system_type cgroup_fs_type;

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
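/*
 * Example: restricting the walk to a subsystem bitmask (a sketch;
 * ss_mask is an arbitrary caller-provided mask):
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys(ss, ssid) {
 *		if (!(ss_mask & (1 << ssid)))
 *			continue;
 *		pr_debug("considering %s\n", ss->name);
 *	}
 */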

static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static inline bool notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

void put_css_set_locked(struct css_set *cset);

static inline void put_css_set(struct css_set *cset)
{
	unsigned long flags;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it.  Similar to atomic_dec_and_lock(), but for the
	 * css_set_lock spinlock.
	 */
	if (refcount_dec_not_one(&cset->refcount))
		return;

	spin_lock_irqsave(&css_set_lock, flags);
	put_css_set_locked(cset);
	spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	refcount_inc(&cset->refcount);
}
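/*
 * get_css_set() and put_css_set() must pair.  A holder that stashes a
 * css_set pointer pins it first (a sketch):
 *
 *	get_css_set(cset);
 *	... use cset ...
 *	put_css_set(cset);
 *
 * put_css_set() only takes css_set_lock when the count may drop to
 * zero, in which case the final release happens in
 * put_css_set_locked().
 */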

bool cgroup_ssid_enabled(int ssid);
bool cgroup_on_dfl(const struct cgroup *cgrp);
bool cgroup_is_thread_root(struct cgroup *cgrp);
bool cgroup_is_threaded(struct cgroup *cgrp);

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root);
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root);
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline);
void cgroup_kn_unlock(struct kernfs_node *kn);
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
			  struct cgroup_namespace *ns);
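/*
 * cgroup_kn_lock_live() and cgroup_kn_unlock() bracket kernfs
 * operations on a possibly dying cgroup.  The usual shape (a sketch;
 * the error code on failure varies by call site):
 *
 *	cgrp = cgroup_kn_lock_live(kn, false);
 *	if (!cgrp)
 *		return -ENOENT;
 *	... operate on cgrp ...
 *	cgroup_kn_unlock(kn);
 */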

void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_fs_context *ctx);
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
int cgroup_do_get_tree(struct fs_context *fc);

int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp);
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
			    struct cgroup_mgctx *mgctx);
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
		   struct cgroup_mgctx *mgctx);

int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
		       bool threadgroup);
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup,
					     bool *locked)
	__acquires(&cgroup_threadgroup_rwsem);
void cgroup_procs_write_finish(struct task_struct *task, bool locked)
	__releases(&cgroup_threadgroup_rwsem);
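/*
 * The procs-write pair brackets the cgroup.procs/cgroup.threads write
 * paths.  A condensed sketch of the flow (error handling trimmed;
 * dst_cgrp acquisition omitted):
 *
 *	task = cgroup_procs_write_start(buf, threadgroup, &locked);
 *	if (!IS_ERR(task)) {
 *		ret = cgroup_attach_task(dst_cgrp, task, threadgroup);
 *		cgroup_procs_write_finish(task, locked);
 *	}
 */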

void cgroup_lock_and_drain_offline(struct cgroup *cgrp);

int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode);
int cgroup_rmdir(struct kernfs_node *kn);
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root);

int __cgroup_task_count(const struct cgroup *cgrp);
int cgroup_task_count(const struct cgroup *cgrp);

/*
 * rstat.c
 */
int cgroup_rstat_init(struct cgroup *cgrp);
void cgroup_rstat_exit(struct cgroup *cgrp);
void cgroup_rstat_boot(void);
void cgroup_base_stat_cputime_show(struct seq_file *seq);

/*
 * namespace.c
 */
extern const struct proc_ns_operations cgroupns_operations;

/*
 * cgroup-v1.c
 */
extern struct cftype cgroup1_base_files[];
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;
extern const struct fs_parameter_spec cgroup1_fs_parameters[];

int proc_cgroupstats_show(struct seq_file *m, void *v);
bool cgroup1_ssid_disabled(int ssid);
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
void cgroup1_release_agent(struct work_struct *work);
void cgroup1_check_for_release(struct cgroup *cgrp);
int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param);
int cgroup1_get_tree(struct fs_context *fc);
int cgroup1_reconfigure(struct fs_context *ctx);

#endif /* __CGROUP_INTERNAL_H */