xref: /openbmc/linux/kernel/cgroup/debug.c (revision d522bb6a)
1 /*
2  * Debug controller
3  *
4  * WARNING: This controller is for cgroup core debugging only.
5  * Its interfaces are unstable and subject to changes at any time.
6  */
7 #include <linux/ctype.h>
8 #include <linux/mm.h>
9 #include <linux/slab.h>
10 
11 #include "cgroup-internal.h"
12 
13 static struct cgroup_subsys_state *
14 debug_css_alloc(struct cgroup_subsys_state *parent_css)
15 {
16 	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
17 
18 	if (!css)
19 		return ERR_PTR(-ENOMEM);
20 
21 	return css;
22 }
23 
/* Free the css allocated by debug_css_alloc(). */
static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}
28 
/*
 * debug_taskcount_read - return the number of tasks in a cgroup.
 * @css: css of the cgroup in question
 * @cft: the cftype being read (unused)
 */
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	/* Delegate to cgroup core's counter for this css's cgroup. */
	return cgroup_task_count(css->cgroup);
}
38 
/*
 * current_css_set_read - show the reading task's css_set.
 *
 * Prints the css_set pointer and refcount (flagging refs in excess of
 * the task count as "+N"), then one line per subsystem css in the set.
 * Returns 0 on success, -ENODEV if the cgroup is being removed.
 */
static int current_css_set_read(struct seq_file *seq, void *v)
{
	struct kernfs_open_file *of = seq->private;
	struct css_set *cset;
	struct cgroup_subsys *ss;
	struct cgroup_subsys_state *css;
	int i, refcnt;

	/* Pin the cgroup behind this kernfs node; bail if it's gone. */
	if (!cgroup_kn_lock_live(of->kn, false))
		return -ENODEV;

	/* css_set_lock stabilizes cset fields; RCU protects the deref. */
	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	refcnt = refcount_read(&cset->refcount);
	seq_printf(seq, "css_set %pK %d", cset, refcnt);
	/* References beyond the member task count are shown separately. */
	if (refcnt > cset->nr_tasks)
		seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
	seq_puts(seq, "\n");

	/*
	 * Print the css'es stored in the current css_set.
	 */
	for_each_subsys(ss, i) {
		css = cset->subsys[ss->id];
		if (!css)
			continue;
		seq_printf(seq, "%2d: %-4s\t- %lx[%d]\n", ss->id, ss->name,
			  (unsigned long)css, css->id);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	cgroup_kn_unlock(of->kn);
	return 0;
}
74 
75 static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
76 					 struct cftype *cft)
77 {
78 	u64 count;
79 
80 	rcu_read_lock();
81 	count = refcount_read(&task_css_set(current)->refcount);
82 	rcu_read_unlock();
83 	return count;
84 }
85 
/*
 * current_css_set_cg_links_read - list cgroups linked to current's css_set.
 *
 * For each cgrp_cset_link of the reading task's css_set, prints the
 * hierarchy id and cgroup name.  Returns 0 on success, -ENOMEM if the
 * temporary name buffer can't be allocated.
 */
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	/* Allocate before taking locks; cgroup_name() needs a buffer. */
	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	/* css_set_lock protects the cgrp_links list walk. */
	spin_lock_irq(&css_set_lock);
	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	spin_unlock_irq(&css_set_lock);
	kfree(name_buf);
	return 0;
}
111 
112 #define MAX_TASKS_SHOWN_PER_CSS 25
113 static int cgroup_css_links_read(struct seq_file *seq, void *v)
114 {
115 	struct cgroup_subsys_state *css = seq_css(seq);
116 	struct cgrp_cset_link *link;
117 	int dead_cnt = 0, extra_refs = 0, threaded_csets = 0;
118 
119 	spin_lock_irq(&css_set_lock);
120 
121 	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
122 		struct css_set *cset = link->cset;
123 		struct task_struct *task;
124 		int count = 0;
125 		int refcnt = refcount_read(&cset->refcount);
126 
127 		/*
128 		 * Print out the proc_cset and threaded_cset relationship
129 		 * and highlight difference between refcount and task_count.
130 		 */
131 		seq_printf(seq, "css_set %pK", cset);
132 		if (rcu_dereference_protected(cset->dom_cset, 1) != cset) {
133 			threaded_csets++;
134 			seq_printf(seq, "=>%pK", cset->dom_cset);
135 		}
136 		if (!list_empty(&cset->threaded_csets)) {
137 			struct css_set *tcset;
138 			int idx = 0;
139 
140 			list_for_each_entry(tcset, &cset->threaded_csets,
141 					    threaded_csets_node) {
142 				seq_puts(seq, idx ? "," : "<=");
143 				seq_printf(seq, "%pK", tcset);
144 				idx++;
145 			}
146 		} else {
147 			seq_printf(seq, " %d", refcnt);
148 			if (refcnt - cset->nr_tasks > 0) {
149 				int extra = refcnt - cset->nr_tasks;
150 
151 				seq_printf(seq, " +%d", extra);
152 				/*
153 				 * Take out the one additional reference in
154 				 * init_css_set.
155 				 */
156 				if (cset == &init_css_set)
157 					extra--;
158 				extra_refs += extra;
159 			}
160 		}
161 		seq_puts(seq, "\n");
162 
163 		list_for_each_entry(task, &cset->tasks, cg_list) {
164 			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
165 				seq_printf(seq, "  task %d\n",
166 					   task_pid_vnr(task));
167 		}
168 
169 		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
170 			if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
171 				seq_printf(seq, "  task %d\n",
172 					   task_pid_vnr(task));
173 		}
174 		/* show # of overflowed tasks */
175 		if (count > MAX_TASKS_SHOWN_PER_CSS)
176 			seq_printf(seq, "  ... (%d)\n",
177 				   count - MAX_TASKS_SHOWN_PER_CSS);
178 
179 		if (cset->dead) {
180 			seq_puts(seq, "    [dead]\n");
181 			dead_cnt++;
182 		}
183 
184 		WARN_ON(count != cset->nr_tasks);
185 	}
186 	spin_unlock_irq(&css_set_lock);
187 
188 	if (!dead_cnt && !extra_refs && !threaded_csets)
189 		return 0;
190 
191 	seq_puts(seq, "\n");
192 	if (threaded_csets)
193 		seq_printf(seq, "threaded css_sets = %d\n", threaded_csets);
194 	if (extra_refs)
195 		seq_printf(seq, "extra references = %d\n", extra_refs);
196 	if (dead_cnt)
197 		seq_printf(seq, "dead css_sets = %d\n", dead_cnt);
198 
199 	return 0;
200 }
201 
202 static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
203 {
204 	struct kernfs_open_file *of = seq->private;
205 	struct cgroup *cgrp;
206 	struct cgroup_subsys *ss;
207 	struct cgroup_subsys_state *css;
208 	char pbuf[16];
209 	int i;
210 
211 	cgrp = cgroup_kn_lock_live(of->kn, false);
212 	if (!cgrp)
213 		return -ENODEV;
214 
215 	for_each_subsys(ss, i) {
216 		css = rcu_dereference_check(cgrp->subsys[ss->id], true);
217 		if (!css)
218 			continue;
219 
220 		pbuf[0] = '\0';
221 
222 		/* Show the parent CSS if applicable*/
223 		if (css->parent)
224 			snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
225 				 css->parent->id);
226 		seq_printf(seq, "%2d: %-4s\t- %lx[%d] %d%s\n", ss->id, ss->name,
227 			  (unsigned long)css, css->id,
228 			  atomic_read(&css->online_cnt), pbuf);
229 	}
230 
231 	cgroup_kn_unlock(of->kn);
232 	return 0;
233 }
234 
235 static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
236 				  u16 mask)
237 {
238 	struct cgroup_subsys *ss;
239 	int ssid;
240 	bool first = true;
241 
242 	seq_printf(seq, "%-17s: ", name);
243 	for_each_subsys(ss, ssid) {
244 		if (!(mask & (1 << ssid)))
245 			continue;
246 		if (!first)
247 			seq_puts(seq, ", ");
248 		seq_puts(seq, ss->name);
249 		first = false;
250 	}
251 	seq_putc(seq, '\n');
252 }
253 
254 static int cgroup_masks_read(struct seq_file *seq, void *v)
255 {
256 	struct kernfs_open_file *of = seq->private;
257 	struct cgroup *cgrp;
258 
259 	cgrp = cgroup_kn_lock_live(of->kn, false);
260 	if (!cgrp)
261 		return -ENODEV;
262 
263 	cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
264 	cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);
265 
266 	cgroup_kn_unlock(of->kn);
267 	return 0;
268 }
269 
270 static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
271 {
272 	return (!cgroup_is_populated(css->cgroup) &&
273 		!css_has_online_children(&css->cgroup->self));
274 }
275 
/* cftypes exposed on the v1 (legacy) hierarchies. */
static struct cftype debug_legacy_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	/* Only meaningful system-wide; restricted to the root cgroup. */
	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "cgroup_subsys_states",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "cgroup_masks",
		.seq_show = cgroup_masks_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};
322 
/*
 * cftypes for the v2 (default) hierarchy.  Uses shorter file names than
 * the legacy set and omits "releasable".
 */
static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	/* Only meaningful system-wide; restricted to the root cgroup. */
	{
		.name = "current_css_set",
		.seq_show = current_css_set_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
		.flags = CFTYPE_ONLY_ON_ROOT,
	},

	{
		.name = "css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "csses",
		.seq_show = cgroup_subsys_states_read,
	},

	{
		.name = "masks",
		.seq_show = cgroup_masks_read,
	},

	{ }	/* terminate */
};
364 
/*
 * Registration for the debug controller.  Only the legacy (v1) cftypes
 * are wired up here; the v2 files are attached from the "cgroup_debug"
 * boot-parameter handler below.
 */
struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc	= debug_css_alloc,
	.css_free	= debug_css_free,
	.legacy_cftypes	= debug_legacy_files,
};
370 
371 /*
372  * On v2, debug is an implicit controller enabled by "cgroup_debug" boot
373  * parameter.
374  */
375 static int __init enable_cgroup_debug(char *str)
376 {
377 	debug_cgrp_subsys.dfl_cftypes = debug_files;
378 	debug_cgrp_subsys.implicit_on_dfl = true;
379 	debug_cgrp_subsys.threaded = true;
380 	return 1;
381 }
382 __setup("cgroup_debug", enable_cgroup_debug);
383