1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 #include <linux/limits.h>
4 #include <sys/types.h>
5 #include <unistd.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <signal.h>
9 #include <string.h>
10 #include <pthread.h>
11 
12 #include "../kselftest.h"
13 #include "cgroup_util.h"
14 
15 /*
16  * A(0) - B(0) - C(1)
17  *        \ D(0)
18  *
19  * A, B and C's "populated" fields would be 1 while D's 0.
 * test that after the one process in C is moved to root,
 * A, B and C's "populated" fields flip back to "0"; this also
 * generates file modified events on the affected "cgroup.events"
 * files (the event delivery itself is not verified here).
24  */
25 static int test_cgcore_populated(const char *root)
26 {
27 	int ret = KSFT_FAIL;
28 	char *cg_test_a = NULL, *cg_test_b = NULL;
29 	char *cg_test_c = NULL, *cg_test_d = NULL;
30 
31 	cg_test_a = cg_name(root, "cg_test_a");
32 	cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
33 	cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
34 	cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");
35 
36 	if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
37 		goto cleanup;
38 
39 	if (cg_create(cg_test_a))
40 		goto cleanup;
41 
42 	if (cg_create(cg_test_b))
43 		goto cleanup;
44 
45 	if (cg_create(cg_test_c))
46 		goto cleanup;
47 
48 	if (cg_create(cg_test_d))
49 		goto cleanup;
50 
51 	if (cg_enter_current(cg_test_c))
52 		goto cleanup;
53 
54 	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
55 		goto cleanup;
56 
57 	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
58 		goto cleanup;
59 
60 	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
61 		goto cleanup;
62 
63 	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
64 		goto cleanup;
65 
66 	if (cg_enter_current(root))
67 		goto cleanup;
68 
69 	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
70 		goto cleanup;
71 
72 	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
73 		goto cleanup;
74 
75 	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
76 		goto cleanup;
77 
78 	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
79 		goto cleanup;
80 
81 	ret = KSFT_PASS;
82 
83 cleanup:
84 	if (cg_test_d)
85 		cg_destroy(cg_test_d);
86 	if (cg_test_c)
87 		cg_destroy(cg_test_c);
88 	if (cg_test_b)
89 		cg_destroy(cg_test_b);
90 	if (cg_test_a)
91 		cg_destroy(cg_test_a);
92 	free(cg_test_d);
93 	free(cg_test_c);
94 	free(cg_test_b);
95 	free(cg_test_a);
96 	return ret;
97 }
98 
99 /*
100  * A (domain threaded) - B (threaded) - C (domain)
101  *
102  * test that C can't be used until it is turned into a
 * threaded cgroup.  "cgroup.type" file will report "domain invalid" in
104  * these cases. Operations which fail due to invalid topology use
105  * EOPNOTSUPP as the errno.
106  */
107 static int test_cgcore_invalid_domain(const char *root)
108 {
109 	int ret = KSFT_FAIL;
110 	char *grandparent = NULL, *parent = NULL, *child = NULL;
111 
112 	grandparent = cg_name(root, "cg_test_grandparent");
113 	parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
114 	child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
115 	if (!parent || !child || !grandparent)
116 		goto cleanup;
117 
118 	if (cg_create(grandparent))
119 		goto cleanup;
120 
121 	if (cg_create(parent))
122 		goto cleanup;
123 
124 	if (cg_create(child))
125 		goto cleanup;
126 
127 	if (cg_write(parent, "cgroup.type", "threaded"))
128 		goto cleanup;
129 
130 	if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
131 		goto cleanup;
132 
133 	if (!cg_enter_current(child))
134 		goto cleanup;
135 
136 	if (errno != EOPNOTSUPP)
137 		goto cleanup;
138 
139 	ret = KSFT_PASS;
140 
141 cleanup:
142 	cg_enter_current(root);
143 	if (child)
144 		cg_destroy(child);
145 	if (parent)
146 		cg_destroy(parent);
147 	if (grandparent)
148 		cg_destroy(grandparent);
149 	free(child);
150 	free(parent);
151 	free(grandparent);
152 	return ret;
153 }
154 
155 /*
156  * Test that when a child becomes threaded
157  * the parent type becomes domain threaded.
158  */
159 static int test_cgcore_parent_becomes_threaded(const char *root)
160 {
161 	int ret = KSFT_FAIL;
162 	char *parent = NULL, *child = NULL;
163 
164 	parent = cg_name(root, "cg_test_parent");
165 	child = cg_name(root, "cg_test_parent/cg_test_child");
166 	if (!parent || !child)
167 		goto cleanup;
168 
169 	if (cg_create(parent))
170 		goto cleanup;
171 
172 	if (cg_create(child))
173 		goto cleanup;
174 
175 	if (cg_write(child, "cgroup.type", "threaded"))
176 		goto cleanup;
177 
178 	if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
179 		goto cleanup;
180 
181 	ret = KSFT_PASS;
182 
183 cleanup:
184 	if (child)
185 		cg_destroy(child);
186 	if (parent)
187 		cg_destroy(parent);
188 	free(child);
189 	free(parent);
190 	return ret;
191 
192 }
193 
194 /*
195  * Test that there's no internal process constrain on threaded cgroups.
196  * You can add threads/processes on a parent with a controller enabled.
197  */
198 static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
199 {
200 	int ret = KSFT_FAIL;
201 	char *parent = NULL, *child = NULL;
202 
203 	if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
204 	    cg_write(root, "cgroup.subtree_control", "+cpu")) {
205 		ret = KSFT_SKIP;
206 		goto cleanup;
207 	}
208 
209 	parent = cg_name(root, "cg_test_parent");
210 	child = cg_name(root, "cg_test_parent/cg_test_child");
211 	if (!parent || !child)
212 		goto cleanup;
213 
214 	if (cg_create(parent))
215 		goto cleanup;
216 
217 	if (cg_create(child))
218 		goto cleanup;
219 
220 	if (cg_write(parent, "cgroup.type", "threaded"))
221 		goto cleanup;
222 
223 	if (cg_write(child, "cgroup.type", "threaded"))
224 		goto cleanup;
225 
226 	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
227 		goto cleanup;
228 
229 	if (cg_enter_current(parent))
230 		goto cleanup;
231 
232 	ret = KSFT_PASS;
233 
234 cleanup:
235 	cg_enter_current(root);
236 	cg_enter_current(root);
237 	if (child)
238 		cg_destroy(child);
239 	if (parent)
240 		cg_destroy(parent);
241 	free(child);
242 	free(parent);
243 	return ret;
244 }
245 
246 /*
247  * Test that you can't enable a controller on a child if it's not enabled
248  * on the parent.
249  */
250 static int test_cgcore_top_down_constraint_enable(const char *root)
251 {
252 	int ret = KSFT_FAIL;
253 	char *parent = NULL, *child = NULL;
254 
255 	parent = cg_name(root, "cg_test_parent");
256 	child = cg_name(root, "cg_test_parent/cg_test_child");
257 	if (!parent || !child)
258 		goto cleanup;
259 
260 	if (cg_create(parent))
261 		goto cleanup;
262 
263 	if (cg_create(child))
264 		goto cleanup;
265 
266 	if (!cg_write(child, "cgroup.subtree_control", "+memory"))
267 		goto cleanup;
268 
269 	ret = KSFT_PASS;
270 
271 cleanup:
272 	if (child)
273 		cg_destroy(child);
274 	if (parent)
275 		cg_destroy(parent);
276 	free(child);
277 	free(parent);
278 	return ret;
279 }
280 
281 /*
282  * Test that you can't disable a controller on a parent
283  * if it's enabled in a child.
284  */
285 static int test_cgcore_top_down_constraint_disable(const char *root)
286 {
287 	int ret = KSFT_FAIL;
288 	char *parent = NULL, *child = NULL;
289 
290 	parent = cg_name(root, "cg_test_parent");
291 	child = cg_name(root, "cg_test_parent/cg_test_child");
292 	if (!parent || !child)
293 		goto cleanup;
294 
295 	if (cg_create(parent))
296 		goto cleanup;
297 
298 	if (cg_create(child))
299 		goto cleanup;
300 
301 	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
302 		goto cleanup;
303 
304 	if (cg_write(child, "cgroup.subtree_control", "+memory"))
305 		goto cleanup;
306 
307 	if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
308 		goto cleanup;
309 
310 	ret = KSFT_PASS;
311 
312 cleanup:
313 	if (child)
314 		cg_destroy(child);
315 	if (parent)
316 		cg_destroy(parent);
317 	free(child);
318 	free(parent);
319 	return ret;
320 }
321 
322 /*
323  * Test internal process constraint.
324  * You can't add a pid to a domain parent if a controller is enabled.
325  */
326 static int test_cgcore_internal_process_constraint(const char *root)
327 {
328 	int ret = KSFT_FAIL;
329 	char *parent = NULL, *child = NULL;
330 
331 	parent = cg_name(root, "cg_test_parent");
332 	child = cg_name(root, "cg_test_parent/cg_test_child");
333 	if (!parent || !child)
334 		goto cleanup;
335 
336 	if (cg_create(parent))
337 		goto cleanup;
338 
339 	if (cg_create(child))
340 		goto cleanup;
341 
342 	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
343 		goto cleanup;
344 
345 	if (!cg_enter_current(parent))
346 		goto cleanup;
347 
348 	ret = KSFT_PASS;
349 
350 cleanup:
351 	if (child)
352 		cg_destroy(child);
353 	if (parent)
354 		cg_destroy(parent);
355 	free(child);
356 	free(parent);
357 	return ret;
358 }
359 
/* Parked thread body: block until a signal (e.g. cancellation) arrives. */
static void *dummy_thread_fn(void *arg)
{
	int rc = pause();

	return (void *)(size_t)rc;
}
364 
365 /*
366  * Test threadgroup migration.
367  * All threads of a process are migrated together.
368  */
369 static int test_cgcore_proc_migration(const char *root)
370 {
371 	int ret = KSFT_FAIL;
372 	int t, c_threads, n_threads = 13;
373 	char *src = NULL, *dst = NULL;
374 	pthread_t threads[n_threads];
375 
376 	src = cg_name(root, "cg_src");
377 	dst = cg_name(root, "cg_dst");
378 	if (!src || !dst)
379 		goto cleanup;
380 
381 	if (cg_create(src))
382 		goto cleanup;
383 	if (cg_create(dst))
384 		goto cleanup;
385 
386 	if (cg_enter_current(src))
387 		goto cleanup;
388 
389 	for (c_threads = 0; c_threads < n_threads; ++c_threads) {
390 		if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
391 			goto cleanup;
392 	}
393 
394 	cg_enter_current(dst);
395 	if (cg_read_lc(dst, "cgroup.threads") != n_threads + 1)
396 		goto cleanup;
397 
398 	ret = KSFT_PASS;
399 
400 cleanup:
401 	for (t = 0; t < c_threads; ++t) {
402 		pthread_cancel(threads[t]);
403 	}
404 
405 	for (t = 0; t < c_threads; ++t) {
406 		pthread_join(threads[t], NULL);
407 	}
408 
409 	cg_enter_current(root);
410 
411 	if (dst)
412 		cg_destroy(dst);
413 	if (src)
414 		cg_destroy(src);
415 	free(dst);
416 	free(src);
417 	return ret;
418 }
419 
/*
 * Worker for test_cgcore_thread_migration: bounce this thread between
 * grps[1] and grps[2] and check /proc/self/cgroup tracks each move.
 * grps[0] is the cgroup root prefix that is stripped for matching.
 */
static void *migrating_thread_fn(void *arg)
{
	char **grps = arg;
	char lines[3][PATH_MAX];
	int g, iter, idx;

	/* Pre-render the expected "0::<relative path>" lines once. */
	for (g = 1; g < 3; ++g)
		snprintf(lines[g], sizeof(lines[g]), "0::%s", grps[g] + strlen(grps[0]));

	for (iter = 0; iter < 1000; ++iter) {
		idx = (iter % 2) + 1;
		cg_enter_current_thread(grps[idx]);
		if (proc_read_strstr(0, 1, "cgroup", lines[idx]))
			return (void *)-1;
	}
	return NULL;
}
437 
438 /*
439  * Test single thread migration.
440  * Threaded cgroups allow successful migration of a thread.
441  */
442 static int test_cgcore_thread_migration(const char *root)
443 {
444 	int ret = KSFT_FAIL;
445 	char *dom = NULL;
446 	char line[PATH_MAX];
447 	char *grps[3] = { (char *)root, NULL, NULL };
448 	pthread_t thr;
449 	void *retval;
450 
451 	dom = cg_name(root, "cg_dom");
452 	grps[1] = cg_name(root, "cg_dom/cg_src");
453 	grps[2] = cg_name(root, "cg_dom/cg_dst");
454 	if (!grps[1] || !grps[2] || !dom)
455 		goto cleanup;
456 
457 	if (cg_create(dom))
458 		goto cleanup;
459 	if (cg_create(grps[1]))
460 		goto cleanup;
461 	if (cg_create(grps[2]))
462 		goto cleanup;
463 
464 	if (cg_write(grps[1], "cgroup.type", "threaded"))
465 		goto cleanup;
466 	if (cg_write(grps[2], "cgroup.type", "threaded"))
467 		goto cleanup;
468 
469 	if (cg_enter_current(grps[1]))
470 		goto cleanup;
471 
472 	if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
473 		goto cleanup;
474 
475 	if (pthread_join(thr, &retval))
476 		goto cleanup;
477 
478 	if (retval)
479 		goto cleanup;
480 
481 	snprintf(line, sizeof(line), "0::%s", grps[1] + strlen(grps[0]));
482 	if (proc_read_strstr(0, 1, "cgroup", line))
483 		goto cleanup;
484 
485 	ret = KSFT_PASS;
486 
487 cleanup:
488 	cg_enter_current(root);
489 	if (grps[2])
490 		cg_destroy(grps[2]);
491 	if (grps[1])
492 		cg_destroy(grps[1]);
493 	if (dom)
494 		cg_destroy(dom);
495 	free(grps[2]);
496 	free(grps[1]);
497 	free(dom);
498 	return ret;
499 }
500 
/* Pair a test function with its stringified name for kselftest reporting. */
#define T(x) { x, #x }
struct corecg_test {
	int (*fn)(const char *root);	/* test entry point; takes cgroup v2 root path */
	const char *name;		/* human-readable name printed in results */
} tests[] = {
	T(test_cgcore_internal_process_constraint),
	T(test_cgcore_top_down_constraint_enable),
	T(test_cgcore_top_down_constraint_disable),
	T(test_cgcore_no_internal_process_constraint_on_threads),
	T(test_cgcore_parent_becomes_threaded),
	T(test_cgcore_invalid_domain),
	T(test_cgcore_populated),
	T(test_cgcore_proc_migration),
	T(test_cgcore_thread_migration),
};
#undef T
517 
518 int main(int argc, char *argv[])
519 {
520 	char root[PATH_MAX];
521 	int i, ret = EXIT_SUCCESS;
522 
523 	if (cg_find_unified_root(root, sizeof(root)))
524 		ksft_exit_skip("cgroup v2 isn't mounted\n");
525 
526 	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
527 		if (cg_write(root, "cgroup.subtree_control", "+memory"))
528 			ksft_exit_skip("Failed to set memory controller\n");
529 
530 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
531 		switch (tests[i].fn(root)) {
532 		case KSFT_PASS:
533 			ksft_test_result_pass("%s\n", tests[i].name);
534 			break;
535 		case KSFT_SKIP:
536 			ksft_test_result_skip("%s\n", tests[i].name);
537 			break;
538 		default:
539 			ret = EXIT_FAILURE;
540 			ksft_test_result_fail("%s\n", tests[i].name);
541 			break;
542 		}
543 	}
544 
545 	return ret;
546 }
547