/* SPDX-License-Identifier: GPL-2.0 */

#define _GNU_SOURCE
#include <linux/limits.h>
#include <linux/sched.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <pthread.h>

#include "../kselftest.h"
#include "cgroup_util.h"

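/* Whether cgroup2 is mounted with the "nsdelegate" option (reported by cg_find_unified_root()). */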
static bool nsdelegate;

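/*
 * Fault in every page of an anonymous mapping by filling it with data
 * read from /dev/urandom, so the memory is actually allocated and charged.
 */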
static int touch_anon(char *buf, size_t size)
{
	int fd;
	char *pos = buf;

	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0)
		return -1;

	while (size > 0) {
		ssize_t ret = read(fd, pos, size);

		if (ret < 0) {
			if (errno != EINTR) {
				close(fd);
				return -1;
			}
		} else {
			pos += ret;
			size -= ret;
		}
	}
	close(fd);

	return 0;
}

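/*
 * Allocate and touch @arg bytes of anonymous memory, then linger without
 * exiting so the process can be killed while still holding the memory.
 */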
static int alloc_and_touch_anon_noexit(const char *cgroup, void *arg)
{
	int ppid = getppid();
	size_t size = (size_t)arg;
	void *buf;

	buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
		   0, 0);
	if (buf == MAP_FAILED)
		return -1;

	if (touch_anon((char *)buf, size)) {
		munmap(buf, size);
		return -1;
	}

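	/* Linger until the parent exits and our ppid changes. */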
	while (getppid() == ppid)
		sleep(1);

	munmap(buf, size);
	return 0;
}

/*
 * Create a child process that allocates and touches 100MB, then waits to be
 * killed. Wait until the child is attached to the cgroup, kill all processes
 * in that cgroup and wait until "cgroup.procs" is empty. At this point try to
 * destroy the empty cgroup. The test helps detect race conditions between
 * dying processes leaving the cgroup and the cgroup destruction path.
 */
static int test_cgcore_destroy(const char *root)
{
	int ret = KSFT_FAIL;
	char *cg_test = NULL;
	int child_pid;
	char buf[PAGE_SIZE];

	cg_test = cg_name(root, "cg_test");

	if (!cg_test)
		goto cleanup;

	for (int i = 0; i < 10; i++) {
		if (cg_create(cg_test))
			goto cleanup;

		child_pid = cg_run_nowait(cg_test, alloc_and_touch_anon_noexit,
					  (void *) MB(100));

		if (child_pid < 0)
			goto cleanup;

		/* wait for the child to enter cgroup */
		if (cg_wait_for_proc_count(cg_test, 1))
			goto cleanup;

		if (cg_killall(cg_test))
			goto cleanup;

		/* wait for cgroup to be empty */
		while (1) {
			if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf)))
				goto cleanup;
			if (buf[0] == '\0')
				break;
			usleep(1000);
		}

		if (rmdir(cg_test))
			goto cleanup;

		if (waitpid(child_pid, NULL, 0) < 0)
			goto cleanup;
	}
	ret = KSFT_PASS;
cleanup:
	if (cg_test)
		cg_destroy(cg_test);
	free(cg_test);
	return ret;
}

/*
 * A(0) - B(0) - C(1)
 *        \ D(0)
 *
 * A, B and C's "populated" fields would be 1 while D's would be 0.
 * Test that after the one process in C is moved to root,
 * A, B and C's "populated" fields flip to "0" and file
 * modified events are generated on their "cgroup.events" files.
 */
static int test_cgcore_populated(const char *root)
{
	int ret = KSFT_FAIL;
	int err;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_c = NULL, *cg_test_d = NULL;
	int cgroup_fd = -EBADF;
	pid_t pid;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
	cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
	cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");

	if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
		goto cleanup;

	if (cg_create(cg_test_a))
		goto cleanup;

	if (cg_create(cg_test_b))
		goto cleanup;

	if (cg_create(cg_test_c))
		goto cleanup;

	if (cg_create(cg_test_d))
		goto cleanup;

	if (cg_enter_current(cg_test_c))
		goto cleanup;

	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_enter_current(root))
		goto cleanup;

	if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	/* Test that we can directly clone into a new cgroup. */
	cgroup_fd = dirfd_open_opath(cg_test_d);
	if (cgroup_fd < 0)
		goto cleanup;

	pid = clone_into_cgroup(cgroup_fd);
	if (pid < 0) {
		if (errno == ENOSYS)
			goto cleanup_pass;
		goto cleanup;
	}

	if (pid == 0) {
		if (raise(SIGSTOP))
			exit(EXIT_FAILURE);
		exit(EXIT_SUCCESS);
	}

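	/*
	 * The stopped child still populates the cgroup; check that before
	 * resuming and reaping it.
	 */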
	err = cg_read_strcmp(cg_test_d, "cgroup.events", "populated 1\n");

	(void)clone_reap(pid, WSTOPPED);
	(void)kill(pid, SIGCONT);
	(void)clone_reap(pid, WEXITED);

	if (err)
		goto cleanup;

	if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
		goto cleanup;

	/* Remove cgroup. */
	if (cg_test_d) {
		cg_destroy(cg_test_d);
		free(cg_test_d);
		cg_test_d = NULL;
	}

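	/* Cloning into a removed cgroup must fail. */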
	pid = clone_into_cgroup(cgroup_fd);
	if (pid < 0)
		goto cleanup_pass;
	if (pid == 0)
		exit(EXIT_SUCCESS);
	(void)clone_reap(pid, WEXITED);
	goto cleanup;

cleanup_pass:
	ret = KSFT_PASS;

cleanup:
	if (cg_test_d)
		cg_destroy(cg_test_d);
	if (cg_test_c)
		cg_destroy(cg_test_c);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_d);
	free(cg_test_c);
	free(cg_test_b);
	free(cg_test_a);
	if (cgroup_fd >= 0)
		close(cgroup_fd);
	return ret;
}

/*
 * A (domain threaded) - B (threaded) - C (domain)
 *
 * Test that C can't be used until it is turned into a
 * threaded cgroup. Its "cgroup.type" file will report "domain invalid" in
 * these cases. Operations which fail due to invalid topology use
 * EOPNOTSUPP as the errno.
 */
static int test_cgcore_invalid_domain(const char *root)
{
	int ret = KSFT_FAIL;
	char *grandparent = NULL, *parent = NULL, *child = NULL;

	grandparent = cg_name(root, "cg_test_grandparent");
	parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
	child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
	if (!parent || !child || !grandparent)
		goto cleanup;

	if (cg_create(grandparent))
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
		goto cleanup;

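	/* Attaching to an invalid domain must fail with EOPNOTSUPP. */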
	if (!cg_enter_current(child))
		goto cleanup;

	if (errno != EOPNOTSUPP)
		goto cleanup;

	if (!clone_into_cgroup_run_wait(child))
		goto cleanup;

	if (errno == ENOSYS)
		goto cleanup_pass;

	if (errno != EOPNOTSUPP)
		goto cleanup;

cleanup_pass:
	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	if (grandparent)
		cg_destroy(grandparent);
	free(child);
	free(parent);
	free(grandparent);
	return ret;
}

/*
 * Test that when a child becomes threaded
 * the parent type becomes domain threaded.
 */
static int test_cgcore_parent_becomes_threaded(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(child, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}

/*
 * Test that there's no internal process constraint on threaded cgroups.
 * You can add threads/processes to a parent with a controller enabled.
 */
static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
	    cg_write(root, "cgroup.subtree_control", "+cpu")) {
		ret = KSFT_SKIP;
		goto cleanup;
	}

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_write(child, "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
		goto cleanup;

	if (cg_enter_current(parent))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}

/*
 * Test that you can't enable a controller on a child if it's not enabled
 * on the parent.
 */
static int test_cgcore_top_down_constraint_enable(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

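	/* Enabling memory in the child must fail: the parent hasn't enabled it. */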
	if (!cg_write(child, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}

/*
 * Test that you can't disable a controller on a parent
 * if it's enabled in a child.
 */
static int test_cgcore_top_down_constraint_disable(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (cg_write(child, "cgroup.subtree_control", "+memory"))
		goto cleanup;

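	/* The child still has memory enabled, so disabling it in the parent must fail. */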
	if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}

/*
 * Test internal process constraint.
 * You can't add a pid to a domain parent if a controller is enabled.
 */
static int test_cgcore_internal_process_constraint(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent = NULL, *child = NULL;

	parent = cg_name(root, "cg_test_parent");
	child = cg_name(root, "cg_test_parent/cg_test_child");
	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
		goto cleanup;

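	/* parent now delegates memory to its children; attaching to it must fail. */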
	if (!cg_enter_current(parent))
		goto cleanup;

	if (!clone_into_cgroup_run_wait(parent))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);
	return ret;
}

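/* A thread body that just blocks; it only exists to be migrated. */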
static void *dummy_thread_fn(void *arg)
{
	return (void *)(size_t)pause();
}

/*
 * Test threadgroup migration.
 * All threads of a process are migrated together.
 */
static int test_cgcore_proc_migration(const char *root)
{
	int ret = KSFT_FAIL;
	int t, c_threads = 0, n_threads = 13;
	char *src = NULL, *dst = NULL;
	pthread_t threads[n_threads];

	src = cg_name(root, "cg_src");
	dst = cg_name(root, "cg_dst");
	if (!src || !dst)
		goto cleanup;

	if (cg_create(src))
		goto cleanup;
	if (cg_create(dst))
		goto cleanup;

	if (cg_enter_current(src))
		goto cleanup;

	for (c_threads = 0; c_threads < n_threads; ++c_threads) {
		if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
			goto cleanup;
	}

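	/* Migrating the process must move all n_threads plus the main thread. */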
	cg_enter_current(dst);
	if (cg_read_lc(dst, "cgroup.threads") != n_threads + 1)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	for (t = 0; t < c_threads; ++t) {
		pthread_cancel(threads[t]);
	}

	for (t = 0; t < c_threads; ++t) {
		pthread_join(threads[t], NULL);
	}

	cg_enter_current(root);

	if (dst)
		cg_destroy(dst);
	if (src)
		cg_destroy(src);
	free(dst);
	free(src);
	return ret;
}

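/*
 * Bounce the calling thread between grps[1] and grps[2], checking after
 * every move that its /proc cgroup entry ("0::<path>") matches.
 */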
static void *migrating_thread_fn(void *arg)
{
	int g, i, n_iterations = 1000;
	char **grps = arg;
	char lines[3][PATH_MAX];

	for (g = 1; g < 3; ++g)
		snprintf(lines[g], sizeof(lines[g]), "0::%s", grps[g] + strlen(grps[0]));

	for (i = 0; i < n_iterations; ++i) {
		cg_enter_current_thread(grps[(i % 2) + 1]);

		if (proc_read_strstr(0, 1, "cgroup", lines[(i % 2) + 1]))
			return (void *)-1;
	}
	return NULL;
}

/*
 * Test single thread migration.
 * Threaded cgroups allow successful migration of a thread.
 */
static int test_cgcore_thread_migration(const char *root)
{
	int ret = KSFT_FAIL;
	char *dom = NULL;
	char line[PATH_MAX];
	char *grps[3] = { (char *)root, NULL, NULL };
	pthread_t thr;
	void *retval;

	dom = cg_name(root, "cg_dom");
	grps[1] = cg_name(root, "cg_dom/cg_src");
	grps[2] = cg_name(root, "cg_dom/cg_dst");
	if (!grps[1] || !grps[2] || !dom)
		goto cleanup;

	if (cg_create(dom))
		goto cleanup;
	if (cg_create(grps[1]))
		goto cleanup;
	if (cg_create(grps[2]))
		goto cleanup;

	if (cg_write(grps[1], "cgroup.type", "threaded"))
		goto cleanup;
	if (cg_write(grps[2], "cgroup.type", "threaded"))
		goto cleanup;

	if (cg_enter_current(grps[1]))
		goto cleanup;

	if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
		goto cleanup;

	if (pthread_join(thr, &retval))
		goto cleanup;

	if (retval)
		goto cleanup;

	snprintf(line, sizeof(line), "0::%s", grps[1] + strlen(grps[0]));
	if (proc_read_strstr(0, 1, "cgroup", line))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (grps[2])
		cg_destroy(grps[2]);
	if (grps[1])
		cg_destroy(grps[1]);
	if (dom)
		cg_destroy(dom);
	free(grps[2]);
	free(grps[1]);
	free(dom);
	return ret;
}

/*
 * cgroup migration permission check should be performed based on the
 * credentials at the time of open instead of write.
 */
static int test_cgcore_lesser_euid_open(const char *root)
{
	const uid_t test_euid = TEST_UID;
	int ret = KSFT_FAIL;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
	int cg_test_b_procs_fd = -1;
	uid_t saved_uid;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_b");

	if (!cg_test_a || !cg_test_b)
		goto cleanup;

	cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
	cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

	if (!cg_test_a_procs || !cg_test_b_procs)
		goto cleanup;

	if (cg_create(cg_test_a) || cg_create(cg_test_b))
		goto cleanup;

	if (cg_enter_current(cg_test_a))
		goto cleanup;

	if (chown(cg_test_a_procs, test_euid, -1) ||
	    chown(cg_test_b_procs, test_euid, -1))
		goto cleanup;

	saved_uid = geteuid();
	if (seteuid(test_euid))
		goto cleanup;

	cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);

	if (seteuid(saved_uid))
		goto cleanup;

	if (cg_test_b_procs_fd < 0)
		goto cleanup;

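	/*
	 * The fd was opened with the lesser euid, so the write must fail
	 * with EACCES even though our credentials have been restored.
	 */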
	if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (cg_test_b_procs_fd >= 0)
		close(cg_test_b_procs_fd);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_b_procs);
	free(cg_test_a_procs);
	free(cg_test_b);
	free(cg_test_a);
	return ret;
}

struct lesser_ns_open_thread_arg {
	const char *path;
	int fd;
	int err;
};

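/*
 * Runs in a new cgroup namespace; opens @path and reports the fd and
 * errno back through the shared (CLONE_VM) argument block.
 */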
static int lesser_ns_open_thread_fn(void *arg)
{
	struct lesser_ns_open_thread_arg *targ = arg;

	targ->fd = open(targ->path, O_RDWR);
	targ->err = errno;
	return 0;
}

/*
 * cgroup migration permission check should be performed based on the cgroup
 * namespace at the time of open instead of write.
 */
static int test_cgcore_lesser_ns_open(const char *root)
{
	static char stack[65536];
	const uid_t test_euid = 65534;	/* usually nobody, any !root is fine */
	int ret = KSFT_FAIL;
	char *cg_test_a = NULL, *cg_test_b = NULL;
	char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
	int cg_test_b_procs_fd = -1;
	struct lesser_ns_open_thread_arg targ = { .fd = -1 };
	pid_t pid;
	int status;

	if (!nsdelegate)
		return KSFT_SKIP;

	cg_test_a = cg_name(root, "cg_test_a");
	cg_test_b = cg_name(root, "cg_test_b");

	if (!cg_test_a || !cg_test_b)
		goto cleanup;

	cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
	cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

	if (!cg_test_a_procs || !cg_test_b_procs)
		goto cleanup;

	if (cg_create(cg_test_a) || cg_create(cg_test_b))
		goto cleanup;

	if (cg_enter_current(cg_test_b))
		goto cleanup;

	if (chown(cg_test_a_procs, test_euid, -1) ||
	    chown(cg_test_b_procs, test_euid, -1))
		goto cleanup;

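	/* Child stacks grow down, so pass clone() the top of the stack buffer. */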
	targ.path = cg_test_b_procs;
	pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
		    CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
		    &targ);
	if (pid < 0)
		goto cleanup;

	if (waitpid(pid, &status, 0) < 0)
		goto cleanup;

	if (!WIFEXITED(status))
		goto cleanup;

	cg_test_b_procs_fd = targ.fd;
	if (cg_test_b_procs_fd < 0)
		goto cleanup;

	if (cg_enter_current(cg_test_a))
		goto cleanup;

	if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_enter_current(root);
	if (cg_test_b_procs_fd >= 0)
		close(cg_test_b_procs_fd);
	if (cg_test_b)
		cg_destroy(cg_test_b);
	if (cg_test_a)
		cg_destroy(cg_test_a);
	free(cg_test_b_procs);
	free(cg_test_a_procs);
	free(cg_test_b);
	free(cg_test_a);
	return ret;
}

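/* Pair each test function with its name for the ksft result report. */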
#define T(x) { x, #x }
struct corecg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_cgcore_internal_process_constraint),
	T(test_cgcore_top_down_constraint_enable),
	T(test_cgcore_top_down_constraint_disable),
	T(test_cgcore_no_internal_process_constraint_on_threads),
	T(test_cgcore_parent_becomes_threaded),
	T(test_cgcore_invalid_domain),
	T(test_cgcore_populated),
	T(test_cgcore_proc_migration),
	T(test_cgcore_thread_migration),
	T(test_cgcore_destroy),
	T(test_cgcore_lesser_euid_open),
	T(test_cgcore_lesser_ns_open),
};
#undef T

int main(int argc, char *argv[])
{
	char root[PATH_MAX];
	int i, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root), &nsdelegate))
		ksft_exit_skip("cgroup v2 isn't mounted\n");

	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
		if (cg_write(root, "cgroup.subtree_control", "+memory"))
			ksft_exit_skip("Failed to set memory controller\n");

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	return ret;
}