// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 *
 * Test code for seccomp bpf.
 */

#define _GNU_SOURCE
#include <sys/types.h>

/*
 * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
 * we need to use the kernel's siginfo.h file and trick glibc
 * into accepting it.
 */
#if !__GLIBC_PREREQ(2, 26)
# include <asm/siginfo.h>
# define __have_siginfo_t 1
# define __have_sigval_t 1
# define __have_sigevent_t 1
#endif

#include <errno.h>
#include <linux/filter.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/seccomp.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <limits.h>
#include <linux/elf.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <sys/times.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/kcmp.h>
#include <sys/resource.h>
#include <sys/capability.h>

#include <unistd.h>
#include <sys/syscall.h>
#include <poll.h>

#include "../kselftest_harness.h"
#include "../clone3/clone3_selftests.h"

/* Attempt to de-conflict with the selftests tree. */
#ifndef SKIP
#define SKIP(s, ...)	XFAIL(s, ##__VA_ARGS__)
#endif

#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))

#ifndef PR_SET_PTRACER
# define PR_SET_PTRACER 0x59616d61
#endif

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif

#ifndef PR_SECCOMP_EXT
#define PR_SECCOMP_EXT 43
#endif

#ifndef SECCOMP_EXT_ACT
#define SECCOMP_EXT_ACT 1
#endif

#ifndef SECCOMP_EXT_ACT_TSYNC
#define SECCOMP_EXT_ACT_TSYNC 1
#endif

#ifndef SECCOMP_MODE_STRICT
#define SECCOMP_MODE_STRICT 1
#endif

#ifndef SECCOMP_MODE_FILTER
#define SECCOMP_MODE_FILTER 2
#endif

#ifndef SECCOMP_RET_ALLOW
struct seccomp_data {
	int nr;
	__u32 arch;
	__u64 instruction_pointer;
	__u64 args[6];
};
#endif

#ifndef SECCOMP_RET_KILL_PROCESS
#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
#define SECCOMP_RET_KILL_THREAD	 0x00000000U /* kill the thread */
#endif
#ifndef SECCOMP_RET_KILL
#define SECCOMP_RET_KILL	 SECCOMP_RET_KILL_THREAD
#define SECCOMP_RET_TRAP	 0x00030000U /* disallow and force a SIGSYS */
#define SECCOMP_RET_ERRNO	 0x00050000U /* returns an errno */
#define SECCOMP_RET_TRACE	 0x7ff00000U /* pass to a tracer or disallow */
#define SECCOMP_RET_ALLOW	 0x7fff0000U /* allow */
#endif
#ifndef SECCOMP_RET_LOG
#define SECCOMP_RET_LOG		 0x7ffc0000U /* allow after logging */
#endif

#ifndef __NR_seccomp
# if defined(__i386__)
#  define __NR_seccomp 354
# elif defined(__x86_64__)
#  define __NR_seccomp 317
# elif defined(__arm__)
#  define __NR_seccomp 383
# elif defined(__aarch64__)
#  define __NR_seccomp 277
# elif defined(__riscv)
#  define __NR_seccomp 277
# elif defined(__csky__)
#  define __NR_seccomp 277
# elif defined(__hppa__)
#  define __NR_seccomp 338
# elif defined(__powerpc__)
#  define __NR_seccomp 358
# elif defined(__s390__)
#  define __NR_seccomp 348
# elif defined(__xtensa__)
#  define __NR_seccomp 337
# elif defined(__sh__)
#  define __NR_seccomp 372
# else
#  warning "seccomp syscall number unknown for this architecture"
#  define __NR_seccomp 0xffff
# endif
#endif

#ifndef SECCOMP_SET_MODE_STRICT
#define SECCOMP_SET_MODE_STRICT 0
#endif

#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif

#ifndef SECCOMP_GET_ACTION_AVAIL
#define SECCOMP_GET_ACTION_AVAIL 2
#endif

#ifndef SECCOMP_GET_NOTIF_SIZES
#define SECCOMP_GET_NOTIF_SIZES 3
#endif

#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
#endif

#ifndef SECCOMP_FILTER_FLAG_LOG
#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
#endif

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif

#ifndef PTRACE_SECCOMP_GET_METADATA
#define PTRACE_SECCOMP_GET_METADATA	0x420d

struct seccomp_metadata {
	__u64 filter_off;	/* Input: which filter */
	__u64 flags;		/* Output: filter's flags */
};
#endif

#ifndef SECCOMP_FILTER_FLAG_NEW_LISTENER
#define SECCOMP_FILTER_FLAG_NEW_LISTENER	(1UL << 3)
#endif

#ifndef SECCOMP_RET_USER_NOTIF
#define SECCOMP_RET_USER_NOTIF 0x7fc00000U

#define SECCOMP_IOC_MAGIC		'!'
#define SECCOMP_IO(nr)			_IO(SECCOMP_IOC_MAGIC, nr)
#define SECCOMP_IOR(nr, type)		_IOR(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOW(nr, type)		_IOW(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOWR(nr, type)		_IOWR(SECCOMP_IOC_MAGIC, nr, type)

/* Flags for seccomp notification fd ioctl. */
#define SECCOMP_IOCTL_NOTIF_RECV	SECCOMP_IOWR(0, struct seccomp_notif)
#define SECCOMP_IOCTL_NOTIF_SEND	SECCOMP_IOWR(1,	\
						struct seccomp_notif_resp)
#define SECCOMP_IOCTL_NOTIF_ID_VALID	SECCOMP_IOW(2, __u64)

struct seccomp_notif {
	__u64 id;
	__u32 pid;
	__u32 flags;
	struct seccomp_data data;
};

struct seccomp_notif_resp {
	__u64 id;
	__s64 val;
	__s32 error;
	__u32 flags;
};

struct seccomp_notif_sizes {
	__u16 seccomp_notif;
	__u16 seccomp_notif_resp;
	__u16 seccomp_data;
};
#endif

#ifndef SECCOMP_IOCTL_NOTIF_ADDFD
/* On success, the return value is the remote process's added fd number */
#define SECCOMP_IOCTL_NOTIF_ADDFD	SECCOMP_IOW(3,	\
						struct seccomp_notif_addfd)

/* valid flags for seccomp_notif_addfd */
#define SECCOMP_ADDFD_FLAG_SETFD	(1UL << 0) /* Specify remote fd */

struct seccomp_notif_addfd {
	__u64 id;
	__u32 flags;
	__u32 srcfd;
	__u32 newfd;
	__u32 newfd_flags;
};
#endif

#ifndef SECCOMP_ADDFD_FLAG_SEND
#define SECCOMP_ADDFD_FLAG_SEND	(1UL << 1) /* Addfd and return it, atomically */
#endif

struct seccomp_notif_addfd_small {
	__u64 id;
	char weird[4];
};
#define SECCOMP_IOCTL_NOTIF_ADDFD_SMALL	\
	SECCOMP_IOW(3, struct seccomp_notif_addfd_small)

struct seccomp_notif_addfd_big {
	union {
		struct seccomp_notif_addfd addfd;
		char buf[sizeof(struct seccomp_notif_addfd) + 8];
	};
};
#define SECCOMP_IOCTL_NOTIF_ADDFD_BIG	\
	SECCOMP_IOWR(3, struct seccomp_notif_addfd_big)

#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY
#define PTRACE_EVENTMSG_SYSCALL_ENTRY	1
#define PTRACE_EVENTMSG_SYSCALL_EXIT	2
#endif

#ifndef SECCOMP_USER_NOTIF_FLAG_CONTINUE
#define SECCOMP_USER_NOTIF_FLAG_CONTINUE 0x00000001
#endif

#ifndef SECCOMP_FILTER_FLAG_TSYNC_ESRCH
#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
#endif

#ifndef SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV
#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5)
#endif

#ifndef seccomp
int seccomp(unsigned int op, unsigned int flags, void *args)
{
	errno = 0;
	return syscall(__NR_seccomp, op, flags, args);
}
#endif

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
#else
#error "wut? Unknown __BYTE_ORDER__?!"
#endif

#define SIBLING_EXIT_UNKILLED	0xbadbeef
#define SIBLING_EXIT_FAILURE	0xbadface
#define SIBLING_EXIT_NEWPRIVS	0xbadfeed

static int __filecmp(pid_t pid1, pid_t pid2, int fd1, int fd2)
{
#ifdef __NR_kcmp
	errno = 0;
	return syscall(__NR_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2);
#else
	errno = ENOSYS;
	return -1;
#endif
}

/* Have TH_LOG report actual location filecmp() is used. */
#define filecmp(pid1, pid2, fd1, fd2)	({		\
	int _ret;					\
							\
	_ret = __filecmp(pid1, pid2, fd1, fd2);		\
	if (_ret != 0) {				\
		if (_ret < 0 && errno == ENOSYS) {	\
			TH_LOG("kcmp() syscall missing (test is less accurate)");\
			_ret = 0;			\
		}					\
	}						\
	_ret; })

TEST(kcmp)
{
	int ret;

	ret = __filecmp(getpid(), getpid(), 1, 1);
	EXPECT_EQ(ret, 0);
	if (ret != 0 && errno == ENOSYS)
		SKIP(return, "Kernel does not support kcmp() (missing CONFIG_KCMP?)");
}

TEST(mode_strict_support)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	syscall(__NR_exit, 0);
}

TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
		NULL, NULL, NULL);
	EXPECT_FALSE(true) {
		TH_LOG("Unreachable!");
	}
}

/* Note! This doesn't test no new privs behavior */
TEST(no_new_privs_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	EXPECT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
}

/* Tests kernel support by checking for a copy_from_user() fault on NULL. */
TEST(mode_filter_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EFAULT, errno) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
	}
}

TEST(mode_filter_without_nnp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	cap_t cap = cap_get_proc();
	cap_flag_value_t is_cap_sys_admin = 0;

	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
	ASSERT_LE(0, ret) {
		TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
	}
	errno = 0;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	/* Succeeds with CAP_SYS_ADMIN, fails without */
	cap_get_flag(cap, CAP_SYS_ADMIN, CAP_EFFECTIVE, &is_cap_sys_admin);
	if (!is_cap_sys_admin) {
		EXPECT_EQ(-1, ret);
		EXPECT_EQ(EACCES, errno);
	} else {
		EXPECT_EQ(0, ret);
	}
}

#define MAX_INSNS_PER_PATH 32768

TEST(filter_size_limits)
{
	int i;
	int count = BPF_MAXINSNS + 1;
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);

	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	prog.len = count;

	/* Too many filter instructions in a single filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_NE(0, ret) {
		TH_LOG("Installing %d insn filter was allowed", prog.len);
	}

	/* One less is okay, though. */
	prog.len -= 1;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
	}
}

TEST(filter_chain_limits)
{
	int i;
	int count = BPF_MAXINSNS;
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);

	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	prog.len = 1;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	prog.len = count;

	/* Too many total filter instructions. */
	for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
		if (ret != 0)
			break;
	}
	ASSERT_NE(0, ret) {
		TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
		       i, count, i * (count + 4));
	}
}

TEST(mode_filter_cannot_move_to_strict)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}


TEST(mode_filter_get_seccomp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(2, ret);
}


TEST(ALLOW_all)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}

TEST(empty_prog)
{
	struct sock_filter filter[] = {
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}

TEST(log_all)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	/* getppid() should succeed and be logged (no check for logging) */
	EXPECT_EQ(parent, syscall(__NR_getppid));
}

TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}

/* return code >= 0x80000000 is unused. */
TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}

TEST_SIGNAL(KILL_all, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}

TEST_SIGNAL(KILL_one, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
{
	void *fatal_address;
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
			(unsigned long)&fatal_address, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	struct tms timebuf;
	clock_t clock = times(&timebuf);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_LE(clock, syscall(__NR_times, &timebuf));
	/* times() should never return. */
	EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
}

TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
{
#ifndef __NR_mmap2
	int sysno = __NR_mmap;
#else
	int sysno = __NR_mmap2;
#endif
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	int fd;
	void *map1, *map2;
	int page_size = sysconf(_SC_PAGESIZE);

	ASSERT_LT(0, page_size);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	fd = open("/dev/zero", O_RDONLY);
	ASSERT_NE(-1, fd);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	map1 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
	EXPECT_NE(MAP_FAILED, map1);
	/* mmap2() should never return. */
	map2 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
	EXPECT_EQ(MAP_FAILED, map2);

	/* The test failed, so clean up the resources. */
	munmap(map1, page_size);
	munmap(map2, page_size);
	close(fd);
}

/* This is a thread task to die via seccomp filter violation. */
void *kill_thread(void *data)
{
	bool die = (bool)data;

	if (die) {
		prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
		return (void *)SIBLING_EXIT_FAILURE;
	}

	return (void *)SIBLING_EXIT_UNKILLED;
}

enum kill_t {
	KILL_THREAD,
	KILL_PROCESS,
	RET_UNKNOWN
};

/* Prepare a thread that will kill itself or both of us. */
void kill_thread_or_group(struct __test_metadata *_metadata,
			  enum kill_t kill_how)
{
	pthread_t thread;
	void *status;
	/* Kill only when calling __NR_prctl. */
	struct sock_filter filter_thread[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_thread = {
		.len = (unsigned short)ARRAY_SIZE(filter_thread),
		.filter = filter_thread,
	};
	int kill = kill_how == KILL_PROCESS ? SECCOMP_RET_KILL_PROCESS : 0xAAAAAAAA;
	struct sock_filter filter_process[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, kill),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_process = {
		.len = (unsigned short)ARRAY_SIZE(filter_process),
		.filter = filter_process,
	};

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0,
			     kill_how == KILL_THREAD ? &prog_thread
						     : &prog_process));

	/*
	 * Add the KILL_THREAD rule again to make sure that the KILL_PROCESS
	 * flag cannot be downgraded by a new filter.
	 */
	if (kill_how == KILL_PROCESS)
		ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread));

	/* Start a thread that will exit immediately. */
	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false));
	ASSERT_EQ(0, pthread_join(thread, &status));
	ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status);

	/* Start a thread that will die immediately. */
	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true));
	ASSERT_EQ(0, pthread_join(thread, &status));
	ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status);

	/*
	 * If we get here, only the spawned thread died. Let the parent know
	 * the whole process didn't die (i.e. this thread, the spawner,
	 * stayed running).
	 */
	exit(42);
}

TEST(KILL_thread)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, KILL_THREAD);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If only the thread was killed, we'll see exit 42. */
	ASSERT_TRUE(WIFEXITED(status));
	ASSERT_EQ(42, WEXITSTATUS(status));
}

TEST(KILL_process)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, KILL_PROCESS);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If the entire process was killed, we'll see SIGSYS. */
	ASSERT_TRUE(WIFSIGNALED(status));
	ASSERT_EQ(SIGSYS, WTERMSIG(status));
}

TEST(KILL_unknown)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, RET_UNKNOWN);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If the entire process was killed, we'll see SIGSYS. */
	EXPECT_TRUE(WIFSIGNALED(status)) {
		TH_LOG("Unknown SECCOMP_RET is only killing the thread?");
	}
	ASSERT_EQ(SIGSYS, WTERMSIG(status));
}

/* TODO(wad) add 64-bit versus 32-bit arg tests. */
TEST(arg_out_of_range)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}

#define ERRNO_FILTER(name, errno)					\
	struct sock_filter _read_filter_##name[] = {			\
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,				\
			offsetof(struct seccomp_data, nr)),		\
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),	\
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno),	\
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),		\
	};								\
	struct sock_fprog prog_##name = {				\
		.len = (unsigned short)ARRAY_SIZE(_read_filter_##name),	\
		.filter = _read_filter_##name,				\
	}

/* Make sure basic errno values are correctly passed through a filter. */
TEST(ERRNO_valid)
{
	ERRNO_FILTER(valid, E2BIG);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(-1, NULL, 0));
	EXPECT_EQ(E2BIG, errno);
}

/* Make sure an errno of zero is correctly handled by the arch code. */
TEST(ERRNO_zero)
{
	ERRNO_FILTER(zero, 0);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* "errno" of 0 is ok. */
	EXPECT_EQ(0, read(-1, NULL, 0));
}

/*
 * The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller.
 * This tests that the errno value gets capped correctly, fixed by
 * 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO").
 */
TEST(ERRNO_capped)
{
	ERRNO_FILTER(capped, 4096);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(-1, NULL, 0));
	EXPECT_EQ(4095, errno);
}

/*
 * Filters are processed in reverse order: last applied is executed first.
 * Since only the SECCOMP_RET_ACTION mask is tested for return values, the
 * SECCOMP_RET_DATA mask results will follow the most recently applied
 * matching filter return (and not the lowest or highest value).
 */
TEST(ERRNO_order)
{
	ERRNO_FILTER(first, 11);
	ERRNO_FILTER(second, 13);
	ERRNO_FILTER(third, 12);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(-1, NULL, 0));
	EXPECT_EQ(12, errno);
}

FIXTURE(TRAP) {
	struct sock_fprog prog;
};

FIXTURE_SETUP(TRAP)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);
}

FIXTURE_TEARDOWN(TRAP)
{
	if (self->prog.filter)
		free(self->prog.filter);
}

TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}

/* Ensure that SIGSYS overrides SIG_IGN */
TEST_F_SIGNAL(TRAP, ign, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	signal(SIGSYS, SIG_IGN);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}

static siginfo_t TRAP_info;
static volatile int TRAP_nr;
static void TRAP_action(int nr, siginfo_t *info, void *void_context)
{
	memcpy(&TRAP_info, info, sizeof(TRAP_info));
	TRAP_nr = nr;
}

TEST_F(TRAP, handler)
{
	int ret, test;
	struct sigaction act;
	sigset_t mask;

	memset(&act, 0, sizeof(act));
	sigemptyset(&mask);
	sigaddset(&mask, SIGSYS);

	act.sa_sigaction = &TRAP_action;
	act.sa_flags = SA_SIGINFO;
	ret = sigaction(SIGSYS, &act, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigaction failed");
	}
	ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigprocmask failed");
	}

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	TRAP_nr = 0;
	memset(&TRAP_info, 0, sizeof(TRAP_info));
	/* Expect the registers to be rolled back. (nr = error) may vary
	 * based on arch. */
	ret = syscall(__NR_getpid);
	/* Silence gcc warning about volatile. */
	test = TRAP_nr;
	EXPECT_EQ(SIGSYS, test);
	struct local_sigsys {
		void *_call_addr;	/* calling user insn */
		int _syscall;		/* triggering system call number */
		unsigned int _arch;	/* AUDIT_ARCH_* of syscall */
	} *sigsys = (struct local_sigsys *)
#ifdef si_syscall
		&(TRAP_info.si_call_addr);
#else
		&TRAP_info.si_pid;
#endif
	EXPECT_EQ(__NR_getpid, sigsys->_syscall);
	/* Make sure arch is non-zero. */
	EXPECT_NE(0, sigsys->_arch);
	EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
}

FIXTURE(precedence) {
	struct sock_fprog allow;
	struct sock_fprog log;
	struct sock_fprog trace;
	struct sock_fprog error;
	struct sock_fprog trap;
	struct sock_fprog kill;
};

FIXTURE_SETUP(precedence)
{
	struct sock_filter allow_insns[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter log_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
	};
	struct sock_filter trace_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
	};
	struct sock_filter error_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
	};
	struct sock_filter trap_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
	};
	struct sock_filter kill_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};

	memset(self, 0, sizeof(*self));
#define FILTER_ALLOC(_x) \
	self->_x.filter = malloc(sizeof(_x##_insns)); \
	ASSERT_NE(NULL, self->_x.filter); \
	memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
	self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
	FILTER_ALLOC(allow);
	FILTER_ALLOC(log);
	FILTER_ALLOC(trace);
	FILTER_ALLOC(error);
	FILTER_ALLOC(trap);
	FILTER_ALLOC(kill);
}

FIXTURE_TEARDOWN(precedence)
{
#define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
	FILTER_FREE(allow);
	FILTER_FREE(log);
	FILTER_FREE(trace);
	FILTER_FREE(error);
	FILTER_FREE(trap);
	FILTER_FREE(kill);
}

TEST_F(precedence, allow_ok)
{
	pid_t parent, res = 0;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	res = syscall(__NR_getppid);
	EXPECT_EQ(parent, res);
}

TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
{
	pid_t parent, res = 0;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	res = syscall(__NR_getppid);
	EXPECT_EQ(parent, res);
	/* getpid() should never return. */
	res = syscall(__NR_getpid);
	EXPECT_EQ(0, res);
}

TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F(precedence, errno_is_third)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F(precedence, errno_is_third_in_any_order)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F(precedence, trace_is_fourth)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* No ptracer */
	EXPECT_EQ(-1, syscall(__NR_getpid));
}

TEST_F(precedence, trace_is_fourth_in_any_order)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* No ptracer */
	EXPECT_EQ(-1, syscall(__NR_getpid));
}

TEST_F(precedence, log_is_fifth)
{
	pid_t mypid, parent;
	long ret;

	mypid = getpid();
	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* Should also work just fine */
	EXPECT_EQ(mypid, syscall(__NR_getpid));
}

TEST_F(precedence, log_is_fifth_in_any_order)
{
	pid_t mypid, parent;
	long ret;

	mypid = getpid();
	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* Should also work just fine */
	EXPECT_EQ(mypid, syscall(__NR_getpid));
}

#ifndef PTRACE_O_TRACESECCOMP
#define PTRACE_O_TRACESECCOMP	0x00000080
#endif

/* Catch the Ubuntu 12.04 value error. */
#if PTRACE_EVENT_SECCOMP != 7
#undef PTRACE_EVENT_SECCOMP
#endif

#ifndef PTRACE_EVENT_SECCOMP
#define PTRACE_EVENT_SECCOMP 7
#endif

#define PTRACE_EVENT_MASK(status) ((status) >> 16)
bool tracer_running;
void tracer_stop(int sig)
{
	tracer_running = false;
}

typedef void tracer_func_t(struct __test_metadata *_metadata,
			   pid_t tracee, int status, void *args);

void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
		  tracer_func_t tracer_func, void *args, bool ptrace_syscall)
{
	int ret = -1;
	struct sigaction action = {
		.sa_handler = tracer_stop,
	};

	/* Allow external shutdown. */
	tracer_running = true;
	ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));

	errno = 0;
	while (ret == -1 && errno != EINVAL)
		ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
	ASSERT_EQ(0, ret) {
		kill(tracee, SIGKILL);
	}
	/* Wait for attach stop */
	wait(NULL);

	ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ?
						      PTRACE_O_TRACESYSGOOD :
						      PTRACE_O_TRACESECCOMP);
	ASSERT_EQ(0, ret) {
		TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
		kill(tracee, SIGKILL);
	}
	ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
		     tracee, NULL, 0);
	ASSERT_EQ(0, ret);

	/* Unblock the tracee */
	ASSERT_EQ(1, write(fd, "A", 1));
	ASSERT_EQ(0, close(fd));

	/* Run until we're shut down. Must assert to stop execution. */
	while (tracer_running) {
		int status;

		if (wait(&status) != tracee)
			continue;

		if (WIFSIGNALED(status)) {
			/* Child caught a fatal signal. */
			return;
		}
		if (WIFEXITED(status)) {
			/* Child exited with code. */
			return;
		}

		/* Check if we got an expected event. */
		ASSERT_EQ(WIFCONTINUED(status), false);
		ASSERT_EQ(WIFSTOPPED(status), true);
		ASSERT_EQ(WSTOPSIG(status) & SIGTRAP, SIGTRAP) {
			TH_LOG("Unexpected WSTOPSIG: %d", WSTOPSIG(status));
		}

		tracer_func(_metadata, tracee, status, args);

		ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
			     tracee, NULL, 0);
		ASSERT_EQ(0, ret);
	}
	/* Directly report the status of our test harness results. */
	syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
}

/* Common tracer setup/teardown functions. */
void cont_handler(int num)
{ }
pid_t setup_trace_fixture(struct __test_metadata *_metadata,
			  tracer_func_t func, void *args, bool ptrace_syscall)
{
	char sync;
	int pipefd[2];
	pid_t tracer_pid;
	pid_t tracee = getpid();

	/* Setup a pipe for clean synchronization. */
	ASSERT_EQ(0, pipe(pipefd));

	/* Fork a child which we'll promote to tracer */
	tracer_pid = fork();
	ASSERT_LE(0, tracer_pid);
	signal(SIGALRM, cont_handler);
	if (tracer_pid == 0) {
		close(pipefd[0]);
		start_tracer(_metadata, pipefd[1], tracee, func, args,
			     ptrace_syscall);
		syscall(__NR_exit, 0);
	}
	close(pipefd[1]);
	prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
	read(pipefd[0], &sync, 1);
	close(pipefd[0]);

	return tracer_pid;
}

void teardown_trace_fixture(struct __test_metadata *_metadata,
			    pid_t tracer)
{
	if (tracer) {
		int status;
		/*
		 * Extract the exit code from the other process and
		 * adopt it for ourselves in case its asserts failed.
		 */
		ASSERT_EQ(0, kill(tracer, SIGUSR1));
		ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
		if (WEXITSTATUS(status))
			_metadata->passed = 0;
	}
}

/* "poke" tracer arguments and function. */
struct tracer_args_poke_t {
	unsigned long poke_addr;
};

void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
		 void *args)
{
	int ret;
	unsigned long msg;
	struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;

	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);
	/* If this fails, don't try to recover. */
	ASSERT_EQ(0x1001, msg) {
		kill(tracee, SIGKILL);
	}
	/*
	 * Poke in the message.
	 * Registers are not touched to try to keep this relatively arch
	 * agnostic.
	 */
	ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
	EXPECT_EQ(0, ret);
}

FIXTURE(TRACE_poke) {
	struct sock_fprog prog;
	pid_t tracer;
	long poked;
	struct tracer_args_poke_t tracer_args;
};

FIXTURE_SETUP(TRACE_poke)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	self->poked = 0;
	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);

	/* Set up tracer args. */
	self->tracer_args.poke_addr = (unsigned long)&self->poked;

	/* Launch tracer. */
	self->tracer = setup_trace_fixture(_metadata, tracer_poke,
					   &self->tracer_args, false);
}

FIXTURE_TEARDOWN(TRACE_poke)
{
	teardown_trace_fixture(_metadata, self->tracer);
	if (self->prog.filter)
		free(self->prog.filter);
}

TEST_F(TRACE_poke, read_has_side_effects)
{
	ssize_t ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(0, self->poked);
	ret = read(-1, NULL, 0);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(0x1001, self->poked);
}

TEST_F(TRACE_poke, getpid_runs_normally)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(0, self->poked);
	EXPECT_NE(0, syscall(__NR_getpid));
	EXPECT_EQ(0, self->poked);
}

#if defined(__x86_64__)
# define ARCH_REGS		struct user_regs_struct
# define SYSCALL_NUM(_regs)	(_regs).orig_rax
# define SYSCALL_RET(_regs)	(_regs).rax
#elif defined(__i386__)
# define ARCH_REGS		struct user_regs_struct
# define SYSCALL_NUM(_regs)	(_regs).orig_eax
# define SYSCALL_RET(_regs)	(_regs).eax
#elif defined(__arm__)
# define ARCH_REGS		struct pt_regs
# define SYSCALL_NUM(_regs)	(_regs).ARM_r7
# ifndef PTRACE_SET_SYSCALL
#  define PTRACE_SET_SYSCALL	23
# endif
# define SYSCALL_NUM_SET(_regs, _nr)	\
		EXPECT_EQ(0, ptrace(PTRACE_SET_SYSCALL, tracee, NULL, _nr))
# define SYSCALL_RET(_regs)	(_regs).ARM_r0
#elif defined(__aarch64__)
# define ARCH_REGS		struct user_pt_regs
# define SYSCALL_NUM(_regs)	(_regs).regs[8]
# ifndef NT_ARM_SYSTEM_CALL
#  define NT_ARM_SYSTEM_CALL	0x404
# endif
# define SYSCALL_NUM_SET(_regs, _nr)				\
	do {							\
		struct iovec __v;				\
		typeof(_nr) __nr = (_nr);			\
		__v.iov_base = &__nr;				\
		__v.iov_len = sizeof(__nr);			\
		EXPECT_EQ(0, ptrace(PTRACE_SETREGSET, tracee,	\
				    NT_ARM_SYSTEM_CALL, &__v));	\
	} while (0)
# define SYSCALL_RET(_regs)	(_regs).regs[0]
#elif defined(__riscv) && __riscv_xlen == 64
# define ARCH_REGS		struct user_regs_struct
# define SYSCALL_NUM(_regs)	(_regs).a7
# define SYSCALL_RET(_regs)	(_regs).a0
#elif defined(__csky__)
# define ARCH_REGS		struct pt_regs
# if defined(__CSKYABIV2__)
#  define SYSCALL_NUM(_regs)	(_regs).regs[3]
# else
#  define SYSCALL_NUM(_regs)	(_regs).regs[9]
# endif
# define SYSCALL_RET(_regs)	(_regs).a0
#elif defined(__hppa__)
# define ARCH_REGS		struct user_regs_struct
# define SYSCALL_NUM(_regs)	(_regs).gr[20]
# define SYSCALL_RET(_regs)	(_regs).gr[28]
#elif defined(__powerpc__)
# define ARCH_REGS		struct pt_regs
# define SYSCALL_NUM(_regs)	(_regs).gpr[0]
# define SYSCALL_RET(_regs)	(_regs).gpr[3]
# define SYSCALL_RET_SET(_regs, _val)				\
	do {							\
		typeof(_val) _result = (_val);			\
		if ((_regs.trap & 0xfff0) == 0x3000) {		\
			/*					\
			 * scv 0 system call uses -ve result	\
			 * for error, so no need to adjust.	\
			 */					\
			SYSCALL_RET(_regs) = _result;		\
		} else {					\
			/*					\
			 * A syscall error is signaled by the	\
			 * CR0 SO bit and the code is stored as	\
			 * a positive value.			\
			 */					\
			if (_result < 0) {			\
				SYSCALL_RET(_regs) = -_result;	\
				(_regs).ccr |= 0x10000000;	\
			} else {				\
				SYSCALL_RET(_regs) = _result;	\
				(_regs).ccr &= ~0x10000000;	\
			}					\
		}						\
	} while (0)
# define SYSCALL_RET_SET_ON_PTRACE_EXIT
#elif defined(__s390__)
# define ARCH_REGS		s390_regs
# define SYSCALL_NUM(_regs)	(_regs).gprs[2]
# define SYSCALL_RET_SET(_regs, _val)			\
		TH_LOG("Can't modify syscall return on this architecture")
#elif defined(__mips__)
# include <asm/unistd_nr_n32.h>
# include <asm/unistd_nr_n64.h>
# include <asm/unistd_nr_o32.h>
# define ARCH_REGS		struct pt_regs
# define SYSCALL_NUM(_regs)				\
	({						\
		typeof((_regs).regs[2]) _nr;		\
		if ((_regs).regs[2] == __NR_O32_Linux)	\
			_nr = (_regs).regs[4];		\
		else					\
			_nr = (_regs).regs[2];		\
		_nr;					\
	})
# define SYSCALL_NUM_SET(_regs, _nr)			\
	do {						\
		if ((_regs).regs[2] == __NR_O32_Linux)	\
			(_regs).regs[4] = _nr;		\
		else					\
			(_regs).regs[2] = _nr;		\
	} while (0)
# define SYSCALL_RET_SET(_regs, _val)			\
		TH_LOG("Can't modify syscall return on this architecture")
#elif defined(__xtensa__)
# define ARCH_REGS		struct user_pt_regs
# define SYSCALL_NUM(_regs)	(_regs).syscall
/*
 * On xtensa syscall return value is in the register
 * a2 of the current window which is not fixed.
 */
#define SYSCALL_RET(_regs)	(_regs).a[(_regs).windowbase * 4 + 2]
#elif defined(__sh__)
# define ARCH_REGS		struct pt_regs
# define SYSCALL_NUM(_regs)	(_regs).regs[3]
# define SYSCALL_RET(_regs)	(_regs).regs[0]
#else
# error "Do not know how to find your architecture's registers and syscalls"
#endif

/*
 * Most architectures can change the syscall by just updating the
 * associated register. This is the default if not defined above.
 */
#ifndef SYSCALL_NUM_SET
# define SYSCALL_NUM_SET(_regs, _nr)		\
	do {					\
		SYSCALL_NUM(_regs) = (_nr);	\
	} while (0)
#endif
/*
 * Most architectures can change the syscall return value by just
 * writing to the SYSCALL_RET register. This is the default if not
 * defined above. If an architecture cannot set the return value
 * (for example when the syscall and return value register is
 * shared), report it with TH_LOG() in an arch-specific definition
 * of SYSCALL_RET_SET() above, and leave SYSCALL_RET undefined.
 */
#if !defined(SYSCALL_RET) && !defined(SYSCALL_RET_SET)
# error "One of SYSCALL_RET or SYSCALL_RET_SET is needed for this arch"
#endif
#ifndef SYSCALL_RET_SET
# define SYSCALL_RET_SET(_regs, _val)		\
	do {					\
		SYSCALL_RET(_regs) = (_val);	\
	} while (0)
#endif

/* When the syscall return can't be changed, stub out the tests for it. */
#ifndef SYSCALL_RET
# define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(-1, action)
#else
# define EXPECT_SYSCALL_RETURN(val, action)		\
	do {						\
		errno = 0;				\
		if (val < 0) {				\
			EXPECT_EQ(-1, action);		\
			EXPECT_EQ(-(val), errno);	\
		} else {				\
			EXPECT_EQ(val, action);		\
		}					\
	} while (0)
#endif

/*
 * Some architectures (e.g. powerpc) can only set syscall
 * return values on syscall exit during ptrace.
 */
const bool ptrace_entry_set_syscall_nr = true;
const bool ptrace_entry_set_syscall_ret =
#ifndef SYSCALL_RET_SET_ON_PTRACE_EXIT
	true;
#else
	false;
#endif

/*
 * Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
 * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
 */
#if defined(__x86_64__) || defined(__i386__) || defined(__mips__)
# define ARCH_GETREGS(_regs)	ptrace(PTRACE_GETREGS, tracee, 0, &(_regs))
# define ARCH_SETREGS(_regs)	ptrace(PTRACE_SETREGS, tracee, 0, &(_regs))
#else
# define ARCH_GETREGS(_regs)	({					\
		struct iovec __v;					\
		__v.iov_base = &(_regs);				\
		__v.iov_len = sizeof(_regs);				\
		ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &__v);	\
	})
# define ARCH_SETREGS(_regs)	({					\
		struct iovec __v;					\
		__v.iov_base = &(_regs);				\
		__v.iov_len = sizeof(_regs);				\
		ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &__v);	\
	})
#endif

/* Architecture-specific syscall fetching routine. */
int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
{
	ARCH_REGS regs;

	EXPECT_EQ(0, ARCH_GETREGS(regs)) {
		return -1;
	}

	return SYSCALL_NUM(regs);
}

/* Architecture-specific syscall changing routine. */
void __change_syscall(struct __test_metadata *_metadata,
		      pid_t tracee, long *syscall, long *ret)
{
	ARCH_REGS orig, regs;

	/* Do not get/set registers if we have nothing to do. */
	if (!syscall && !ret)
		return;

	EXPECT_EQ(0, ARCH_GETREGS(regs)) {
		return;
	}
	orig = regs;

	if (syscall)
		SYSCALL_NUM_SET(regs, *syscall);

	if (ret)
		SYSCALL_RET_SET(regs, *ret);

	/* Flush any register changes made. */
	if (memcmp(&orig, &regs, sizeof(orig)) != 0)
		EXPECT_EQ(0, ARCH_SETREGS(regs));
}

/* Change only syscall number. */
void change_syscall_nr(struct __test_metadata *_metadata,
		       pid_t tracee, long syscall)
{
	__change_syscall(_metadata, tracee, &syscall, NULL);
}

/* Change syscall return value (and set syscall number to -1). */
void change_syscall_ret(struct __test_metadata *_metadata,
			pid_t tracee, long ret)
{
	long syscall = -1;

	__change_syscall(_metadata, tracee, &syscall, &ret);
}

void tracer_seccomp(struct __test_metadata *_metadata, pid_t tracee,
		    int status, void *args)
{
	int ret;
	unsigned long msg;

	EXPECT_EQ(PTRACE_EVENT_MASK(status), PTRACE_EVENT_SECCOMP) {
		TH_LOG("Unexpected ptrace event: %d", PTRACE_EVENT_MASK(status));
		return;
	}

	/* Make sure we got the right message. */
	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);

	/* Validate and take action on expected syscalls. */
	switch (msg) {
	case 0x1002:
		/* change getpid to getppid. */
		EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
		change_syscall_nr(_metadata, tracee, __NR_getppid);
		break;
	case 0x1003:
		/* skip gettid with valid return code. */
		EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
		change_syscall_ret(_metadata, tracee, 45000);
		break;
	case 0x1004:
		/* skip openat with error. */
		EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee));
		change_syscall_ret(_metadata, tracee, -ESRCH);
		break;
	case 0x1005:
		/* do nothing (allow getppid) */
		EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
		break;
	default:
		EXPECT_EQ(0, msg) {
			TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
			kill(tracee, SIGKILL);
		}
	}

}

FIXTURE(TRACE_syscall) {
	struct sock_fprog prog;
	pid_t tracer, mytid, mypid, parent;
	long syscall_nr;
};

void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
		   int status, void *args)
{
	int ret;
	unsigned long msg;
	static bool entry;
	long syscall_nr_val, syscall_ret_val;
	long *syscall_nr = NULL, *syscall_ret = NULL;
	FIXTURE_DATA(TRACE_syscall) *self = args;

	EXPECT_EQ(WSTOPSIG(status) & 0x80, 0x80) {
		TH_LOG("Unexpected WSTOPSIG: %d", WSTOPSIG(status));
		return;
	}

	/*
	 * The traditional way to tell PTRACE_SYSCALL entry/exit
	 * is by counting.
	 */
	entry = !entry;

	/* Make sure we got an appropriate message. */
	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(entry ? PTRACE_EVENTMSG_SYSCALL_ENTRY
			: PTRACE_EVENTMSG_SYSCALL_EXIT, msg);

	/*
	 * Some architectures only support setting return values during
	 * syscall exit under ptrace, and on exit the syscall number may
	 * no longer be available. Therefore, save the initial syscall
	 * number here, so it can be examined during both entry and exit
	 * phases.
	 */
	if (entry)
		self->syscall_nr = get_syscall(_metadata, tracee);

	/*
	 * Depending on the architecture's syscall setting abilities, we
	 * pick which things to set during this phase (entry or exit).
	 */
	if (entry == ptrace_entry_set_syscall_nr)
		syscall_nr = &syscall_nr_val;
	if (entry == ptrace_entry_set_syscall_ret)
		syscall_ret = &syscall_ret_val;

	/* Now handle the actual rewriting cases. */
	switch (self->syscall_nr) {
	case __NR_getpid:
		syscall_nr_val = __NR_getppid;
		/* Never change syscall return for this case. */
*/ 2079 syscall_ret = NULL; 2080 break; 2081 case __NR_gettid: 2082 syscall_nr_val = -1; 2083 syscall_ret_val = 45000; 2084 break; 2085 case __NR_openat: 2086 syscall_nr_val = -1; 2087 syscall_ret_val = -ESRCH; 2088 break; 2089 default: 2090 /* Unhandled, do nothing. */ 2091 return; 2092 } 2093 2094 __change_syscall(_metadata, tracee, syscall_nr, syscall_ret); 2095 } 2096 2097 FIXTURE_VARIANT(TRACE_syscall) { 2098 /* 2099 * All of the SECCOMP_RET_TRACE behaviors can be tested with either 2100 * SECCOMP_RET_TRACE+PTRACE_CONT or plain ptrace()+PTRACE_SYSCALL. 2101 * This indicates if we should use SECCOMP_RET_TRACE (false), or 2102 * ptrace (true). 2103 */ 2104 bool use_ptrace; 2105 }; 2106 2107 FIXTURE_VARIANT_ADD(TRACE_syscall, ptrace) { 2108 .use_ptrace = true, 2109 }; 2110 2111 FIXTURE_VARIANT_ADD(TRACE_syscall, seccomp) { 2112 .use_ptrace = false, 2113 }; 2114 2115 FIXTURE_SETUP(TRACE_syscall) 2116 { 2117 struct sock_filter filter[] = { 2118 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2119 offsetof(struct seccomp_data, nr)), 2120 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), 2121 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), 2122 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), 2123 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), 2124 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1), 2125 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), 2126 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 2127 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005), 2128 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2129 }; 2130 struct sock_fprog prog = { 2131 .len = (unsigned short)ARRAY_SIZE(filter), 2132 .filter = filter, 2133 }; 2134 long ret; 2135 2136 /* Prepare some testable syscall results. */ 2137 self->mytid = syscall(__NR_gettid); 2138 ASSERT_GT(self->mytid, 0); 2139 ASSERT_NE(self->mytid, 1) { 2140 TH_LOG("Running this test as init is not supported. :)"); 2141 } 2142 2143 self->mypid = getpid(); 2144 ASSERT_GT(self->mypid, 0); 2145 ASSERT_EQ(self->mytid, self->mypid); 2146 2147 self->parent = getppid(); 2148 ASSERT_GT(self->parent, 0); 2149 ASSERT_NE(self->parent, self->mypid); 2150 2151 /* Launch tracer. */ 2152 self->tracer = setup_trace_fixture(_metadata, 2153 variant->use_ptrace ? tracer_ptrace 2154 : tracer_seccomp, 2155 self, variant->use_ptrace); 2156 2157 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 2158 ASSERT_EQ(0, ret); 2159 2160 /* Do not install seccomp rewrite filters, as we'll use ptrace instead. */ 2161 if (variant->use_ptrace) 2162 return; 2163 2164 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2165 ASSERT_EQ(0, ret); 2166 } 2167 2168 FIXTURE_TEARDOWN(TRACE_syscall) 2169 { 2170 teardown_trace_fixture(_metadata, self->tracer); 2171 } 2172 2173 TEST(negative_ENOSYS) 2174 { 2175 /* 2176 * There should be no difference between an "internal" skip 2177 * and userspace asking for syscall "-1". 2178 */ 2179 errno = 0; 2180 EXPECT_EQ(-1, syscall(-1)); 2181 EXPECT_EQ(errno, ENOSYS); 2182 /* And no difference for "still not valid but not -1". */ 2183 errno = 0; 2184 EXPECT_EQ(-1, syscall(-101)); 2185 EXPECT_EQ(errno, ENOSYS); 2186 } 2187 2188 TEST_F(TRACE_syscall, negative_ENOSYS) 2189 { 2190 negative_ENOSYS(_metadata); 2191 } 2192 2193 TEST_F(TRACE_syscall, syscall_allowed) 2194 { 2195 /* getppid works as expected (no changes). 
*/ 2196 EXPECT_EQ(self->parent, syscall(__NR_getppid)); 2197 EXPECT_NE(self->mypid, syscall(__NR_getppid)); 2198 } 2199 2200 TEST_F(TRACE_syscall, syscall_redirected) 2201 { 2202 /* getpid has been redirected to getppid as expected. */ 2203 EXPECT_EQ(self->parent, syscall(__NR_getpid)); 2204 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 2205 } 2206 2207 TEST_F(TRACE_syscall, syscall_errno) 2208 { 2209 /* Tracer should skip the open syscall, resulting in ESRCH. */ 2210 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat)); 2211 } 2212 2213 TEST_F(TRACE_syscall, syscall_faked) 2214 { 2215 /* Tracer skips the gettid syscall and store altered return value. */ 2216 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid)); 2217 } 2218 2219 TEST_F_SIGNAL(TRACE_syscall, kill_immediate, SIGSYS) 2220 { 2221 struct sock_filter filter[] = { 2222 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2223 offsetof(struct seccomp_data, nr)), 2224 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_mknodat, 0, 1), 2225 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD), 2226 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2227 }; 2228 struct sock_fprog prog = { 2229 .len = (unsigned short)ARRAY_SIZE(filter), 2230 .filter = filter, 2231 }; 2232 long ret; 2233 2234 /* Install "kill on mknodat" filter. */ 2235 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2236 ASSERT_EQ(0, ret); 2237 2238 /* This should immediately die with SIGSYS, regardless of tracer. */ 2239 EXPECT_EQ(-1, syscall(__NR_mknodat, -1, NULL, 0, 0)); 2240 } 2241 2242 TEST_F(TRACE_syscall, skip_after) 2243 { 2244 struct sock_filter filter[] = { 2245 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2246 offsetof(struct seccomp_data, nr)), 2247 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 2248 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM), 2249 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2250 }; 2251 struct sock_fprog prog = { 2252 .len = (unsigned short)ARRAY_SIZE(filter), 2253 .filter = filter, 2254 }; 2255 long ret; 2256 2257 /* Install additional "errno on getppid" filter. */ 2258 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2259 ASSERT_EQ(0, ret); 2260 2261 /* Tracer will redirect getpid to getppid, and we should see EPERM. */ 2262 errno = 0; 2263 EXPECT_EQ(-1, syscall(__NR_getpid)); 2264 EXPECT_EQ(EPERM, errno); 2265 } 2266 2267 TEST_F_SIGNAL(TRACE_syscall, kill_after, SIGSYS) 2268 { 2269 struct sock_filter filter[] = { 2270 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2271 offsetof(struct seccomp_data, nr)), 2272 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 2273 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2274 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2275 }; 2276 struct sock_fprog prog = { 2277 .len = (unsigned short)ARRAY_SIZE(filter), 2278 .filter = filter, 2279 }; 2280 long ret; 2281 2282 /* Install additional "death on getppid" filter. */ 2283 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2284 ASSERT_EQ(0, ret); 2285 2286 /* Tracer will redirect getpid to getppid, and we should die. */ 2287 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 2288 } 2289 2290 TEST(seccomp_syscall) 2291 { 2292 struct sock_filter filter[] = { 2293 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2294 }; 2295 struct sock_fprog prog = { 2296 .len = (unsigned short)ARRAY_SIZE(filter), 2297 .filter = filter, 2298 }; 2299 long ret; 2300 2301 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 2302 ASSERT_EQ(0, ret) { 2303 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2304 } 2305 2306 /* Reject insane operation. 
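 * Probing with a bogus operation number distinguishes a missing
 * seccomp(2) syscall (ENOSYS) from a present one rejecting bad
 * arguments (EINVAL), which is what the two checks below rely on.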
*/ 2307 ret = seccomp(-1, 0, &prog); 2308 ASSERT_NE(ENOSYS, errno) { 2309 TH_LOG("Kernel does not support seccomp syscall!"); 2310 } 2311 EXPECT_EQ(EINVAL, errno) { 2312 TH_LOG("Did not reject crazy op value!"); 2313 } 2314 2315 /* Reject strict with flags or pointer. */ 2316 ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL); 2317 EXPECT_EQ(EINVAL, errno) { 2318 TH_LOG("Did not reject mode strict with flags!"); 2319 } 2320 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog); 2321 EXPECT_EQ(EINVAL, errno) { 2322 TH_LOG("Did not reject mode strict with uargs!"); 2323 } 2324 2325 /* Reject insane args for filter. */ 2326 ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog); 2327 EXPECT_EQ(EINVAL, errno) { 2328 TH_LOG("Did not reject crazy filter flags!"); 2329 } 2330 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL); 2331 EXPECT_EQ(EFAULT, errno) { 2332 TH_LOG("Did not reject NULL filter!"); 2333 } 2334 2335 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2336 EXPECT_EQ(0, errno) { 2337 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s", 2338 strerror(errno)); 2339 } 2340 } 2341 2342 TEST(seccomp_syscall_mode_lock) 2343 { 2344 struct sock_filter filter[] = { 2345 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2346 }; 2347 struct sock_fprog prog = { 2348 .len = (unsigned short)ARRAY_SIZE(filter), 2349 .filter = filter, 2350 }; 2351 long ret; 2352 2353 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); 2354 ASSERT_EQ(0, ret) { 2355 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2356 } 2357 2358 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2359 ASSERT_NE(ENOSYS, errno) { 2360 TH_LOG("Kernel does not support seccomp syscall!"); 2361 } 2362 EXPECT_EQ(0, ret) { 2363 TH_LOG("Could not install filter!"); 2364 } 2365 2366 /* Make sure neither entry point will switch to strict. */ 2367 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0); 2368 EXPECT_EQ(EINVAL, errno) { 2369 TH_LOG("Switched to mode strict!"); 2370 } 2371 2372 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL); 2373 EXPECT_EQ(EINVAL, errno) { 2374 TH_LOG("Switched to mode strict!"); 2375 } 2376 } 2377 2378 /* 2379 * Test detection of known and unknown filter flags. Userspace needs to be able 2380 * to check if a filter flag is supported by the current kernel and a good way 2381 * of doing that is by attempting to enter filter mode, with the flag bit in 2382 * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates 2383 * that the flag is valid and EINVAL indicates that the flag is invalid. 2384 */ 2385 TEST(detect_seccomp_filter_flags) 2386 { 2387 unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, 2388 SECCOMP_FILTER_FLAG_LOG, 2389 SECCOMP_FILTER_FLAG_SPEC_ALLOW, 2390 SECCOMP_FILTER_FLAG_NEW_LISTENER, 2391 SECCOMP_FILTER_FLAG_TSYNC_ESRCH }; 2392 unsigned int exclusive[] = { 2393 SECCOMP_FILTER_FLAG_TSYNC, 2394 SECCOMP_FILTER_FLAG_NEW_LISTENER }; 2395 unsigned int flag, all_flags, exclusive_mask; 2396 int i; 2397 long ret; 2398 2399 /* Test detection of individual known-good filter flags */ 2400 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { 2401 int bits = 0; 2402 2403 flag = flags[i]; 2404 /* Make sure the flag is a single bit! 
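 * The combined-flags pass below ORs the entries together, so a
 * multi-bit entry here would silently test overlapping flags;
 * counting the set bits catches that early.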
*/ 2405 while (flag) { 2406 if (flag & 0x1) 2407 bits ++; 2408 flag >>= 1; 2409 } 2410 ASSERT_EQ(1, bits); 2411 flag = flags[i]; 2412 2413 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2414 ASSERT_NE(ENOSYS, errno) { 2415 TH_LOG("Kernel does not support seccomp syscall!"); 2416 } 2417 EXPECT_EQ(-1, ret); 2418 EXPECT_EQ(EFAULT, errno) { 2419 TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!", 2420 flag); 2421 } 2422 2423 all_flags |= flag; 2424 } 2425 2426 /* 2427 * Test detection of all known-good filter flags combined. But 2428 * for the exclusive flags we need to mask them out and try them 2429 * individually for the "all flags" testing. 2430 */ 2431 exclusive_mask = 0; 2432 for (i = 0; i < ARRAY_SIZE(exclusive); i++) 2433 exclusive_mask |= exclusive[i]; 2434 for (i = 0; i < ARRAY_SIZE(exclusive); i++) { 2435 flag = all_flags & ~exclusive_mask; 2436 flag |= exclusive[i]; 2437 2438 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2439 EXPECT_EQ(-1, ret); 2440 EXPECT_EQ(EFAULT, errno) { 2441 TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!", 2442 flag); 2443 } 2444 } 2445 2446 /* Test detection of an unknown filter flags, without exclusives. */ 2447 flag = -1; 2448 flag &= ~exclusive_mask; 2449 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2450 EXPECT_EQ(-1, ret); 2451 EXPECT_EQ(EINVAL, errno) { 2452 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!", 2453 flag); 2454 } 2455 2456 /* 2457 * Test detection of an unknown filter flag that may simply need to be 2458 * added to this test 2459 */ 2460 flag = flags[ARRAY_SIZE(flags) - 1] << 1; 2461 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2462 EXPECT_EQ(-1, ret); 2463 EXPECT_EQ(EINVAL, errno) { 2464 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?", 2465 flag); 2466 } 2467 } 2468 2469 TEST(TSYNC_first) 2470 { 2471 struct sock_filter filter[] = { 2472 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2473 }; 2474 struct sock_fprog prog = { 2475 .len = (unsigned short)ARRAY_SIZE(filter), 2476 .filter = filter, 2477 }; 2478 long ret; 2479 2480 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); 2481 ASSERT_EQ(0, ret) { 2482 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2483 } 2484 2485 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2486 &prog); 2487 ASSERT_NE(ENOSYS, errno) { 2488 TH_LOG("Kernel does not support seccomp syscall!"); 2489 } 2490 EXPECT_EQ(0, ret) { 2491 TH_LOG("Could not install initial filter with TSYNC!"); 2492 } 2493 } 2494 2495 #define TSYNC_SIBLINGS 2 2496 struct tsync_sibling { 2497 pthread_t tid; 2498 pid_t system_tid; 2499 sem_t *started; 2500 pthread_cond_t *cond; 2501 pthread_mutex_t *mutex; 2502 int diverge; 2503 int num_waits; 2504 struct sock_fprog *prog; 2505 struct __test_metadata *metadata; 2506 }; 2507 2508 /* 2509 * To avoid joining joined threads (which is not allowed by Bionic), 2510 * make sure we both successfully join and clear the tid to skip a 2511 * later join attempt during fixture teardown. Any remaining threads 2512 * will be directly killed during teardown. 
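 *
 * The macro only zeroes the tid on a successful join, so
 * FIXTURE_TEARDOWN() can tell an already-joined sibling (tid == 0)
 * apart from one that is still stuck and needs pthread_kill().
 * Typical use in the tests below:
 *
 *	PTHREAD_JOIN(self->sibling[0].tid, &status);
 *	EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status);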
2513 */ 2514 #define PTHREAD_JOIN(tid, status) \ 2515 do { \ 2516 int _rc = pthread_join(tid, status); \ 2517 if (_rc) { \ 2518 TH_LOG("pthread_join of tid %u failed: %d\n", \ 2519 (unsigned int)tid, _rc); \ 2520 } else { \ 2521 tid = 0; \ 2522 } \ 2523 } while (0) 2524 2525 FIXTURE(TSYNC) { 2526 struct sock_fprog root_prog, apply_prog; 2527 struct tsync_sibling sibling[TSYNC_SIBLINGS]; 2528 sem_t started; 2529 pthread_cond_t cond; 2530 pthread_mutex_t mutex; 2531 int sibling_count; 2532 }; 2533 2534 FIXTURE_SETUP(TSYNC) 2535 { 2536 struct sock_filter root_filter[] = { 2537 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2538 }; 2539 struct sock_filter apply_filter[] = { 2540 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2541 offsetof(struct seccomp_data, nr)), 2542 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), 2543 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2544 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2545 }; 2546 2547 memset(&self->root_prog, 0, sizeof(self->root_prog)); 2548 memset(&self->apply_prog, 0, sizeof(self->apply_prog)); 2549 memset(&self->sibling, 0, sizeof(self->sibling)); 2550 self->root_prog.filter = malloc(sizeof(root_filter)); 2551 ASSERT_NE(NULL, self->root_prog.filter); 2552 memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter)); 2553 self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter); 2554 2555 self->apply_prog.filter = malloc(sizeof(apply_filter)); 2556 ASSERT_NE(NULL, self->apply_prog.filter); 2557 memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter)); 2558 self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter); 2559 2560 self->sibling_count = 0; 2561 pthread_mutex_init(&self->mutex, NULL); 2562 pthread_cond_init(&self->cond, NULL); 2563 sem_init(&self->started, 0, 0); 2564 self->sibling[0].tid = 0; 2565 self->sibling[0].cond = &self->cond; 2566 self->sibling[0].started = &self->started; 2567 self->sibling[0].mutex = &self->mutex; 2568 self->sibling[0].diverge = 0; 2569 self->sibling[0].num_waits = 1; 2570 self->sibling[0].prog = &self->root_prog; 2571 self->sibling[0].metadata = _metadata; 2572 self->sibling[1].tid = 0; 2573 self->sibling[1].cond = &self->cond; 2574 self->sibling[1].started = &self->started; 2575 self->sibling[1].mutex = &self->mutex; 2576 self->sibling[1].diverge = 0; 2577 self->sibling[1].prog = &self->root_prog; 2578 self->sibling[1].num_waits = 1; 2579 self->sibling[1].metadata = _metadata; 2580 } 2581 2582 FIXTURE_TEARDOWN(TSYNC) 2583 { 2584 int sib = 0; 2585 2586 if (self->root_prog.filter) 2587 free(self->root_prog.filter); 2588 if (self->apply_prog.filter) 2589 free(self->apply_prog.filter); 2590 2591 for ( ; sib < self->sibling_count; ++sib) { 2592 struct tsync_sibling *s = &self->sibling[sib]; 2593 2594 if (!s->tid) 2595 continue; 2596 /* 2597 * If a thread is still running, it may be stuck, so hit 2598 * it over the head really hard. 2599 */ 2600 pthread_kill(s->tid, 9); 2601 } 2602 pthread_mutex_destroy(&self->mutex); 2603 pthread_cond_destroy(&self->cond); 2604 sem_destroy(&self->started); 2605 } 2606 2607 void *tsync_sibling(void *data) 2608 { 2609 long ret = 0; 2610 struct tsync_sibling *me = data; 2611 2612 me->system_tid = syscall(__NR_gettid); 2613 2614 pthread_mutex_lock(me->mutex); 2615 if (me->diverge) { 2616 /* Just re-apply the root prog to fork the tree */ 2617 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 2618 me->prog, 0, 0); 2619 } 2620 sem_post(me->started); 2621 /* Return outside of started so parent notices failures. 
*/ 2622 if (ret) { 2623 pthread_mutex_unlock(me->mutex); 2624 return (void *)SIBLING_EXIT_FAILURE; 2625 } 2626 do { 2627 pthread_cond_wait(me->cond, me->mutex); 2628 me->num_waits = me->num_waits - 1; 2629 } while (me->num_waits); 2630 pthread_mutex_unlock(me->mutex); 2631 2632 ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); 2633 if (!ret) 2634 return (void *)SIBLING_EXIT_NEWPRIVS; 2635 read(-1, NULL, 0); 2636 return (void *)SIBLING_EXIT_UNKILLED; 2637 } 2638 2639 void tsync_start_sibling(struct tsync_sibling *sibling) 2640 { 2641 pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling); 2642 } 2643 2644 TEST_F(TSYNC, siblings_fail_prctl) 2645 { 2646 long ret; 2647 void *status; 2648 struct sock_filter filter[] = { 2649 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2650 offsetof(struct seccomp_data, nr)), 2651 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1), 2652 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL), 2653 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2654 }; 2655 struct sock_fprog prog = { 2656 .len = (unsigned short)ARRAY_SIZE(filter), 2657 .filter = filter, 2658 }; 2659 2660 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2661 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2662 } 2663 2664 /* Check prctl failure detection by requesting sib 0 diverge. */ 2665 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2666 ASSERT_NE(ENOSYS, errno) { 2667 TH_LOG("Kernel does not support seccomp syscall!"); 2668 } 2669 ASSERT_EQ(0, ret) { 2670 TH_LOG("setting filter failed"); 2671 } 2672 2673 self->sibling[0].diverge = 1; 2674 tsync_start_sibling(&self->sibling[0]); 2675 tsync_start_sibling(&self->sibling[1]); 2676 2677 while (self->sibling_count < TSYNC_SIBLINGS) { 2678 sem_wait(&self->started); 2679 self->sibling_count++; 2680 } 2681 2682 /* Signal the threads to clean up*/ 2683 pthread_mutex_lock(&self->mutex); 2684 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2685 TH_LOG("cond broadcast non-zero"); 2686 } 2687 pthread_mutex_unlock(&self->mutex); 2688 2689 /* Ensure diverging sibling failed to call prctl. */ 2690 PTHREAD_JOIN(self->sibling[0].tid, &status); 2691 EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status); 2692 PTHREAD_JOIN(self->sibling[1].tid, &status); 2693 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2694 } 2695 2696 TEST_F(TSYNC, two_siblings_with_ancestor) 2697 { 2698 long ret; 2699 void *status; 2700 2701 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2702 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2703 } 2704 2705 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2706 ASSERT_NE(ENOSYS, errno) { 2707 TH_LOG("Kernel does not support seccomp syscall!"); 2708 } 2709 ASSERT_EQ(0, ret) { 2710 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2711 } 2712 tsync_start_sibling(&self->sibling[0]); 2713 tsync_start_sibling(&self->sibling[1]); 2714 2715 while (self->sibling_count < TSYNC_SIBLINGS) { 2716 sem_wait(&self->started); 2717 self->sibling_count++; 2718 } 2719 2720 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2721 &self->apply_prog); 2722 ASSERT_EQ(0, ret) { 2723 TH_LOG("Could install filter on all threads!"); 2724 } 2725 /* Tell the siblings to test the policy */ 2726 pthread_mutex_lock(&self->mutex); 2727 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2728 TH_LOG("cond broadcast non-zero"); 2729 } 2730 pthread_mutex_unlock(&self->mutex); 2731 /* Ensure they are both killed and don't exit cleanly. 
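 * The TSYNC'd apply_prog kills on __NR_read and tsync_sibling()
 * finishes with read(-1, NULL, 0), so neither sibling reaches its
 * SIBLING_EXIT_* return; the join status observed here is 0 instead.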
*/ 2732 PTHREAD_JOIN(self->sibling[0].tid, &status); 2733 EXPECT_EQ(0x0, (long)status); 2734 PTHREAD_JOIN(self->sibling[1].tid, &status); 2735 EXPECT_EQ(0x0, (long)status); 2736 } 2737 2738 TEST_F(TSYNC, two_sibling_want_nnp) 2739 { 2740 void *status; 2741 2742 /* start siblings before any prctl() operations */ 2743 tsync_start_sibling(&self->sibling[0]); 2744 tsync_start_sibling(&self->sibling[1]); 2745 while (self->sibling_count < TSYNC_SIBLINGS) { 2746 sem_wait(&self->started); 2747 self->sibling_count++; 2748 } 2749 2750 /* Tell the siblings to test no policy */ 2751 pthread_mutex_lock(&self->mutex); 2752 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2753 TH_LOG("cond broadcast non-zero"); 2754 } 2755 pthread_mutex_unlock(&self->mutex); 2756 2757 /* Ensure they are both upset about lacking nnp. */ 2758 PTHREAD_JOIN(self->sibling[0].tid, &status); 2759 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); 2760 PTHREAD_JOIN(self->sibling[1].tid, &status); 2761 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); 2762 } 2763 2764 TEST_F(TSYNC, two_siblings_with_no_filter) 2765 { 2766 long ret; 2767 void *status; 2768 2769 /* start siblings before any prctl() operations */ 2770 tsync_start_sibling(&self->sibling[0]); 2771 tsync_start_sibling(&self->sibling[1]); 2772 while (self->sibling_count < TSYNC_SIBLINGS) { 2773 sem_wait(&self->started); 2774 self->sibling_count++; 2775 } 2776 2777 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2778 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2779 } 2780 2781 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2782 &self->apply_prog); 2783 ASSERT_NE(ENOSYS, errno) { 2784 TH_LOG("Kernel does not support seccomp syscall!"); 2785 } 2786 ASSERT_EQ(0, ret) { 2787 TH_LOG("Could install filter on all threads!"); 2788 } 2789 2790 /* Tell the siblings to test the policy */ 2791 pthread_mutex_lock(&self->mutex); 2792 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2793 TH_LOG("cond broadcast non-zero"); 2794 } 2795 pthread_mutex_unlock(&self->mutex); 2796 2797 /* Ensure they are both killed and don't exit cleanly. */ 2798 PTHREAD_JOIN(self->sibling[0].tid, &status); 2799 EXPECT_EQ(0x0, (long)status); 2800 PTHREAD_JOIN(self->sibling[1].tid, &status); 2801 EXPECT_EQ(0x0, (long)status); 2802 } 2803 2804 TEST_F(TSYNC, two_siblings_with_one_divergence) 2805 { 2806 long ret; 2807 void *status; 2808 2809 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2810 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2811 } 2812 2813 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2814 ASSERT_NE(ENOSYS, errno) { 2815 TH_LOG("Kernel does not support seccomp syscall!"); 2816 } 2817 ASSERT_EQ(0, ret) { 2818 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2819 } 2820 self->sibling[0].diverge = 1; 2821 tsync_start_sibling(&self->sibling[0]); 2822 tsync_start_sibling(&self->sibling[1]); 2823 2824 while (self->sibling_count < TSYNC_SIBLINGS) { 2825 sem_wait(&self->started); 2826 self->sibling_count++; 2827 } 2828 2829 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2830 &self->apply_prog); 2831 ASSERT_EQ(self->sibling[0].system_tid, ret) { 2832 TH_LOG("Did not fail on diverged sibling."); 2833 } 2834 2835 /* Wake the threads */ 2836 pthread_mutex_lock(&self->mutex); 2837 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2838 TH_LOG("cond broadcast non-zero"); 2839 } 2840 pthread_mutex_unlock(&self->mutex); 2841 2842 /* Ensure they are both unkilled. 
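 * TSYNC is all-or-nothing: because it failed on the diverged
 * sibling, the kill-on-read filter was not attached to any thread
 * and both siblings survive their read() call.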
*/ 2843 PTHREAD_JOIN(self->sibling[0].tid, &status); 2844 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2845 PTHREAD_JOIN(self->sibling[1].tid, &status); 2846 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2847 } 2848 2849 TEST_F(TSYNC, two_siblings_with_one_divergence_no_tid_in_err) 2850 { 2851 long ret, flags; 2852 void *status; 2853 2854 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2855 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2856 } 2857 2858 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2859 ASSERT_NE(ENOSYS, errno) { 2860 TH_LOG("Kernel does not support seccomp syscall!"); 2861 } 2862 ASSERT_EQ(0, ret) { 2863 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2864 } 2865 self->sibling[0].diverge = 1; 2866 tsync_start_sibling(&self->sibling[0]); 2867 tsync_start_sibling(&self->sibling[1]); 2868 2869 while (self->sibling_count < TSYNC_SIBLINGS) { 2870 sem_wait(&self->started); 2871 self->sibling_count++; 2872 } 2873 2874 flags = SECCOMP_FILTER_FLAG_TSYNC | \ 2875 SECCOMP_FILTER_FLAG_TSYNC_ESRCH; 2876 ret = seccomp(SECCOMP_SET_MODE_FILTER, flags, &self->apply_prog); 2877 ASSERT_EQ(ESRCH, errno) { 2878 TH_LOG("Did not return ESRCH for diverged sibling."); 2879 } 2880 ASSERT_EQ(-1, ret) { 2881 TH_LOG("Did not fail on diverged sibling."); 2882 } 2883 2884 /* Wake the threads */ 2885 pthread_mutex_lock(&self->mutex); 2886 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2887 TH_LOG("cond broadcast non-zero"); 2888 } 2889 pthread_mutex_unlock(&self->mutex); 2890 2891 /* Ensure they are both unkilled. */ 2892 PTHREAD_JOIN(self->sibling[0].tid, &status); 2893 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2894 PTHREAD_JOIN(self->sibling[1].tid, &status); 2895 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2896 } 2897 2898 TEST_F(TSYNC, two_siblings_not_under_filter) 2899 { 2900 long ret, sib; 2901 void *status; 2902 struct timespec delay = { .tv_nsec = 100000000 }; 2903 2904 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2905 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2906 } 2907 2908 /* 2909 * Sibling 0 will have its own seccomp policy 2910 * and Sibling 1 will not be under seccomp at 2911 * all. Sibling 1 will enter seccomp and 0 2912 * will cause failure. 2913 */ 2914 self->sibling[0].diverge = 1; 2915 tsync_start_sibling(&self->sibling[0]); 2916 tsync_start_sibling(&self->sibling[1]); 2917 2918 while (self->sibling_count < TSYNC_SIBLINGS) { 2919 sem_wait(&self->started); 2920 self->sibling_count++; 2921 } 2922 2923 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2924 ASSERT_NE(ENOSYS, errno) { 2925 TH_LOG("Kernel does not support seccomp syscall!"); 2926 } 2927 ASSERT_EQ(0, ret) { 2928 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2929 } 2930 2931 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2932 &self->apply_prog); 2933 ASSERT_EQ(ret, self->sibling[0].system_tid) { 2934 TH_LOG("Did not fail on diverged sibling."); 2935 } 2936 sib = 1; 2937 if (ret == self->sibling[0].system_tid) 2938 sib = 0; 2939 2940 pthread_mutex_lock(&self->mutex); 2941 2942 /* Increment the other siblings num_waits so we can clean up 2943 * the one we just saw. 
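 * With num_waits bumped, the other sibling just loops back into
 * pthread_cond_wait() after this broadcast, so only the sibling
 * being cleaned up here leaves its wait loop.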
2944 */ 2945 self->sibling[!sib].num_waits += 1; 2946 2947 /* Signal the thread to clean up*/ 2948 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2949 TH_LOG("cond broadcast non-zero"); 2950 } 2951 pthread_mutex_unlock(&self->mutex); 2952 PTHREAD_JOIN(self->sibling[sib].tid, &status); 2953 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2954 /* Poll for actual task death. pthread_join doesn't guarantee it. */ 2955 while (!kill(self->sibling[sib].system_tid, 0)) 2956 nanosleep(&delay, NULL); 2957 /* Switch to the remaining sibling */ 2958 sib = !sib; 2959 2960 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2961 &self->apply_prog); 2962 ASSERT_EQ(0, ret) { 2963 TH_LOG("Expected the remaining sibling to sync"); 2964 }; 2965 2966 pthread_mutex_lock(&self->mutex); 2967 2968 /* If remaining sibling didn't have a chance to wake up during 2969 * the first broadcast, manually reduce the num_waits now. 2970 */ 2971 if (self->sibling[sib].num_waits > 1) 2972 self->sibling[sib].num_waits = 1; 2973 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2974 TH_LOG("cond broadcast non-zero"); 2975 } 2976 pthread_mutex_unlock(&self->mutex); 2977 PTHREAD_JOIN(self->sibling[sib].tid, &status); 2978 EXPECT_EQ(0, (long)status); 2979 /* Poll for actual task death. pthread_join doesn't guarantee it. */ 2980 while (!kill(self->sibling[sib].system_tid, 0)) 2981 nanosleep(&delay, NULL); 2982 2983 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2984 &self->apply_prog); 2985 ASSERT_EQ(0, ret); /* just us chickens */ 2986 } 2987 2988 /* Make sure restarted syscalls are seen directly as "restart_syscall". */ 2989 TEST(syscall_restart) 2990 { 2991 long ret; 2992 unsigned long msg; 2993 pid_t child_pid; 2994 int pipefd[2]; 2995 int status; 2996 siginfo_t info = { }; 2997 struct sock_filter filter[] = { 2998 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2999 offsetof(struct seccomp_data, nr)), 3000 3001 #ifdef __NR_sigreturn 3002 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 7, 0), 3003 #endif 3004 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 6, 0), 3005 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 5, 0), 3006 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 4, 0), 3007 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 5, 0), 3008 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_clock_nanosleep, 4, 0), 3009 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0), 3010 3011 /* Allow __NR_write for easy logging. */ 3012 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1), 3013 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3014 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 3015 /* The nanosleep jump target. */ 3016 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100), 3017 /* The restart_syscall jump target. */ 3018 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200), 3019 }; 3020 struct sock_fprog prog = { 3021 .len = (unsigned short)ARRAY_SIZE(filter), 3022 .filter = filter, 3023 }; 3024 #if defined(__arm__) 3025 struct utsname utsbuf; 3026 #endif 3027 3028 ASSERT_EQ(0, pipe(pipefd)); 3029 3030 child_pid = fork(); 3031 ASSERT_LE(0, child_pid); 3032 if (child_pid == 0) { 3033 /* Child uses EXPECT not ASSERT to deliver status correctly. */ 3034 char buf = ' '; 3035 struct timespec timeout = { }; 3036 3037 /* Attach parent as tracer and stop. 
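 * The classic PTRACE_TRACEME + raise(SIGSTOP) handshake: the child
 * parks itself so the parent can waitpid() for the stop and set
 * PTRACE_O_TRACESECCOMP before letting it run.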
*/ 3038 EXPECT_EQ(0, ptrace(PTRACE_TRACEME)); 3039 EXPECT_EQ(0, raise(SIGSTOP)); 3040 3041 EXPECT_EQ(0, close(pipefd[1])); 3042 3043 EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 3044 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3045 } 3046 3047 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 3048 EXPECT_EQ(0, ret) { 3049 TH_LOG("Failed to install filter!"); 3050 } 3051 3052 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { 3053 TH_LOG("Failed to read() sync from parent"); 3054 } 3055 EXPECT_EQ('.', buf) { 3056 TH_LOG("Failed to get sync data from read()"); 3057 } 3058 3059 /* Start nanosleep to be interrupted. */ 3060 timeout.tv_sec = 1; 3061 errno = 0; 3062 EXPECT_EQ(0, nanosleep(&timeout, NULL)) { 3063 TH_LOG("Call to nanosleep() failed (errno %d)", errno); 3064 } 3065 3066 /* Read final sync from parent. */ 3067 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { 3068 TH_LOG("Failed final read() from parent"); 3069 } 3070 EXPECT_EQ('!', buf) { 3071 TH_LOG("Failed to get final data from read()"); 3072 } 3073 3074 /* Directly report the status of our test harness results. */ 3075 syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS 3076 : EXIT_FAILURE); 3077 } 3078 EXPECT_EQ(0, close(pipefd[0])); 3079 3080 /* Attach to child, setup options, and release. */ 3081 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3082 ASSERT_EQ(true, WIFSTOPPED(status)); 3083 ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL, 3084 PTRACE_O_TRACESECCOMP)); 3085 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3086 ASSERT_EQ(1, write(pipefd[1], ".", 1)); 3087 3088 /* Wait for nanosleep() to start. */ 3089 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3090 ASSERT_EQ(true, WIFSTOPPED(status)); 3091 ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); 3092 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); 3093 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); 3094 ASSERT_EQ(0x100, msg); 3095 ret = get_syscall(_metadata, child_pid); 3096 EXPECT_TRUE(ret == __NR_nanosleep || ret == __NR_clock_nanosleep); 3097 3098 /* Might as well check siginfo for sanity while we're here. */ 3099 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); 3100 ASSERT_EQ(SIGTRAP, info.si_signo); 3101 ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code); 3102 EXPECT_EQ(0, info.si_errno); 3103 EXPECT_EQ(getuid(), info.si_uid); 3104 /* Verify signal delivery came from child (seccomp-triggered). */ 3105 EXPECT_EQ(child_pid, info.si_pid); 3106 3107 /* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */ 3108 ASSERT_EQ(0, kill(child_pid, SIGSTOP)); 3109 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3110 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3111 ASSERT_EQ(true, WIFSTOPPED(status)); 3112 ASSERT_EQ(SIGSTOP, WSTOPSIG(status)); 3113 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); 3114 /* 3115 * There is no siginfo on SIGSTOP any more, so we can't verify 3116 * signal delivery came from parent now (getpid() == info.si_pid). 3117 * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com 3118 * At least verify the SIGSTOP via PTRACE_GETSIGINFO. 3119 */ 3120 EXPECT_EQ(SIGSTOP, info.si_signo); 3121 3122 /* Restart nanosleep with SIGCONT, which triggers restart_syscall. 
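 * When the interrupted nanosleep() resumes, the kernel re-enters it
 * via restart_syscall(), which the filter above tags with
 * SECCOMP_RET_TRACE|0x200 so this stop can be told apart from the
 * original 0x100 nanosleep stop.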
*/ 3123 ASSERT_EQ(0, kill(child_pid, SIGCONT)); 3124 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3125 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3126 ASSERT_EQ(true, WIFSTOPPED(status)); 3127 ASSERT_EQ(SIGCONT, WSTOPSIG(status)); 3128 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3129 3130 /* Wait for restart_syscall() to start. */ 3131 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3132 ASSERT_EQ(true, WIFSTOPPED(status)); 3133 ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); 3134 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); 3135 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); 3136 3137 ASSERT_EQ(0x200, msg); 3138 ret = get_syscall(_metadata, child_pid); 3139 #if defined(__arm__) 3140 /* 3141 * FIXME: 3142 * - native ARM registers do NOT expose true syscall. 3143 * - compat ARM registers on ARM64 DO expose true syscall. 3144 */ 3145 ASSERT_EQ(0, uname(&utsbuf)); 3146 if (strncmp(utsbuf.machine, "arm", 3) == 0) { 3147 EXPECT_EQ(__NR_nanosleep, ret); 3148 } else 3149 #endif 3150 { 3151 EXPECT_EQ(__NR_restart_syscall, ret); 3152 } 3153 3154 /* Write again to end test. */ 3155 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3156 ASSERT_EQ(1, write(pipefd[1], "!", 1)); 3157 EXPECT_EQ(0, close(pipefd[1])); 3158 3159 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3160 if (WIFSIGNALED(status) || WEXITSTATUS(status)) 3161 _metadata->passed = 0; 3162 } 3163 3164 TEST_SIGNAL(filter_flag_log, SIGSYS) 3165 { 3166 struct sock_filter allow_filter[] = { 3167 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3168 }; 3169 struct sock_filter kill_filter[] = { 3170 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 3171 offsetof(struct seccomp_data, nr)), 3172 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), 3173 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 3174 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3175 }; 3176 struct sock_fprog allow_prog = { 3177 .len = (unsigned short)ARRAY_SIZE(allow_filter), 3178 .filter = allow_filter, 3179 }; 3180 struct sock_fprog kill_prog = { 3181 .len = (unsigned short)ARRAY_SIZE(kill_filter), 3182 .filter = kill_filter, 3183 }; 3184 long ret; 3185 pid_t parent = getppid(); 3186 3187 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3188 ASSERT_EQ(0, ret); 3189 3190 /* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */ 3191 ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG, 3192 &allow_prog); 3193 ASSERT_NE(ENOSYS, errno) { 3194 TH_LOG("Kernel does not support seccomp syscall!"); 3195 } 3196 EXPECT_NE(0, ret) { 3197 TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!"); 3198 } 3199 EXPECT_EQ(EINVAL, errno) { 3200 TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!"); 3201 } 3202 3203 /* Verify that a simple, permissive filter can be added with no flags */ 3204 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog); 3205 EXPECT_EQ(0, ret); 3206 3207 /* See if the same filter can be added with the FILTER_FLAG_LOG flag */ 3208 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, 3209 &allow_prog); 3210 ASSERT_NE(EINVAL, errno) { 3211 TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!"); 3212 } 3213 EXPECT_EQ(0, ret); 3214 3215 /* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */ 3216 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, 3217 &kill_prog); 3218 EXPECT_EQ(0, ret); 3219 3220 EXPECT_EQ(parent, syscall(__NR_getppid)); 3221 /* getpid() should never return. 
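 * kill_prog matches __NR_getpid, so the thread dies with SIGSYS right
 * here; TEST_SIGNAL(filter_flag_log, SIGSYS) treats that death as the
 * expected outcome and the comparison below is never evaluated.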
*/ 3222 EXPECT_EQ(0, syscall(__NR_getpid)); 3223 } 3224 3225 TEST(get_action_avail) 3226 { 3227 __u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP, 3228 SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE, 3229 SECCOMP_RET_LOG, SECCOMP_RET_ALLOW }; 3230 __u32 unknown_action = 0x10000000U; 3231 int i; 3232 long ret; 3233 3234 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]); 3235 ASSERT_NE(ENOSYS, errno) { 3236 TH_LOG("Kernel does not support seccomp syscall!"); 3237 } 3238 ASSERT_NE(EINVAL, errno) { 3239 TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!"); 3240 } 3241 EXPECT_EQ(ret, 0); 3242 3243 for (i = 0; i < ARRAY_SIZE(actions); i++) { 3244 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]); 3245 EXPECT_EQ(ret, 0) { 3246 TH_LOG("Expected action (0x%X) not available!", 3247 actions[i]); 3248 } 3249 } 3250 3251 /* Check that an unknown action is handled properly (EOPNOTSUPP) */ 3252 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action); 3253 EXPECT_EQ(ret, -1); 3254 EXPECT_EQ(errno, EOPNOTSUPP); 3255 } 3256 3257 TEST(get_metadata) 3258 { 3259 pid_t pid; 3260 int pipefd[2]; 3261 char buf; 3262 struct seccomp_metadata md; 3263 long ret; 3264 3265 /* Only real root can get metadata. */ 3266 if (geteuid()) { 3267 SKIP(return, "get_metadata requires real root"); 3268 return; 3269 } 3270 3271 ASSERT_EQ(0, pipe(pipefd)); 3272 3273 pid = fork(); 3274 ASSERT_GE(pid, 0); 3275 if (pid == 0) { 3276 struct sock_filter filter[] = { 3277 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3278 }; 3279 struct sock_fprog prog = { 3280 .len = (unsigned short)ARRAY_SIZE(filter), 3281 .filter = filter, 3282 }; 3283 3284 /* one with log, one without */ 3285 EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 3286 SECCOMP_FILTER_FLAG_LOG, &prog)); 3287 EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog)); 3288 3289 EXPECT_EQ(0, close(pipefd[0])); 3290 ASSERT_EQ(1, write(pipefd[1], "1", 1)); 3291 ASSERT_EQ(0, close(pipefd[1])); 3292 3293 while (1) 3294 sleep(100); 3295 } 3296 3297 ASSERT_EQ(0, close(pipefd[1])); 3298 ASSERT_EQ(1, read(pipefd[0], &buf, 1)); 3299 3300 ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid)); 3301 ASSERT_EQ(pid, waitpid(pid, NULL, 0)); 3302 3303 /* Past here must not use ASSERT or child process is never killed. 
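 * An ASSERT_* failure bails out of the test immediately, which would
 * skip the kill(pid, SIGKILL) after the skip label and leave the
 * sleeping child behind; EXPECT_* records the failure and continues.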
*/ 3304 3305 md.filter_off = 0; 3306 errno = 0; 3307 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md); 3308 EXPECT_EQ(sizeof(md), ret) { 3309 if (errno == EINVAL) 3310 SKIP(goto skip, "Kernel does not support PTRACE_SECCOMP_GET_METADATA (missing CONFIG_CHECKPOINT_RESTORE?)"); 3311 } 3312 3313 EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG); 3314 EXPECT_EQ(md.filter_off, 0); 3315 3316 md.filter_off = 1; 3317 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md); 3318 EXPECT_EQ(sizeof(md), ret); 3319 EXPECT_EQ(md.flags, 0); 3320 EXPECT_EQ(md.filter_off, 1); 3321 3322 skip: 3323 ASSERT_EQ(0, kill(pid, SIGKILL)); 3324 } 3325 3326 static int user_notif_syscall(int nr, unsigned int flags) 3327 { 3328 struct sock_filter filter[] = { 3329 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 3330 offsetof(struct seccomp_data, nr)), 3331 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, nr, 0, 1), 3332 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_USER_NOTIF), 3333 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3334 }; 3335 3336 struct sock_fprog prog = { 3337 .len = (unsigned short)ARRAY_SIZE(filter), 3338 .filter = filter, 3339 }; 3340 3341 return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog); 3342 } 3343 3344 #define USER_NOTIF_MAGIC INT_MAX 3345 TEST(user_notification_basic) 3346 { 3347 pid_t pid; 3348 long ret; 3349 int status, listener; 3350 struct seccomp_notif req = {}; 3351 struct seccomp_notif_resp resp = {}; 3352 struct pollfd pollfd; 3353 3354 struct sock_filter filter[] = { 3355 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3356 }; 3357 struct sock_fprog prog = { 3358 .len = (unsigned short)ARRAY_SIZE(filter), 3359 .filter = filter, 3360 }; 3361 3362 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3363 ASSERT_EQ(0, ret) { 3364 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3365 } 3366 3367 pid = fork(); 3368 ASSERT_GE(pid, 0); 3369 3370 /* Check that we get -ENOSYS with no listener attached */ 3371 if (pid == 0) { 3372 if (user_notif_syscall(__NR_getppid, 0) < 0) 3373 exit(1); 3374 ret = syscall(__NR_getppid); 3375 exit(ret >= 0 || errno != ENOSYS); 3376 } 3377 3378 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3379 EXPECT_EQ(true, WIFEXITED(status)); 3380 EXPECT_EQ(0, WEXITSTATUS(status)); 3381 3382 /* Add some no-op filters for grins. */ 3383 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3384 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3385 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3386 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3387 3388 /* Check that the basic notification machinery works */ 3389 listener = user_notif_syscall(__NR_getppid, 3390 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3391 ASSERT_GE(listener, 0); 3392 3393 /* Installing a second listener in the chain should EBUSY */ 3394 EXPECT_EQ(user_notif_syscall(__NR_getppid, 3395 SECCOMP_FILTER_FLAG_NEW_LISTENER), 3396 -1); 3397 EXPECT_EQ(errno, EBUSY); 3398 3399 pid = fork(); 3400 ASSERT_GE(pid, 0); 3401 3402 if (pid == 0) { 3403 ret = syscall(__NR_getppid); 3404 exit(ret != USER_NOTIF_MAGIC); 3405 } 3406 3407 pollfd.fd = listener; 3408 pollfd.events = POLLIN | POLLOUT; 3409 3410 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3411 EXPECT_EQ(pollfd.revents, POLLIN); 3412 3413 /* Test that we can't pass garbage to the kernel. 
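 * SECCOMP_IOCTL_NOTIF_RECV expects a zeroed struct seccomp_notif;
 * kernels that validate the buffer reject the bogus pid below with
 * EINVAL, in which case the notification is re-read with a clean
 * struct.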
*/ 3414 memset(&req, 0, sizeof(req)); 3415 req.pid = -1; 3416 errno = 0; 3417 ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req); 3418 EXPECT_EQ(-1, ret); 3419 EXPECT_EQ(EINVAL, errno); 3420 3421 if (ret) { 3422 req.pid = 0; 3423 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3424 } 3425 3426 pollfd.fd = listener; 3427 pollfd.events = POLLIN | POLLOUT; 3428 3429 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3430 EXPECT_EQ(pollfd.revents, POLLOUT); 3431 3432 EXPECT_EQ(req.data.nr, __NR_getppid); 3433 3434 resp.id = req.id; 3435 resp.error = 0; 3436 resp.val = USER_NOTIF_MAGIC; 3437 3438 /* check that we make sure flags == 0 */ 3439 resp.flags = 1; 3440 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3441 EXPECT_EQ(errno, EINVAL); 3442 3443 resp.flags = 0; 3444 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3445 3446 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3447 EXPECT_EQ(true, WIFEXITED(status)); 3448 EXPECT_EQ(0, WEXITSTATUS(status)); 3449 } 3450 3451 TEST(user_notification_with_tsync) 3452 { 3453 int ret; 3454 unsigned int flags; 3455 3456 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3457 ASSERT_EQ(0, ret) { 3458 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3459 } 3460 3461 /* these were exclusive */ 3462 flags = SECCOMP_FILTER_FLAG_NEW_LISTENER | 3463 SECCOMP_FILTER_FLAG_TSYNC; 3464 ASSERT_EQ(-1, user_notif_syscall(__NR_getppid, flags)); 3465 ASSERT_EQ(EINVAL, errno); 3466 3467 /* but now they're not */ 3468 flags |= SECCOMP_FILTER_FLAG_TSYNC_ESRCH; 3469 ret = user_notif_syscall(__NR_getppid, flags); 3470 close(ret); 3471 ASSERT_LE(0, ret); 3472 } 3473 3474 TEST(user_notification_kill_in_middle) 3475 { 3476 pid_t pid; 3477 long ret; 3478 int listener; 3479 struct seccomp_notif req = {}; 3480 struct seccomp_notif_resp resp = {}; 3481 3482 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3483 ASSERT_EQ(0, ret) { 3484 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3485 } 3486 3487 listener = user_notif_syscall(__NR_getppid, 3488 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3489 ASSERT_GE(listener, 0); 3490 3491 /* 3492 * Check that nothing bad happens when we kill the task in the middle 3493 * of a syscall. 
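 * Once the child is SIGKILLed the pending notification goes stale:
 * SECCOMP_IOCTL_NOTIF_ID_VALID starts failing and a late NOTIF_SEND
 * gets ENOENT, which is what the checks below expect.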
3494 */ 3495 pid = fork(); 3496 ASSERT_GE(pid, 0); 3497 3498 if (pid == 0) { 3499 ret = syscall(__NR_getppid); 3500 exit(ret != USER_NOTIF_MAGIC); 3501 } 3502 3503 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3504 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), 0); 3505 3506 EXPECT_EQ(kill(pid, SIGKILL), 0); 3507 EXPECT_EQ(waitpid(pid, NULL, 0), pid); 3508 3509 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), -1); 3510 3511 resp.id = req.id; 3512 ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp); 3513 EXPECT_EQ(ret, -1); 3514 EXPECT_EQ(errno, ENOENT); 3515 } 3516 3517 static int handled = -1; 3518 3519 static void signal_handler(int signal) 3520 { 3521 if (write(handled, "c", 1) != 1) 3522 perror("write from signal"); 3523 } 3524 3525 TEST(user_notification_signal) 3526 { 3527 pid_t pid; 3528 long ret; 3529 int status, listener, sk_pair[2]; 3530 struct seccomp_notif req = {}; 3531 struct seccomp_notif_resp resp = {}; 3532 char c; 3533 3534 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3535 ASSERT_EQ(0, ret) { 3536 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3537 } 3538 3539 ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0); 3540 3541 listener = user_notif_syscall(__NR_gettid, 3542 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3543 ASSERT_GE(listener, 0); 3544 3545 pid = fork(); 3546 ASSERT_GE(pid, 0); 3547 3548 if (pid == 0) { 3549 close(sk_pair[0]); 3550 handled = sk_pair[1]; 3551 if (signal(SIGUSR1, signal_handler) == SIG_ERR) { 3552 perror("signal"); 3553 exit(1); 3554 } 3555 /* 3556 * ERESTARTSYS behavior is a bit hard to test, because we need 3557 * to rely on a signal that has not yet been handled. Let's at 3558 * least check that the error code gets propagated through, and 3559 * hope that it doesn't break when there is actually a signal :) 3560 */ 3561 ret = syscall(__NR_gettid); 3562 exit(!(ret == -1 && errno == 512)); 3563 } 3564 3565 close(sk_pair[1]); 3566 3567 memset(&req, 0, sizeof(req)); 3568 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3569 3570 EXPECT_EQ(kill(pid, SIGUSR1), 0); 3571 3572 /* 3573 * Make sure the signal really is delivered, which means we're not 3574 * stuck in the user notification code any more and the notification 3575 * should be dead. 3576 */ 3577 EXPECT_EQ(read(sk_pair[0], &c, 1), 1); 3578 3579 resp.id = req.id; 3580 resp.error = -EPERM; 3581 resp.val = 0; 3582 3583 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3584 EXPECT_EQ(errno, ENOENT); 3585 3586 memset(&req, 0, sizeof(req)); 3587 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3588 3589 resp.id = req.id; 3590 resp.error = -512; /* -ERESTARTSYS */ 3591 resp.val = 0; 3592 3593 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3594 3595 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3596 EXPECT_EQ(true, WIFEXITED(status)); 3597 EXPECT_EQ(0, WEXITSTATUS(status)); 3598 } 3599 3600 TEST(user_notification_closed_listener) 3601 { 3602 pid_t pid; 3603 long ret; 3604 int status, listener; 3605 3606 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3607 ASSERT_EQ(0, ret) { 3608 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3609 } 3610 3611 listener = user_notif_syscall(__NR_getppid, 3612 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3613 ASSERT_GE(listener, 0); 3614 3615 /* 3616 * Check that we get an ENOSYS when the listener is closed. 
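 * With the last reference to the listener gone there is nobody left
 * to answer the notification, so the filtered getppid() should fail
 * with ENOSYS rather than block forever.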
3617 */ 3618 pid = fork(); 3619 ASSERT_GE(pid, 0); 3620 if (pid == 0) { 3621 close(listener); 3622 ret = syscall(__NR_getppid); 3623 exit(ret != -1 && errno != ENOSYS); 3624 } 3625 3626 close(listener); 3627 3628 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3629 EXPECT_EQ(true, WIFEXITED(status)); 3630 EXPECT_EQ(0, WEXITSTATUS(status)); 3631 } 3632 3633 /* 3634 * Check that a pid in a child namespace still shows up as valid in ours. 3635 */ 3636 TEST(user_notification_child_pid_ns) 3637 { 3638 pid_t pid; 3639 int status, listener; 3640 struct seccomp_notif req = {}; 3641 struct seccomp_notif_resp resp = {}; 3642 3643 ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0) { 3644 if (errno == EINVAL) 3645 SKIP(return, "kernel missing CLONE_NEWUSER support"); 3646 }; 3647 3648 listener = user_notif_syscall(__NR_getppid, 3649 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3650 ASSERT_GE(listener, 0); 3651 3652 pid = fork(); 3653 ASSERT_GE(pid, 0); 3654 3655 if (pid == 0) 3656 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 3657 3658 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3659 EXPECT_EQ(req.pid, pid); 3660 3661 resp.id = req.id; 3662 resp.error = 0; 3663 resp.val = USER_NOTIF_MAGIC; 3664 3665 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3666 3667 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3668 EXPECT_EQ(true, WIFEXITED(status)); 3669 EXPECT_EQ(0, WEXITSTATUS(status)); 3670 close(listener); 3671 } 3672 3673 /* 3674 * Check that a pid in a sibling (i.e. unrelated) namespace shows up as 0, i.e. 3675 * invalid. 3676 */ 3677 TEST(user_notification_sibling_pid_ns) 3678 { 3679 pid_t pid, pid2; 3680 int status, listener; 3681 struct seccomp_notif req = {}; 3682 struct seccomp_notif_resp resp = {}; 3683 3684 ASSERT_EQ(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), 0) { 3685 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3686 } 3687 3688 listener = user_notif_syscall(__NR_getppid, 3689 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3690 ASSERT_GE(listener, 0); 3691 3692 pid = fork(); 3693 ASSERT_GE(pid, 0); 3694 3695 if (pid == 0) { 3696 ASSERT_EQ(unshare(CLONE_NEWPID), 0); 3697 3698 pid2 = fork(); 3699 ASSERT_GE(pid2, 0); 3700 3701 if (pid2 == 0) 3702 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 3703 3704 EXPECT_EQ(waitpid(pid2, &status, 0), pid2); 3705 EXPECT_EQ(true, WIFEXITED(status)); 3706 EXPECT_EQ(0, WEXITSTATUS(status)); 3707 exit(WEXITSTATUS(status)); 3708 } 3709 3710 /* Create the sibling ns, and sibling in it. */ 3711 ASSERT_EQ(unshare(CLONE_NEWPID), 0) { 3712 if (errno == EPERM) 3713 SKIP(return, "CLONE_NEWPID requires CAP_SYS_ADMIN"); 3714 } 3715 ASSERT_EQ(errno, 0); 3716 3717 pid2 = fork(); 3718 ASSERT_GE(pid2, 0); 3719 3720 if (pid2 == 0) { 3721 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3722 /* 3723 * The pid should be 0, i.e. the task is in some namespace that 3724 * we can't "see". 
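 * req.pid is translated into the pid namespace of the process reading
 * the notification; a task living in a sibling namespace has no
 * mapping there and therefore shows up as 0.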
3725 */ 3726 EXPECT_EQ(req.pid, 0); 3727 3728 resp.id = req.id; 3729 resp.error = 0; 3730 resp.val = USER_NOTIF_MAGIC; 3731 3732 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3733 exit(0); 3734 } 3735 3736 close(listener); 3737 3738 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3739 EXPECT_EQ(true, WIFEXITED(status)); 3740 EXPECT_EQ(0, WEXITSTATUS(status)); 3741 3742 EXPECT_EQ(waitpid(pid2, &status, 0), pid2); 3743 EXPECT_EQ(true, WIFEXITED(status)); 3744 EXPECT_EQ(0, WEXITSTATUS(status)); 3745 } 3746 3747 TEST(user_notification_fault_recv) 3748 { 3749 pid_t pid; 3750 int status, listener; 3751 struct seccomp_notif req = {}; 3752 struct seccomp_notif_resp resp = {}; 3753 3754 ASSERT_EQ(unshare(CLONE_NEWUSER), 0) { 3755 if (errno == EINVAL) 3756 SKIP(return, "kernel missing CLONE_NEWUSER support"); 3757 } 3758 3759 listener = user_notif_syscall(__NR_getppid, 3760 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3761 ASSERT_GE(listener, 0); 3762 3763 pid = fork(); 3764 ASSERT_GE(pid, 0); 3765 3766 if (pid == 0) 3767 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 3768 3769 /* Do a bad recv() */ 3770 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1); 3771 EXPECT_EQ(errno, EFAULT); 3772 3773 /* We should still be able to receive this notification, though. */ 3774 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3775 EXPECT_EQ(req.pid, pid); 3776 3777 resp.id = req.id; 3778 resp.error = 0; 3779 resp.val = USER_NOTIF_MAGIC; 3780 3781 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3782 3783 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3784 EXPECT_EQ(true, WIFEXITED(status)); 3785 EXPECT_EQ(0, WEXITSTATUS(status)); 3786 } 3787 3788 TEST(seccomp_get_notif_sizes) 3789 { 3790 struct seccomp_notif_sizes sizes; 3791 3792 ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); 3793 EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif)); 3794 EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp)); 3795 } 3796 3797 TEST(user_notification_continue) 3798 { 3799 pid_t pid; 3800 long ret; 3801 int status, listener; 3802 struct seccomp_notif req = {}; 3803 struct seccomp_notif_resp resp = {}; 3804 struct pollfd pollfd; 3805 3806 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3807 ASSERT_EQ(0, ret) { 3808 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3809 } 3810 3811 listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER); 3812 ASSERT_GE(listener, 0); 3813 3814 pid = fork(); 3815 ASSERT_GE(pid, 0); 3816 3817 if (pid == 0) { 3818 int dup_fd, pipe_fds[2]; 3819 pid_t self; 3820 3821 ASSERT_GE(pipe(pipe_fds), 0); 3822 3823 dup_fd = dup(pipe_fds[0]); 3824 ASSERT_GE(dup_fd, 0); 3825 EXPECT_NE(pipe_fds[0], dup_fd); 3826 3827 self = getpid(); 3828 ASSERT_EQ(filecmp(self, self, pipe_fds[0], dup_fd), 0); 3829 exit(0); 3830 } 3831 3832 pollfd.fd = listener; 3833 pollfd.events = POLLIN | POLLOUT; 3834 3835 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3836 EXPECT_EQ(pollfd.revents, POLLIN); 3837 3838 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3839 3840 pollfd.fd = listener; 3841 pollfd.events = POLLIN | POLLOUT; 3842 3843 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3844 EXPECT_EQ(pollfd.revents, POLLOUT); 3845 3846 EXPECT_EQ(req.data.nr, __NR_dup); 3847 3848 resp.id = req.id; 3849 resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE; 3850 3851 /* 3852 * Verify that setting SECCOMP_USER_NOTIF_FLAG_CONTINUE enforces other 3853 * args be set to 0. 
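 * error and val are ignored when continuing, so the kernel insists
 * they are zero (keeping them available for future extensions); both
 * non-zero combinations below must fail with EINVAL.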
3854 */ 3855 resp.error = 0; 3856 resp.val = USER_NOTIF_MAGIC; 3857 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3858 EXPECT_EQ(errno, EINVAL); 3859 3860 resp.error = USER_NOTIF_MAGIC; 3861 resp.val = 0; 3862 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3863 EXPECT_EQ(errno, EINVAL); 3864 3865 resp.error = 0; 3866 resp.val = 0; 3867 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0) { 3868 if (errno == EINVAL) 3869 SKIP(goto skip, "Kernel does not support SECCOMP_USER_NOTIF_FLAG_CONTINUE"); 3870 } 3871 3872 skip: 3873 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3874 EXPECT_EQ(true, WIFEXITED(status)); 3875 EXPECT_EQ(0, WEXITSTATUS(status)) { 3876 if (WEXITSTATUS(status) == 2) { 3877 SKIP(return, "Kernel does not support kcmp() syscall"); 3878 return; 3879 } 3880 } 3881 } 3882 3883 TEST(user_notification_filter_empty) 3884 { 3885 pid_t pid; 3886 long ret; 3887 int status; 3888 struct pollfd pollfd; 3889 struct __clone_args args = { 3890 .flags = CLONE_FILES, 3891 .exit_signal = SIGCHLD, 3892 }; 3893 3894 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3895 ASSERT_EQ(0, ret) { 3896 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3897 } 3898 3899 pid = sys_clone3(&args, sizeof(args)); 3900 ASSERT_GE(pid, 0); 3901 3902 if (pid == 0) { 3903 int listener; 3904 3905 listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER); 3906 if (listener < 0) 3907 _exit(EXIT_FAILURE); 3908 3909 if (dup2(listener, 200) != 200) 3910 _exit(EXIT_FAILURE); 3911 3912 close(listener); 3913 3914 _exit(EXIT_SUCCESS); 3915 } 3916 3917 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3918 EXPECT_EQ(true, WIFEXITED(status)); 3919 EXPECT_EQ(0, WEXITSTATUS(status)); 3920 3921 /* 3922 * The seccomp filter has become unused so we should be notified once 3923 * the kernel gets around to cleaning up task struct. 
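 * The listener was kept alive here via CLONE_FILES plus dup2() to fd
 * 200, so once the child is gone no task can trigger the filter any
 * more and the fd should report POLLHUP within the poll() timeout
 * below.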
3924 */ 3925 pollfd.fd = 200; 3926 pollfd.events = POLLHUP; 3927 3928 EXPECT_GT(poll(&pollfd, 1, 2000), 0); 3929 EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0); 3930 } 3931 3932 static void *do_thread(void *data) 3933 { 3934 return NULL; 3935 } 3936 3937 TEST(user_notification_filter_empty_threaded) 3938 { 3939 pid_t pid; 3940 long ret; 3941 int status; 3942 struct pollfd pollfd; 3943 struct __clone_args args = { 3944 .flags = CLONE_FILES, 3945 .exit_signal = SIGCHLD, 3946 }; 3947 3948 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3949 ASSERT_EQ(0, ret) { 3950 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3951 } 3952 3953 pid = sys_clone3(&args, sizeof(args)); 3954 ASSERT_GE(pid, 0); 3955 3956 if (pid == 0) { 3957 pid_t pid1, pid2; 3958 int listener, status; 3959 pthread_t thread; 3960 3961 listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER); 3962 if (listener < 0) 3963 _exit(EXIT_FAILURE); 3964 3965 if (dup2(listener, 200) != 200) 3966 _exit(EXIT_FAILURE); 3967 3968 close(listener); 3969 3970 pid1 = fork(); 3971 if (pid1 < 0) 3972 _exit(EXIT_FAILURE); 3973 3974 if (pid1 == 0) 3975 _exit(EXIT_SUCCESS); 3976 3977 pid2 = fork(); 3978 if (pid2 < 0) 3979 _exit(EXIT_FAILURE); 3980 3981 if (pid2 == 0) 3982 _exit(EXIT_SUCCESS); 3983 3984 if (pthread_create(&thread, NULL, do_thread, NULL) || 3985 pthread_join(thread, NULL)) 3986 _exit(EXIT_FAILURE); 3987 3988 if (pthread_create(&thread, NULL, do_thread, NULL) || 3989 pthread_join(thread, NULL)) 3990 _exit(EXIT_FAILURE); 3991 3992 if (waitpid(pid1, &status, 0) != pid1 || !WIFEXITED(status) || 3993 WEXITSTATUS(status)) 3994 _exit(EXIT_FAILURE); 3995 3996 if (waitpid(pid2, &status, 0) != pid2 || !WIFEXITED(status) || 3997 WEXITSTATUS(status)) 3998 _exit(EXIT_FAILURE); 3999 4000 exit(EXIT_SUCCESS); 4001 } 4002 4003 EXPECT_EQ(waitpid(pid, &status, 0), pid); 4004 EXPECT_EQ(true, WIFEXITED(status)); 4005 EXPECT_EQ(0, WEXITSTATUS(status)); 4006 4007 /* 4008 * The seccomp filter has become unused so we should be notified once 4009 * the kernel gets around to cleaning up task struct. 4010 */ 4011 pollfd.fd = 200; 4012 pollfd.events = POLLHUP; 4013 4014 EXPECT_GT(poll(&pollfd, 1, 2000), 0); 4015 EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0); 4016 } 4017 4018 TEST(user_notification_addfd) 4019 { 4020 pid_t pid; 4021 long ret; 4022 int status, listener, memfd, fd, nextfd; 4023 struct seccomp_notif_addfd addfd = {}; 4024 struct seccomp_notif_addfd_small small = {}; 4025 struct seccomp_notif_addfd_big big = {}; 4026 struct seccomp_notif req = {}; 4027 struct seccomp_notif_resp resp = {}; 4028 /* 100 ms */ 4029 struct timespec delay = { .tv_nsec = 100000000 }; 4030 4031 /* There may be arbitrary already-open fds at test start. */ 4032 memfd = memfd_create("test", 0); 4033 ASSERT_GE(memfd, 0); 4034 nextfd = memfd + 1; 4035 4036 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 4037 ASSERT_EQ(0, ret) { 4038 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 4039 } 4040 4041 /* fd: 4 */ 4042 /* Check that the basic notification machinery works */ 4043 listener = user_notif_syscall(__NR_getppid, 4044 SECCOMP_FILTER_FLAG_NEW_LISTENER); 4045 ASSERT_EQ(listener, nextfd++); 4046 4047 pid = fork(); 4048 ASSERT_GE(pid, 0); 4049 4050 if (pid == 0) { 4051 /* fds will be added and this value is expected */ 4052 if (syscall(__NR_getppid) != USER_NOTIF_MAGIC) 4053 exit(1); 4054 4055 /* Atomic addfd+send is received here. 
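 * SECCOMP_ADDFD_FLAG_SEND installs the fd and answers the
 * notification in a single ioctl, and the installed fd number is what
 * the intercepted syscall returns in the target, so this getppid()
 * hands back a file descriptor rather than a pid.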

TEST(user_notification_addfd)
{
	pid_t pid;
	long ret;
	int status, listener, memfd, fd, nextfd;
	struct seccomp_notif_addfd addfd = {};
	struct seccomp_notif_addfd_small small = {};
	struct seccomp_notif_addfd_big big = {};
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};
	/* 100 ms */
	struct timespec delay = { .tv_nsec = 100000000 };

	/* There may be arbitrary already-open fds at test start. */
	memfd = memfd_create("test", 0);
	ASSERT_GE(memfd, 0);
	nextfd = memfd + 1;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* fd: 4 */
	/* Check that the basic notification machinery works */
	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_EQ(listener, nextfd++);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		/* fds will be added and this value is expected */
		if (syscall(__NR_getppid) != USER_NOTIF_MAGIC)
			exit(1);

		/* Atomic addfd+send is received here. Check it is a valid fd */
		if (fcntl(syscall(__NR_getppid), F_GETFD) == -1)
			exit(1);

		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);
	}

	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);

	addfd.srcfd = memfd;
	addfd.newfd = 0;
	addfd.id = req.id;
	addfd.flags = 0x0;

	/* Verify bad newfd_flags cannot be set */
	addfd.newfd_flags = ~O_CLOEXEC;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EINVAL);
	addfd.newfd_flags = O_CLOEXEC;

	/* Verify bad flags cannot be set */
	addfd.flags = 0xff;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EINVAL);
	addfd.flags = 0;

	/* Verify that remote_fd cannot be set without setting flags */
	addfd.newfd = 1;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EINVAL);
	addfd.newfd = 0;

	/* Verify small size cannot be set */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_SMALL, &small), -1);
	EXPECT_EQ(errno, EINVAL);

	/* Verify we can't send bits filled in unknown buffer area */
	memset(&big, 0xAA, sizeof(big));
	big.addfd = addfd;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big), -1);
	EXPECT_EQ(errno, E2BIG);

	/* Verify we can set an arbitrary remote fd */
	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
	EXPECT_EQ(fd, nextfd++);
	EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);

	/* Verify we can set an arbitrary remote fd with large size */
	memset(&big, 0x0, sizeof(big));
	big.addfd = addfd;
	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big);
	EXPECT_EQ(fd, nextfd++);

	/* Verify we can set a specific remote fd */
	addfd.newfd = 42;
	addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
	EXPECT_EQ(fd, 42);
	EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0);

	/* Resume syscall */
	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	/*
	 * This sets the ID of the ADD FD to the last request plus 1. The
	 * notification ID increments 1 per notification.
	 */
	addfd.id = req.id + 1;

	/* This spins until the underlying notification is generated */
	while (ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd) != -1 &&
	       errno != -EINPROGRESS)
		nanosleep(&delay, NULL);

	memset(&req, 0, sizeof(req));
	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	ASSERT_EQ(addfd.id, req.id);

	/* Verify we can do an atomic addfd and send */
	addfd.newfd = 0;
	addfd.flags = SECCOMP_ADDFD_FLAG_SEND;
	fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
	/*
	 * Child has earlier "low" fds and now 42, so we expect the next
	 * lowest available fd to be assigned here.
	 */
	EXPECT_EQ(fd, nextfd++);
	ASSERT_EQ(filecmp(getpid(), pid, memfd, fd), 0);

	/*
	 * This sets the ID of the ADD FD to the last request plus 1. The
	 * notification ID increments 1 per notification.
	 */
	addfd.id = req.id + 1;

	/* This spins until the underlying notification is generated */
	while (ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd) != -1 &&
	       errno != -EINPROGRESS)
		nanosleep(&delay, NULL);

	memset(&req, 0, sizeof(req));
	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	ASSERT_EQ(addfd.id, req.id);

	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	/* Wait for child to finish. */
	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	close(memfd);
}
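
/*
 * Illustrative sketch of the flow the addfd test above exercises: after
 * receiving a notification, the supervisor can install one of its own fds
 * into the target, either at the lowest free number or at a fixed number via
 * SECCOMP_ADDFD_FLAG_SETFD; the ioctl returns the fd number as seen by the
 * target. The function and parameter names here are assumptions for the
 * example, not part of the kernel API.
 */
static inline int example_addfd(int listener, __u64 notif_id, int srcfd,
				int fixed_target_fd)
{
	struct seccomp_notif_addfd addfd = {
		.id = notif_id,
		.srcfd = srcfd,
		.newfd_flags = O_CLOEXEC,
	};

	if (fixed_target_fd >= 0) {
		/* Behaves like dup2() into the target at that number. */
		addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
		addfd.newfd = fixed_target_fd;
	}

	/* On success the return value is the fd number in the target. */
	return ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd);
}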

TEST(user_notification_addfd_rlimit)
{
	pid_t pid;
	long ret;
	int status, listener, memfd;
	struct seccomp_notif_addfd addfd = {};
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};
	const struct rlimit lim = {
		.rlim_cur = 0,
		.rlim_max = 0,
	};

	memfd = memfd_create("test", 0);
	ASSERT_GE(memfd, 0);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* Check that the basic notification machinery works */
	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0)
		exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC);

	ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);

	ASSERT_EQ(prlimit(pid, RLIMIT_NOFILE, &lim, NULL), 0);

	addfd.srcfd = memfd;
	addfd.newfd_flags = O_CLOEXEC;
	addfd.newfd = 0;
	addfd.id = req.id;
	addfd.flags = 0;

	/* Should probably spot check /proc/sys/fs/file-nr */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EMFILE);

	addfd.flags = SECCOMP_ADDFD_FLAG_SEND;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EMFILE);

	addfd.newfd = 100;
	addfd.flags = SECCOMP_ADDFD_FLAG_SETFD;
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1);
	EXPECT_EQ(errno, EBADF);

	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;

	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	/* Wait for child to finish. */
	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	close(memfd);
}
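
/*
 * The rlimit test above clamps the *target's* fd limit with prlimit(2) so
 * that ADDFD fails with EMFILE (or EBADF for a SETFD number above the limit).
 * A minimal sketch of that call; the function name is an assumption for the
 * example. Operating on another process requires CAP_SYS_RESOURCE or
 * matching credentials with the target.
 */
static inline int example_clamp_target_nofile(pid_t target, rlim_t max_fds)
{
	const struct rlimit lim = {
		.rlim_cur = max_fds,
		.rlim_max = max_fds,
	};

	/* NULL old_limit: we do not care about the previous values. */
	return prlimit(target, RLIMIT_NOFILE, &lim, NULL);
}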

/* Make sure PTRACE_O_SUSPEND_SECCOMP requires CAP_SYS_ADMIN. */
FIXTURE(O_SUSPEND_SECCOMP) {
	pid_t pid;
};

FIXTURE_SETUP(O_SUSPEND_SECCOMP)
{
	ERRNO_FILTER(block_read, E2BIG);
	cap_value_t cap_list[] = { CAP_SYS_ADMIN };
	cap_t caps;

	self->pid = 0;

	/* make sure we don't have CAP_SYS_ADMIN */
	caps = cap_get_proc();
	ASSERT_NE(NULL, caps);
	ASSERT_EQ(0, cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR));
	ASSERT_EQ(0, cap_set_proc(caps));
	cap_free(caps);

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0));
	ASSERT_EQ(0, prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_block_read));

	self->pid = fork();
	ASSERT_GE(self->pid, 0);

	if (self->pid == 0) {
		while (1)
			pause();
		_exit(127);
	}
}

FIXTURE_TEARDOWN(O_SUSPEND_SECCOMP)
{
	if (self->pid)
		kill(self->pid, SIGKILL);
}

TEST_F(O_SUSPEND_SECCOMP, setoptions)
{
	int wstatus;

	ASSERT_EQ(0, ptrace(PTRACE_ATTACH, self->pid, NULL, 0));
	ASSERT_EQ(self->pid, wait(&wstatus));
	ASSERT_EQ(-1, ptrace(PTRACE_SETOPTIONS, self->pid, NULL, PTRACE_O_SUSPEND_SECCOMP));
	if (errno == EINVAL)
		SKIP(return, "Kernel does not support PTRACE_O_SUSPEND_SECCOMP (missing CONFIG_CHECKPOINT_RESTORE?)");
	ASSERT_EQ(EPERM, errno);
}

TEST_F(O_SUSPEND_SECCOMP, seize)
{
	int ret;

	ret = ptrace(PTRACE_SEIZE, self->pid, NULL, PTRACE_O_SUSPEND_SECCOMP);
	ASSERT_EQ(-1, ret);
	if (errno == EINVAL)
		SKIP(return, "Kernel does not support PTRACE_O_SUSPEND_SECCOMP (missing CONFIG_CHECKPOINT_RESTORE?)");
	ASSERT_EQ(EPERM, errno);
}
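
/*
 * For contrast with the two negative tests above, an illustrative sketch of
 * how a privileged checkpoint/restore style tool uses the option: with
 * CAP_SYS_ADMIN (and a kernel built with CONFIG_CHECKPOINT_RESTORE), seizing
 * the task with PTRACE_O_SUSPEND_SECCOMP suspends its seccomp filtering for
 * as long as the tracer keeps the option set. The function name is an
 * assumption for the example only.
 */
static inline long example_seize_and_suspend_seccomp(pid_t pid)
{
	/* PTRACE_SEIZE attaches without stopping the task. */
	return ptrace(PTRACE_SEIZE, pid, NULL, PTRACE_O_SUSPEND_SECCOMP);
}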

/*
 * get_nth - Get the nth, space separated entry in a file.
 *
 * Returns the length of the read field.
 * Throws error if field is zero-length.
 */
static ssize_t get_nth(struct __test_metadata *_metadata, const char *path,
		       const unsigned int position, char **entry)
{
	char *line = NULL;
	unsigned int i;
	ssize_t nread;
	size_t len = 0;
	FILE *f;

	f = fopen(path, "r");
	ASSERT_NE(f, NULL) {
		TH_LOG("Could not open %s: %s", path, strerror(errno));
	}

	for (i = 0; i < position; i++) {
		nread = getdelim(&line, &len, ' ', f);
		ASSERT_GE(nread, 0) {
			TH_LOG("Failed to read %d entry in file %s", i, path);
		}
	}
	fclose(f);

	ASSERT_GT(nread, 0) {
		TH_LOG("Entry in file %s had zero length", path);
	}

	*entry = line;
	return nread - 1;
}

/* For a given PID, get the task state (D, R, etc...) */
static char get_proc_stat(struct __test_metadata *_metadata, pid_t pid)
{
	char proc_path[100] = {0};
	char status;
	char *line;

	snprintf(proc_path, sizeof(proc_path), "/proc/%d/stat", pid);
	ASSERT_EQ(get_nth(_metadata, proc_path, 3, &line), 1);

	status = *line;
	free(line);

	return status;
}

TEST(user_notification_fifo)
{
	struct seccomp_notif_resp resp = {};
	struct seccomp_notif req = {};
	int i, status, listener;
	pid_t pid, pids[3];
	__u64 baseid;
	long ret;
	/* 100 ms */
	struct timespec delay = { .tv_nsec = 100000000 };

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* Setup a listener */
	listener = user_notif_syscall(__NR_getppid,
				      SECCOMP_FILTER_FLAG_NEW_LISTENER);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		ret = syscall(__NR_getppid);
		exit(ret != USER_NOTIF_MAGIC);
	}

	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	baseid = req.id + 1;

	resp.id = req.id;
	resp.error = 0;
	resp.val = USER_NOTIF_MAGIC;

	/* check that a response with flags == 0 is accepted */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	/* Start children, and generate notifications */
	for (i = 0; i < ARRAY_SIZE(pids); i++) {
		pid = fork();
		if (pid == 0) {
			ret = syscall(__NR_getppid);
			exit(ret != USER_NOTIF_MAGIC);
		}
		pids[i] = pid;
	}

	/* This spins until all of the children are sleeping */
restart_wait:
	for (i = 0; i < ARRAY_SIZE(pids); i++) {
		if (get_proc_stat(_metadata, pids[i]) != 'S') {
			nanosleep(&delay, NULL);
			goto restart_wait;
		}
	}

	/* Read the notifications in order (and respond) */
	for (i = 0; i < ARRAY_SIZE(pids); i++) {
		memset(&req, 0, sizeof(req));
		EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
		EXPECT_EQ(req.id, baseid + i);
		resp.id = req.id;
		EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);
	}

	/* Make sure notifications were received */
	for (i = 0; i < ARRAY_SIZE(pids); i++) {
		EXPECT_EQ(waitpid(pids[i], &status, 0), pids[i]);
		EXPECT_EQ(true, WIFEXITED(status));
		EXPECT_EQ(0, WEXITSTATUS(status));
	}
}

/* get_proc_syscall - Get the syscall in progress for a given pid
 *
 * Returns the current syscall number for a given process
 * Returns -1 if not in syscall (running or blocked)
 */
static long get_proc_syscall(struct __test_metadata *_metadata, int pid)
{
	char proc_path[100] = {0};
	long ret = -1;
	ssize_t nread;
	char *line;

	snprintf(proc_path, sizeof(proc_path), "/proc/%d/syscall", pid);
	nread = get_nth(_metadata, proc_path, 1, &line);
	ASSERT_GT(nread, 0);

	/*
	 * The first field of /proc/<pid>/syscall is either the literal
	 * "running" or the (decimal) number of the syscall the task is
	 * blocked in.
	 */
	if (strncmp("running", line, MIN(7, nread)))
		ret = strtol(line, NULL, 10);

	free(line);
	return ret;
}
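
/*
 * Taken together, get_proc_stat() and get_proc_syscall() let a test spin
 * until a child is parked in a particular syscall. A sketch of the kind of
 * wait loop the tests below use (illustrative only; like them, it exits once
 * either the expected syscall is reported or the task is sleeping):
 */
static inline void example_wait_for_blocked_syscall(
		struct __test_metadata *_metadata, pid_t pid, long nr)
{
	/* Poll /proc every 100 ms. */
	struct timespec delay = { .tv_nsec = 100000000 };

	while (get_proc_syscall(_metadata, pid) != nr &&
	       get_proc_stat(_metadata, pid) != 'S')
		nanosleep(&delay, NULL);
}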

/* Ensure non-fatal signals prior to receive are unmodified */
TEST(user_notification_wait_killable_pre_notification)
{
	struct sigaction new_action = {
		.sa_handler = signal_handler,
	};
	int listener, status, sk_pair[2];
	pid_t pid;
	long ret;
	char c;
	/* 100 ms */
	struct timespec delay = { .tv_nsec = 100000000 };

	ASSERT_EQ(sigemptyset(&new_action.sa_mask), 0);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret)
	{
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);

	listener = user_notif_syscall(
		__NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER |
			      SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV);
	ASSERT_GE(listener, 0);

	/*
	 * Check that we can interrupt the process with SIGUSR1 prior to
	 * receiving the notification. SIGUSR1 is wired up to a custom signal
	 * handler, and we make sure it gets called.
	 */
	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		close(sk_pair[0]);
		handled = sk_pair[1];

		/* Setup the non-fatal sigaction without SA_RESTART */
		if (sigaction(SIGUSR1, &new_action, NULL)) {
			perror("sigaction");
			exit(1);
		}

		ret = syscall(__NR_getppid);
		/* Make sure we got a return from a signal interruption */
		exit(ret != -1 || errno != EINTR);
	}

	/*
	 * Make sure we've gotten to the seccomp user notification wait
	 * from getppid prior to sending any signals
	 */
	while (get_proc_syscall(_metadata, pid) != __NR_getppid &&
	       get_proc_stat(_metadata, pid) != 'S')
		nanosleep(&delay, NULL);

	/* Send non-fatal kill signal */
	EXPECT_EQ(kill(pid, SIGUSR1), 0);

	/* wait for process to exit (exit checks for EINTR) */
	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));

	EXPECT_EQ(read(sk_pair[0], &c, 1), 1);
}
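
/*
 * What SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV buys a supervisor: before the
 * notification has been received, non-fatal signals interrupt the target's
 * syscall as usual (the test above); after SECCOMP_IOCTL_NOTIF_RECV, the
 * target waits in a killable-only sleep until the response arrives (the tests
 * below). An illustrative sketch of a supervisor that relies on this so its
 * reply does not race against EINTR in the target; the function name and the
 * "allow with return value 0" policy are assumptions for the example.
 */
static inline int example_handle_one_notification(int listener)
{
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};

	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req))
		return -1;

	/*
	 * With WAIT_KILLABLE_RECV set on the filter, only fatal signals can
	 * take the target out of its wait from this point on.
	 */
	resp.id = req.id;
	resp.error = 0;
	resp.val = 0;

	return ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
}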

/* Ensure non-fatal signals after receive are blocked */
TEST(user_notification_wait_killable)
{
	struct sigaction new_action = {
		.sa_handler = signal_handler,
	};
	struct seccomp_notif_resp resp = {};
	struct seccomp_notif req = {};
	int listener, status, sk_pair[2];
	pid_t pid;
	long ret;
	char c;
	/* 100 ms */
	struct timespec delay = { .tv_nsec = 100000000 };

	ASSERT_EQ(sigemptyset(&new_action.sa_mask), 0);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret)
	{
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0);

	listener = user_notif_syscall(
		__NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER |
			      SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		close(sk_pair[0]);
		handled = sk_pair[1];

		/* Setup the sigaction without SA_RESTART */
		if (sigaction(SIGUSR1, &new_action, NULL)) {
			perror("sigaction");
			exit(1);
		}

		/* Make sure that the syscall is completed (no EINTR) */
		ret = syscall(__NR_getppid);
		exit(ret != USER_NOTIF_MAGIC);
	}

	/*
	 * Get the notification, to move the notifying process into an
	 * uninterruptible (TASK_KILLABLE) sleep.
	 */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	/* Send non-fatal kill signal */
	EXPECT_EQ(kill(pid, SIGUSR1), 0);

	/*
	 * Make sure the task moves to TASK_KILLABLE by waiting for the
	 * D (disk sleep) state after receiving the non-fatal signal.
	 */
	while (get_proc_stat(_metadata, pid) != 'D')
		nanosleep(&delay, NULL);

	resp.id = req.id;
	resp.val = USER_NOTIF_MAGIC;
	/* Make sure the notification is found and able to be replied to */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0);

	/*
	 * Make sure that the signal handler does get called once we're back in
	 * userspace.
	 */
	EXPECT_EQ(read(sk_pair[0], &c, 1), 1);
	/* wait for process to exit (exit checks for USER_NOTIF_MAGIC) */
	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFEXITED(status));
	EXPECT_EQ(0, WEXITSTATUS(status));
}

/* Ensure fatal signals after receive are not blocked */
TEST(user_notification_wait_killable_fatal)
{
	struct seccomp_notif req = {};
	int listener, status;
	pid_t pid;
	long ret;
	/* 100 ms */
	struct timespec delay = { .tv_nsec = 100000000 };

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret)
	{
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	listener = user_notif_syscall(
		__NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER |
			      SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV);
	ASSERT_GE(listener, 0);

	pid = fork();
	ASSERT_GE(pid, 0);

	if (pid == 0) {
		/* This should never complete as it should get a SIGTERM */
		syscall(__NR_getppid);
		exit(1);
	}

	while (get_proc_stat(_metadata, pid) != 'S')
		nanosleep(&delay, NULL);

	/*
	 * Get the notification, to move the notifying process into an
	 * uninterruptible (TASK_KILLABLE) sleep.
	 */
	EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0);
	/* Kill the process with a fatal signal */
	EXPECT_EQ(kill(pid, SIGTERM), 0);

	/*
	 * Wait for the process to exit, and make sure the process terminated
	 * due to the SIGTERM signal.
	 */
	EXPECT_EQ(waitpid(pid, &status, 0), pid);
	EXPECT_EQ(true, WIFSIGNALED(status));
	EXPECT_EQ(SIGTERM, WTERMSIG(status));
}

/*
 * TODO:
 * - expand NNP testing
 * - better arch-specific TRACE and TRAP handlers.
 * - endianness checking when appropriate
 * - 64-bit arg prodding
 * - arch value testing (x86 modes especially)
 * - verify that FILTER_FLAG_LOG filters generate log messages
 * - verify that RET_LOG generates log messages
 */

TEST_HARNESS_MAIN