1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved. 4 * 5 * Test code for seccomp bpf. 6 */ 7 8 #define _GNU_SOURCE 9 #include <sys/types.h> 10 11 /* 12 * glibc 2.26 and later have SIGSYS in siginfo_t. Before that, 13 * we need to use the kernel's siginfo.h file and trick glibc 14 * into accepting it. 15 */ 16 #if !__GLIBC_PREREQ(2, 26) 17 # include <asm/siginfo.h> 18 # define __have_siginfo_t 1 19 # define __have_sigval_t 1 20 # define __have_sigevent_t 1 21 #endif 22 23 #include <errno.h> 24 #include <linux/filter.h> 25 #include <sys/prctl.h> 26 #include <sys/ptrace.h> 27 #include <sys/user.h> 28 #include <linux/prctl.h> 29 #include <linux/ptrace.h> 30 #include <linux/seccomp.h> 31 #include <pthread.h> 32 #include <semaphore.h> 33 #include <signal.h> 34 #include <stddef.h> 35 #include <stdbool.h> 36 #include <string.h> 37 #include <time.h> 38 #include <limits.h> 39 #include <linux/elf.h> 40 #include <sys/uio.h> 41 #include <sys/utsname.h> 42 #include <sys/fcntl.h> 43 #include <sys/mman.h> 44 #include <sys/times.h> 45 #include <sys/socket.h> 46 #include <sys/ioctl.h> 47 #include <linux/kcmp.h> 48 #include <sys/resource.h> 49 #include <sys/capability.h> 50 51 #include <unistd.h> 52 #include <sys/syscall.h> 53 #include <poll.h> 54 55 #include "../kselftest_harness.h" 56 #include "../clone3/clone3_selftests.h" 57 58 /* Attempt to de-conflict with the selftests tree. */ 59 #ifndef SKIP 60 #define SKIP(s, ...) XFAIL(s, ##__VA_ARGS__) 61 #endif 62 63 #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) 64 65 #ifndef PR_SET_PTRACER 66 # define PR_SET_PTRACER 0x59616d61 67 #endif 68 69 #ifndef PR_SET_NO_NEW_PRIVS 70 #define PR_SET_NO_NEW_PRIVS 38 71 #define PR_GET_NO_NEW_PRIVS 39 72 #endif 73 74 #ifndef PR_SECCOMP_EXT 75 #define PR_SECCOMP_EXT 43 76 #endif 77 78 #ifndef SECCOMP_EXT_ACT 79 #define SECCOMP_EXT_ACT 1 80 #endif 81 82 #ifndef SECCOMP_EXT_ACT_TSYNC 83 #define SECCOMP_EXT_ACT_TSYNC 1 84 #endif 85 86 #ifndef SECCOMP_MODE_STRICT 87 #define SECCOMP_MODE_STRICT 1 88 #endif 89 90 #ifndef SECCOMP_MODE_FILTER 91 #define SECCOMP_MODE_FILTER 2 92 #endif 93 94 #ifndef SECCOMP_RET_ALLOW 95 struct seccomp_data { 96 int nr; 97 __u32 arch; 98 __u64 instruction_pointer; 99 __u64 args[6]; 100 }; 101 #endif 102 103 #ifndef SECCOMP_RET_KILL_PROCESS 104 #define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */ 105 #define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */ 106 #endif 107 #ifndef SECCOMP_RET_KILL 108 #define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD 109 #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */ 110 #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */ 111 #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */ 112 #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */ 113 #endif 114 #ifndef SECCOMP_RET_LOG 115 #define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */ 116 #endif 117 118 #ifndef __NR_seccomp 119 # if defined(__i386__) 120 # define __NR_seccomp 354 121 # elif defined(__x86_64__) 122 # define __NR_seccomp 317 123 # elif defined(__arm__) 124 # define __NR_seccomp 383 125 # elif defined(__aarch64__) 126 # define __NR_seccomp 277 127 # elif defined(__riscv) 128 # define __NR_seccomp 277 129 # elif defined(__csky__) 130 # define __NR_seccomp 277 131 # elif defined(__loongarch__) 132 # define __NR_seccomp 277 133 # elif defined(__hppa__) 134 # define __NR_seccomp 338 135 # elif defined(__powerpc__) 136 # define __NR_seccomp 358 137 # elif defined(__s390__) 138 # 
define __NR_seccomp 348
# elif defined(__xtensa__)
#  define __NR_seccomp 337
# elif defined(__sh__)
#  define __NR_seccomp 372
# elif defined(__mc68000__)
#  define __NR_seccomp 380
# else
#  warning "seccomp syscall number unknown for this architecture"
#  define __NR_seccomp 0xffff
# endif
#endif

#ifndef SECCOMP_SET_MODE_STRICT
#define SECCOMP_SET_MODE_STRICT 0
#endif

#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif

#ifndef SECCOMP_GET_ACTION_AVAIL
#define SECCOMP_GET_ACTION_AVAIL 2
#endif

#ifndef SECCOMP_GET_NOTIF_SIZES
#define SECCOMP_GET_NOTIF_SIZES 3
#endif

#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
#endif

#ifndef SECCOMP_FILTER_FLAG_LOG
#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
#endif

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif

#ifndef PTRACE_SECCOMP_GET_METADATA
#define PTRACE_SECCOMP_GET_METADATA 0x420d

struct seccomp_metadata {
	__u64 filter_off;	/* Input: which filter */
	__u64 flags;		/* Output: filter's flags */
};
#endif

#ifndef SECCOMP_FILTER_FLAG_NEW_LISTENER
#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
#endif

#ifndef SECCOMP_RET_USER_NOTIF
#define SECCOMP_RET_USER_NOTIF 0x7fc00000U

#define SECCOMP_IOC_MAGIC		'!'
#define SECCOMP_IO(nr)			_IO(SECCOMP_IOC_MAGIC, nr)
#define SECCOMP_IOR(nr, type)		_IOR(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOW(nr, type)		_IOW(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOWR(nr, type)		_IOWR(SECCOMP_IOC_MAGIC, nr, type)

/* Flags for seccomp notification fd ioctl. */
#define SECCOMP_IOCTL_NOTIF_RECV	SECCOMP_IOWR(0, struct seccomp_notif)
#define SECCOMP_IOCTL_NOTIF_SEND	SECCOMP_IOWR(1,	\
						struct seccomp_notif_resp)
#define SECCOMP_IOCTL_NOTIF_ID_VALID	SECCOMP_IOW(2, __u64)

struct seccomp_notif {
	__u64 id;
	__u32 pid;
	__u32 flags;
	struct seccomp_data data;
};

struct seccomp_notif_resp {
	__u64 id;
	__s64 val;
	__s32 error;
	__u32 flags;
};

struct seccomp_notif_sizes {
	__u16 seccomp_notif;
	__u16 seccomp_notif_resp;
	__u16 seccomp_data;
};
#endif
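
/*
 * Editorial sketch (not used by any test in this file): the basic
 * round trip for the user-notification definitions above. A supervisor
 * holding a listener fd (e.g. obtained with
 * SECCOMP_FILTER_FLAG_NEW_LISTENER) waits for one notification and
 * answers it with a faked "success" result instead of letting the
 * syscall run. The function name is illustrative only, and error
 * handling is reduced to a bare minimum.
 */
static int __attribute__((unused))
answer_one_notification(int listener_fd)
{
	struct seccomp_notif req;
	struct seccomp_notif_resp resp;

	/* The kernel expects the request buffer to be zeroed. */
	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	/* Block until the filter reports a SECCOMP_RET_USER_NOTIF syscall. */
	if (ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
		return -1;

	/* Answer with "return 0, no error" rather than running the syscall. */
	resp.id = req.id;
	resp.val = 0;
	resp.error = 0;
	resp.flags = 0;

	return ioctl(listener_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
}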
#ifndef SECCOMP_IOCTL_NOTIF_ADDFD
/* On success, the return value is the remote process's added fd number */
#define SECCOMP_IOCTL_NOTIF_ADDFD	SECCOMP_IOW(3, \
						struct seccomp_notif_addfd)

/* valid flags for seccomp_notif_addfd */
#define SECCOMP_ADDFD_FLAG_SETFD	(1UL << 0) /* Specify remote fd */

struct seccomp_notif_addfd {
	__u64 id;
	__u32 flags;
	__u32 srcfd;
	__u32 newfd;
	__u32 newfd_flags;
};
#endif

#ifndef SECCOMP_ADDFD_FLAG_SEND
#define SECCOMP_ADDFD_FLAG_SEND	(1UL << 1) /* Addfd and return it, atomically */
#endif

struct seccomp_notif_addfd_small {
	__u64 id;
	char weird[4];
};
#define SECCOMP_IOCTL_NOTIF_ADDFD_SMALL	\
	SECCOMP_IOW(3, struct seccomp_notif_addfd_small)

struct seccomp_notif_addfd_big {
	union {
		struct seccomp_notif_addfd addfd;
		char buf[sizeof(struct seccomp_notif_addfd) + 8];
	};
};
#define SECCOMP_IOCTL_NOTIF_ADDFD_BIG	\
	SECCOMP_IOWR(3, struct seccomp_notif_addfd_big)

#ifndef PTRACE_EVENTMSG_SYSCALL_ENTRY
#define PTRACE_EVENTMSG_SYSCALL_ENTRY	1
#define PTRACE_EVENTMSG_SYSCALL_EXIT	2
#endif

#ifndef SECCOMP_USER_NOTIF_FLAG_CONTINUE
#define SECCOMP_USER_NOTIF_FLAG_CONTINUE 0x00000001
#endif

#ifndef SECCOMP_FILTER_FLAG_TSYNC_ESRCH
#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
#endif

#ifndef SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV
#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5)
#endif

#ifndef seccomp
int seccomp(unsigned int op, unsigned int flags, void *args)
{
	errno = 0;
	return syscall(__NR_seccomp, op, flags, args);
}
#endif

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
#else
#error "wut? Unknown __BYTE_ORDER__?!"
#endif

#define SIBLING_EXIT_UNKILLED	0xbadbeef
#define SIBLING_EXIT_FAILURE	0xbadface
#define SIBLING_EXIT_NEWPRIVS	0xbadfeed

static int __filecmp(pid_t pid1, pid_t pid2, int fd1, int fd2)
{
#ifdef __NR_kcmp
	errno = 0;
	return syscall(__NR_kcmp, pid1, pid2, KCMP_FILE, fd1, fd2);
#else
	errno = ENOSYS;
	return -1;
#endif
}

/* Have TH_LOG report the actual location where filecmp() is used.
*/ 314 #define filecmp(pid1, pid2, fd1, fd2) ({ \ 315 int _ret; \ 316 \ 317 _ret = __filecmp(pid1, pid2, fd1, fd2); \ 318 if (_ret != 0) { \ 319 if (_ret < 0 && errno == ENOSYS) { \ 320 TH_LOG("kcmp() syscall missing (test is less accurate)");\ 321 _ret = 0; \ 322 } \ 323 } \ 324 _ret; }) 325 326 TEST(kcmp) 327 { 328 int ret; 329 330 ret = __filecmp(getpid(), getpid(), 1, 1); 331 EXPECT_EQ(ret, 0); 332 if (ret != 0 && errno == ENOSYS) 333 SKIP(return, "Kernel does not support kcmp() (missing CONFIG_KCMP?)"); 334 } 335 336 TEST(mode_strict_support) 337 { 338 long ret; 339 340 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL); 341 ASSERT_EQ(0, ret) { 342 TH_LOG("Kernel does not support CONFIG_SECCOMP"); 343 } 344 syscall(__NR_exit, 0); 345 } 346 347 TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL) 348 { 349 long ret; 350 351 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL); 352 ASSERT_EQ(0, ret) { 353 TH_LOG("Kernel does not support CONFIG_SECCOMP"); 354 } 355 syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 356 NULL, NULL, NULL); 357 EXPECT_FALSE(true) { 358 TH_LOG("Unreachable!"); 359 } 360 } 361 362 /* Note! This doesn't test no new privs behavior */ 363 TEST(no_new_privs_support) 364 { 365 long ret; 366 367 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 368 EXPECT_EQ(0, ret) { 369 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 370 } 371 } 372 373 /* Tests kernel support by checking for a copy_from_user() fault on NULL. */ 374 TEST(mode_filter_support) 375 { 376 long ret; 377 378 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); 379 ASSERT_EQ(0, ret) { 380 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 381 } 382 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL); 383 EXPECT_EQ(-1, ret); 384 EXPECT_EQ(EFAULT, errno) { 385 TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!"); 386 } 387 } 388 389 TEST(mode_filter_without_nnp) 390 { 391 struct sock_filter filter[] = { 392 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 393 }; 394 struct sock_fprog prog = { 395 .len = (unsigned short)ARRAY_SIZE(filter), 396 .filter = filter, 397 }; 398 long ret; 399 cap_t cap = cap_get_proc(); 400 cap_flag_value_t is_cap_sys_admin = 0; 401 402 ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0); 403 ASSERT_LE(0, ret) { 404 TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS"); 405 } 406 errno = 0; 407 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 408 /* Succeeds with CAP_SYS_ADMIN, fails without */ 409 cap_get_flag(cap, CAP_SYS_ADMIN, CAP_EFFECTIVE, &is_cap_sys_admin); 410 if (!is_cap_sys_admin) { 411 EXPECT_EQ(-1, ret); 412 EXPECT_EQ(EACCES, errno); 413 } else { 414 EXPECT_EQ(0, ret); 415 } 416 } 417 418 #define MAX_INSNS_PER_PATH 32768 419 420 TEST(filter_size_limits) 421 { 422 int i; 423 int count = BPF_MAXINSNS + 1; 424 struct sock_filter allow[] = { 425 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 426 }; 427 struct sock_filter *filter; 428 struct sock_fprog prog = { }; 429 long ret; 430 431 filter = calloc(count, sizeof(*filter)); 432 ASSERT_NE(NULL, filter); 433 434 for (i = 0; i < count; i++) 435 filter[i] = allow[0]; 436 437 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 438 ASSERT_EQ(0, ret); 439 440 prog.filter = filter; 441 prog.len = count; 442 443 /* Too many filter instructions in a single filter. */ 444 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 445 ASSERT_NE(0, ret) { 446 TH_LOG("Installing %d insn filter was allowed", prog.len); 447 } 448 449 /* One less is okay, though. 
*/ 450 prog.len -= 1; 451 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 452 ASSERT_EQ(0, ret) { 453 TH_LOG("Installing %d insn filter wasn't allowed", prog.len); 454 } 455 } 456 457 TEST(filter_chain_limits) 458 { 459 int i; 460 int count = BPF_MAXINSNS; 461 struct sock_filter allow[] = { 462 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 463 }; 464 struct sock_filter *filter; 465 struct sock_fprog prog = { }; 466 long ret; 467 468 filter = calloc(count, sizeof(*filter)); 469 ASSERT_NE(NULL, filter); 470 471 for (i = 0; i < count; i++) 472 filter[i] = allow[0]; 473 474 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 475 ASSERT_EQ(0, ret); 476 477 prog.filter = filter; 478 prog.len = 1; 479 480 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 481 ASSERT_EQ(0, ret); 482 483 prog.len = count; 484 485 /* Too many total filter instructions. */ 486 for (i = 0; i < MAX_INSNS_PER_PATH; i++) { 487 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 488 if (ret != 0) 489 break; 490 } 491 ASSERT_NE(0, ret) { 492 TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)", 493 i, count, i * (count + 4)); 494 } 495 } 496 497 TEST(mode_filter_cannot_move_to_strict) 498 { 499 struct sock_filter filter[] = { 500 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 501 }; 502 struct sock_fprog prog = { 503 .len = (unsigned short)ARRAY_SIZE(filter), 504 .filter = filter, 505 }; 506 long ret; 507 508 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 509 ASSERT_EQ(0, ret); 510 511 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 512 ASSERT_EQ(0, ret); 513 514 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0); 515 EXPECT_EQ(-1, ret); 516 EXPECT_EQ(EINVAL, errno); 517 } 518 519 520 TEST(mode_filter_get_seccomp) 521 { 522 struct sock_filter filter[] = { 523 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 524 }; 525 struct sock_fprog prog = { 526 .len = (unsigned short)ARRAY_SIZE(filter), 527 .filter = filter, 528 }; 529 long ret; 530 531 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 532 ASSERT_EQ(0, ret); 533 534 ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0); 535 EXPECT_EQ(0, ret); 536 537 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 538 ASSERT_EQ(0, ret); 539 540 ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0); 541 EXPECT_EQ(2, ret); 542 } 543 544 545 TEST(ALLOW_all) 546 { 547 struct sock_filter filter[] = { 548 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 549 }; 550 struct sock_fprog prog = { 551 .len = (unsigned short)ARRAY_SIZE(filter), 552 .filter = filter, 553 }; 554 long ret; 555 556 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 557 ASSERT_EQ(0, ret); 558 559 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); 560 ASSERT_EQ(0, ret); 561 } 562 563 TEST(empty_prog) 564 { 565 struct sock_filter filter[] = { 566 }; 567 struct sock_fprog prog = { 568 .len = (unsigned short)ARRAY_SIZE(filter), 569 .filter = filter, 570 }; 571 long ret; 572 573 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 574 ASSERT_EQ(0, ret); 575 576 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); 577 EXPECT_EQ(-1, ret); 578 EXPECT_EQ(EINVAL, errno); 579 } 580 581 TEST(log_all) 582 { 583 struct sock_filter filter[] = { 584 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG), 585 }; 586 struct sock_fprog prog = { 587 .len = (unsigned short)ARRAY_SIZE(filter), 588 .filter = filter, 589 }; 590 long ret; 591 pid_t parent = getppid(); 592 593 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 594 ASSERT_EQ(0, ret); 595 596 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); 597 ASSERT_EQ(0, 
ret);

	/* getppid() should succeed and be logged (no check for logging) */
	EXPECT_EQ(parent, syscall(__NR_getppid));
}

TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}

/* return code >= 0x80000000 is unused. */
TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}

TEST_SIGNAL(KILL_all, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}

TEST_SIGNAL(KILL_one, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
{
	void *fatal_address;
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
			(unsigned long)&fatal_address, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	struct tms timebuf;
	clock_t clock = times(&timebuf);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_LE(clock, syscall(__NR_times, &timebuf));
	/* times() should never return. */
	EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
}
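
/*
 * Editorial sketch (not referenced by any test): the filters above only
 * inspect the low 32 bits of a syscall argument via syscall_arg(). A
 * filter can match a full 64-bit argument by testing both halves.
 * Assuming a little-endian target, the low word sits at the start of
 * args[0] and the high word follows it; the array name and the
 * 0x1122334455667788 constant are illustrative only, and a real filter
 * would also check the syscall number and arch first.
 */
static struct sock_filter full_width_arg0_filter[] __attribute__((unused)) = {
	/* Load the low 32 bits of args[0] (little-endian layout assumed). */
	BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, args[0])),
	/* Low half mismatch: skip ahead to ALLOW. */
	BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x55667788, 0, 3),
	/* Load the high 32 bits of args[0]. */
	BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
		offsetof(struct seccomp_data, args[0]) + sizeof(__u32)),
	/* High half match: fall through to KILL; otherwise ALLOW. */
	BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x11223344, 0, 1),
	BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
};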
TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
{
#ifndef __NR_mmap2
	int sysno = __NR_mmap;
#else
	int sysno = __NR_mmap2;
#endif
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	int fd;
	void *map1, *map2;
	int page_size = sysconf(_SC_PAGESIZE);

	ASSERT_LT(0, page_size);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	fd = open("/dev/zero", O_RDONLY);
	ASSERT_NE(-1, fd);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	map1 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
	EXPECT_NE(MAP_FAILED, map1);
	/* mmap2() should never return. */
	map2 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
	EXPECT_EQ(MAP_FAILED, map2);

	/* The test failed, so clean up the resources. */
	munmap(map1, page_size);
	munmap(map2, page_size);
	close(fd);
}

/* This thread dies via a seccomp filter violation when asked to. */
void *kill_thread(void *data)
{
	bool die = (bool)data;

	if (die) {
		prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
		return (void *)SIBLING_EXIT_FAILURE;
	}

	return (void *)SIBLING_EXIT_UNKILLED;
}

enum kill_t {
	KILL_THREAD,
	KILL_PROCESS,
	RET_UNKNOWN
};

/* Prepare a thread that will kill itself or both of us. */
void kill_thread_or_group(struct __test_metadata *_metadata,
			  enum kill_t kill_how)
{
	pthread_t thread;
	void *status;
	/* Kill only when calling __NR_prctl. */
	struct sock_filter filter_thread[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_thread = {
		.len = (unsigned short)ARRAY_SIZE(filter_thread),
		.filter = filter_thread,
	};
	int kill = kill_how == KILL_PROCESS ? SECCOMP_RET_KILL_PROCESS : 0xAAAAAAAA;
	struct sock_filter filter_process[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, kill),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_process = {
		.len = (unsigned short)ARRAY_SIZE(filter_process),
		.filter = filter_process,
	};

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0,
			     kill_how == KILL_THREAD ? &prog_thread
						     : &prog_process));

	/*
	 * Add the KILL_THREAD rule again to make sure that the KILL_PROCESS
	 * flag cannot be downgraded by a new filter.
842 */ 843 if (kill_how == KILL_PROCESS) 844 ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread)); 845 846 /* Start a thread that will exit immediately. */ 847 ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false)); 848 ASSERT_EQ(0, pthread_join(thread, &status)); 849 ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status); 850 851 /* Start a thread that will die immediately. */ 852 ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true)); 853 ASSERT_EQ(0, pthread_join(thread, &status)); 854 ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status); 855 856 /* 857 * If we get here, only the spawned thread died. Let the parent know 858 * the whole process didn't die (i.e. this thread, the spawner, 859 * stayed running). 860 */ 861 exit(42); 862 } 863 864 TEST(KILL_thread) 865 { 866 int status; 867 pid_t child_pid; 868 869 child_pid = fork(); 870 ASSERT_LE(0, child_pid); 871 if (child_pid == 0) { 872 kill_thread_or_group(_metadata, KILL_THREAD); 873 _exit(38); 874 } 875 876 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 877 878 /* If only the thread was killed, we'll see exit 42. */ 879 ASSERT_TRUE(WIFEXITED(status)); 880 ASSERT_EQ(42, WEXITSTATUS(status)); 881 } 882 883 TEST(KILL_process) 884 { 885 int status; 886 pid_t child_pid; 887 888 child_pid = fork(); 889 ASSERT_LE(0, child_pid); 890 if (child_pid == 0) { 891 kill_thread_or_group(_metadata, KILL_PROCESS); 892 _exit(38); 893 } 894 895 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 896 897 /* If the entire process was killed, we'll see SIGSYS. */ 898 ASSERT_TRUE(WIFSIGNALED(status)); 899 ASSERT_EQ(SIGSYS, WTERMSIG(status)); 900 } 901 902 TEST(KILL_unknown) 903 { 904 int status; 905 pid_t child_pid; 906 907 child_pid = fork(); 908 ASSERT_LE(0, child_pid); 909 if (child_pid == 0) { 910 kill_thread_or_group(_metadata, RET_UNKNOWN); 911 _exit(38); 912 } 913 914 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 915 916 /* If the entire process was killed, we'll see SIGSYS. */ 917 EXPECT_TRUE(WIFSIGNALED(status)) { 918 TH_LOG("Unknown SECCOMP_RET is only killing the thread?"); 919 } 920 ASSERT_EQ(SIGSYS, WTERMSIG(status)); 921 } 922 923 /* TODO(wad) add 64-bit versus 32-bit arg tests. */ 924 TEST(arg_out_of_range) 925 { 926 struct sock_filter filter[] = { 927 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)), 928 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 929 }; 930 struct sock_fprog prog = { 931 .len = (unsigned short)ARRAY_SIZE(filter), 932 .filter = filter, 933 }; 934 long ret; 935 936 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 937 ASSERT_EQ(0, ret); 938 939 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); 940 EXPECT_EQ(-1, ret); 941 EXPECT_EQ(EINVAL, errno); 942 } 943 944 #define ERRNO_FILTER(name, errno) \ 945 struct sock_filter _read_filter_##name[] = { \ 946 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, \ 947 offsetof(struct seccomp_data, nr)), \ 948 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), \ 949 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno), \ 950 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), \ 951 }; \ 952 struct sock_fprog prog_##name = { \ 953 .len = (unsigned short)ARRAY_SIZE(_read_filter_##name), \ 954 .filter = _read_filter_##name, \ 955 } 956 957 /* Make sure basic errno values are correctly passed through a filter. 
*/ 958 TEST(ERRNO_valid) 959 { 960 ERRNO_FILTER(valid, E2BIG); 961 long ret; 962 pid_t parent = getppid(); 963 964 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 965 ASSERT_EQ(0, ret); 966 967 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid); 968 ASSERT_EQ(0, ret); 969 970 EXPECT_EQ(parent, syscall(__NR_getppid)); 971 EXPECT_EQ(-1, read(-1, NULL, 0)); 972 EXPECT_EQ(E2BIG, errno); 973 } 974 975 /* Make sure an errno of zero is correctly handled by the arch code. */ 976 TEST(ERRNO_zero) 977 { 978 ERRNO_FILTER(zero, 0); 979 long ret; 980 pid_t parent = getppid(); 981 982 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 983 ASSERT_EQ(0, ret); 984 985 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero); 986 ASSERT_EQ(0, ret); 987 988 EXPECT_EQ(parent, syscall(__NR_getppid)); 989 /* "errno" of 0 is ok. */ 990 EXPECT_EQ(0, read(-1, NULL, 0)); 991 } 992 993 /* 994 * The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller. 995 * This tests that the errno value gets capped correctly, fixed by 996 * 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO"). 997 */ 998 TEST(ERRNO_capped) 999 { 1000 ERRNO_FILTER(capped, 4096); 1001 long ret; 1002 pid_t parent = getppid(); 1003 1004 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1005 ASSERT_EQ(0, ret); 1006 1007 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped); 1008 ASSERT_EQ(0, ret); 1009 1010 EXPECT_EQ(parent, syscall(__NR_getppid)); 1011 EXPECT_EQ(-1, read(-1, NULL, 0)); 1012 EXPECT_EQ(4095, errno); 1013 } 1014 1015 /* 1016 * Filters are processed in reverse order: last applied is executed first. 1017 * Since only the SECCOMP_RET_ACTION mask is tested for return values, the 1018 * SECCOMP_RET_DATA mask results will follow the most recently applied 1019 * matching filter return (and not the lowest or highest value). 
1020 */ 1021 TEST(ERRNO_order) 1022 { 1023 ERRNO_FILTER(first, 11); 1024 ERRNO_FILTER(second, 13); 1025 ERRNO_FILTER(third, 12); 1026 long ret; 1027 pid_t parent = getppid(); 1028 1029 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1030 ASSERT_EQ(0, ret); 1031 1032 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first); 1033 ASSERT_EQ(0, ret); 1034 1035 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second); 1036 ASSERT_EQ(0, ret); 1037 1038 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third); 1039 ASSERT_EQ(0, ret); 1040 1041 EXPECT_EQ(parent, syscall(__NR_getppid)); 1042 EXPECT_EQ(-1, read(-1, NULL, 0)); 1043 EXPECT_EQ(12, errno); 1044 } 1045 1046 FIXTURE(TRAP) { 1047 struct sock_fprog prog; 1048 }; 1049 1050 FIXTURE_SETUP(TRAP) 1051 { 1052 struct sock_filter filter[] = { 1053 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 1054 offsetof(struct seccomp_data, nr)), 1055 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), 1056 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP), 1057 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1058 }; 1059 1060 memset(&self->prog, 0, sizeof(self->prog)); 1061 self->prog.filter = malloc(sizeof(filter)); 1062 ASSERT_NE(NULL, self->prog.filter); 1063 memcpy(self->prog.filter, filter, sizeof(filter)); 1064 self->prog.len = (unsigned short)ARRAY_SIZE(filter); 1065 } 1066 1067 FIXTURE_TEARDOWN(TRAP) 1068 { 1069 if (self->prog.filter) 1070 free(self->prog.filter); 1071 } 1072 1073 TEST_F_SIGNAL(TRAP, dfl, SIGSYS) 1074 { 1075 long ret; 1076 1077 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1078 ASSERT_EQ(0, ret); 1079 1080 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog); 1081 ASSERT_EQ(0, ret); 1082 syscall(__NR_getpid); 1083 } 1084 1085 /* Ensure that SIGSYS overrides SIG_IGN */ 1086 TEST_F_SIGNAL(TRAP, ign, SIGSYS) 1087 { 1088 long ret; 1089 1090 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1091 ASSERT_EQ(0, ret); 1092 1093 signal(SIGSYS, SIG_IGN); 1094 1095 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog); 1096 ASSERT_EQ(0, ret); 1097 syscall(__NR_getpid); 1098 } 1099 1100 static siginfo_t TRAP_info; 1101 static volatile int TRAP_nr; 1102 static void TRAP_action(int nr, siginfo_t *info, void *void_context) 1103 { 1104 memcpy(&TRAP_info, info, sizeof(TRAP_info)); 1105 TRAP_nr = nr; 1106 } 1107 1108 TEST_F(TRAP, handler) 1109 { 1110 int ret, test; 1111 struct sigaction act; 1112 sigset_t mask; 1113 1114 memset(&act, 0, sizeof(act)); 1115 sigemptyset(&mask); 1116 sigaddset(&mask, SIGSYS); 1117 1118 act.sa_sigaction = &TRAP_action; 1119 act.sa_flags = SA_SIGINFO; 1120 ret = sigaction(SIGSYS, &act, NULL); 1121 ASSERT_EQ(0, ret) { 1122 TH_LOG("sigaction failed"); 1123 } 1124 ret = sigprocmask(SIG_UNBLOCK, &mask, NULL); 1125 ASSERT_EQ(0, ret) { 1126 TH_LOG("sigprocmask failed"); 1127 } 1128 1129 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1130 ASSERT_EQ(0, ret); 1131 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog); 1132 ASSERT_EQ(0, ret); 1133 TRAP_nr = 0; 1134 memset(&TRAP_info, 0, sizeof(TRAP_info)); 1135 /* Expect the registers to be rolled back. (nr = error) may vary 1136 * based on arch. */ 1137 ret = syscall(__NR_getpid); 1138 /* Silence gcc warning about volatile. 
*/ 1139 test = TRAP_nr; 1140 EXPECT_EQ(SIGSYS, test); 1141 struct local_sigsys { 1142 void *_call_addr; /* calling user insn */ 1143 int _syscall; /* triggering system call number */ 1144 unsigned int _arch; /* AUDIT_ARCH_* of syscall */ 1145 } *sigsys = (struct local_sigsys *) 1146 #ifdef si_syscall 1147 &(TRAP_info.si_call_addr); 1148 #else 1149 &TRAP_info.si_pid; 1150 #endif 1151 EXPECT_EQ(__NR_getpid, sigsys->_syscall); 1152 /* Make sure arch is non-zero. */ 1153 EXPECT_NE(0, sigsys->_arch); 1154 EXPECT_NE(0, (unsigned long)sigsys->_call_addr); 1155 } 1156 1157 FIXTURE(precedence) { 1158 struct sock_fprog allow; 1159 struct sock_fprog log; 1160 struct sock_fprog trace; 1161 struct sock_fprog error; 1162 struct sock_fprog trap; 1163 struct sock_fprog kill; 1164 }; 1165 1166 FIXTURE_SETUP(precedence) 1167 { 1168 struct sock_filter allow_insns[] = { 1169 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1170 }; 1171 struct sock_filter log_insns[] = { 1172 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 1173 offsetof(struct seccomp_data, nr)), 1174 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), 1175 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1176 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG), 1177 }; 1178 struct sock_filter trace_insns[] = { 1179 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 1180 offsetof(struct seccomp_data, nr)), 1181 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), 1182 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1183 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE), 1184 }; 1185 struct sock_filter error_insns[] = { 1186 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 1187 offsetof(struct seccomp_data, nr)), 1188 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), 1189 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1190 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO), 1191 }; 1192 struct sock_filter trap_insns[] = { 1193 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 1194 offsetof(struct seccomp_data, nr)), 1195 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), 1196 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1197 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP), 1198 }; 1199 struct sock_filter kill_insns[] = { 1200 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 1201 offsetof(struct seccomp_data, nr)), 1202 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), 1203 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1204 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 1205 }; 1206 1207 memset(self, 0, sizeof(*self)); 1208 #define FILTER_ALLOC(_x) \ 1209 self->_x.filter = malloc(sizeof(_x##_insns)); \ 1210 ASSERT_NE(NULL, self->_x.filter); \ 1211 memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \ 1212 self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns) 1213 FILTER_ALLOC(allow); 1214 FILTER_ALLOC(log); 1215 FILTER_ALLOC(trace); 1216 FILTER_ALLOC(error); 1217 FILTER_ALLOC(trap); 1218 FILTER_ALLOC(kill); 1219 } 1220 1221 FIXTURE_TEARDOWN(precedence) 1222 { 1223 #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter) 1224 FILTER_FREE(allow); 1225 FILTER_FREE(log); 1226 FILTER_FREE(trace); 1227 FILTER_FREE(error); 1228 FILTER_FREE(trap); 1229 FILTER_FREE(kill); 1230 } 1231 1232 TEST_F(precedence, allow_ok) 1233 { 1234 pid_t parent, res = 0; 1235 long ret; 1236 1237 parent = getppid(); 1238 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1239 ASSERT_EQ(0, ret); 1240 1241 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1242 ASSERT_EQ(0, ret); 1243 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1244 ASSERT_EQ(0, ret); 1245 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); 1246 ASSERT_EQ(0, ret); 1247 ret = prctl(PR_SET_SECCOMP, 
SECCOMP_MODE_FILTER, &self->error); 1248 ASSERT_EQ(0, ret); 1249 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); 1250 ASSERT_EQ(0, ret); 1251 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill); 1252 ASSERT_EQ(0, ret); 1253 /* Should work just fine. */ 1254 res = syscall(__NR_getppid); 1255 EXPECT_EQ(parent, res); 1256 } 1257 1258 TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS) 1259 { 1260 pid_t parent, res = 0; 1261 long ret; 1262 1263 parent = getppid(); 1264 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1265 ASSERT_EQ(0, ret); 1266 1267 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1268 ASSERT_EQ(0, ret); 1269 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1270 ASSERT_EQ(0, ret); 1271 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); 1272 ASSERT_EQ(0, ret); 1273 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); 1274 ASSERT_EQ(0, ret); 1275 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); 1276 ASSERT_EQ(0, ret); 1277 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill); 1278 ASSERT_EQ(0, ret); 1279 /* Should work just fine. */ 1280 res = syscall(__NR_getppid); 1281 EXPECT_EQ(parent, res); 1282 /* getpid() should never return. */ 1283 res = syscall(__NR_getpid); 1284 EXPECT_EQ(0, res); 1285 } 1286 1287 TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS) 1288 { 1289 pid_t parent; 1290 long ret; 1291 1292 parent = getppid(); 1293 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1294 ASSERT_EQ(0, ret); 1295 1296 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1297 ASSERT_EQ(0, ret); 1298 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill); 1299 ASSERT_EQ(0, ret); 1300 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); 1301 ASSERT_EQ(0, ret); 1302 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1303 ASSERT_EQ(0, ret); 1304 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); 1305 ASSERT_EQ(0, ret); 1306 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); 1307 ASSERT_EQ(0, ret); 1308 /* Should work just fine. */ 1309 EXPECT_EQ(parent, syscall(__NR_getppid)); 1310 /* getpid() should never return. */ 1311 EXPECT_EQ(0, syscall(__NR_getpid)); 1312 } 1313 1314 TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS) 1315 { 1316 pid_t parent; 1317 long ret; 1318 1319 parent = getppid(); 1320 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1321 ASSERT_EQ(0, ret); 1322 1323 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1324 ASSERT_EQ(0, ret); 1325 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1326 ASSERT_EQ(0, ret); 1327 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); 1328 ASSERT_EQ(0, ret); 1329 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); 1330 ASSERT_EQ(0, ret); 1331 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); 1332 ASSERT_EQ(0, ret); 1333 /* Should work just fine. */ 1334 EXPECT_EQ(parent, syscall(__NR_getppid)); 1335 /* getpid() should never return. 
*/ 1336 EXPECT_EQ(0, syscall(__NR_getpid)); 1337 } 1338 1339 TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS) 1340 { 1341 pid_t parent; 1342 long ret; 1343 1344 parent = getppid(); 1345 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1346 ASSERT_EQ(0, ret); 1347 1348 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1349 ASSERT_EQ(0, ret); 1350 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); 1351 ASSERT_EQ(0, ret); 1352 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1353 ASSERT_EQ(0, ret); 1354 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); 1355 ASSERT_EQ(0, ret); 1356 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); 1357 ASSERT_EQ(0, ret); 1358 /* Should work just fine. */ 1359 EXPECT_EQ(parent, syscall(__NR_getppid)); 1360 /* getpid() should never return. */ 1361 EXPECT_EQ(0, syscall(__NR_getpid)); 1362 } 1363 1364 TEST_F(precedence, errno_is_third) 1365 { 1366 pid_t parent; 1367 long ret; 1368 1369 parent = getppid(); 1370 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1371 ASSERT_EQ(0, ret); 1372 1373 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1374 ASSERT_EQ(0, ret); 1375 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1376 ASSERT_EQ(0, ret); 1377 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); 1378 ASSERT_EQ(0, ret); 1379 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); 1380 ASSERT_EQ(0, ret); 1381 /* Should work just fine. */ 1382 EXPECT_EQ(parent, syscall(__NR_getppid)); 1383 EXPECT_EQ(0, syscall(__NR_getpid)); 1384 } 1385 1386 TEST_F(precedence, errno_is_third_in_any_order) 1387 { 1388 pid_t parent; 1389 long ret; 1390 1391 parent = getppid(); 1392 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1393 ASSERT_EQ(0, ret); 1394 1395 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1396 ASSERT_EQ(0, ret); 1397 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); 1398 ASSERT_EQ(0, ret); 1399 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); 1400 ASSERT_EQ(0, ret); 1401 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1402 ASSERT_EQ(0, ret); 1403 /* Should work just fine. */ 1404 EXPECT_EQ(parent, syscall(__NR_getppid)); 1405 EXPECT_EQ(0, syscall(__NR_getpid)); 1406 } 1407 1408 TEST_F(precedence, trace_is_fourth) 1409 { 1410 pid_t parent; 1411 long ret; 1412 1413 parent = getppid(); 1414 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1415 ASSERT_EQ(0, ret); 1416 1417 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1418 ASSERT_EQ(0, ret); 1419 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1420 ASSERT_EQ(0, ret); 1421 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); 1422 ASSERT_EQ(0, ret); 1423 /* Should work just fine. */ 1424 EXPECT_EQ(parent, syscall(__NR_getppid)); 1425 /* No ptracer */ 1426 EXPECT_EQ(-1, syscall(__NR_getpid)); 1427 } 1428 1429 TEST_F(precedence, trace_is_fourth_in_any_order) 1430 { 1431 pid_t parent; 1432 long ret; 1433 1434 parent = getppid(); 1435 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1436 ASSERT_EQ(0, ret); 1437 1438 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); 1439 ASSERT_EQ(0, ret); 1440 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1441 ASSERT_EQ(0, ret); 1442 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1443 ASSERT_EQ(0, ret); 1444 /* Should work just fine. 
*/ 1445 EXPECT_EQ(parent, syscall(__NR_getppid)); 1446 /* No ptracer */ 1447 EXPECT_EQ(-1, syscall(__NR_getpid)); 1448 } 1449 1450 TEST_F(precedence, log_is_fifth) 1451 { 1452 pid_t mypid, parent; 1453 long ret; 1454 1455 mypid = getpid(); 1456 parent = getppid(); 1457 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1458 ASSERT_EQ(0, ret); 1459 1460 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1461 ASSERT_EQ(0, ret); 1462 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1463 ASSERT_EQ(0, ret); 1464 /* Should work just fine. */ 1465 EXPECT_EQ(parent, syscall(__NR_getppid)); 1466 /* Should also work just fine */ 1467 EXPECT_EQ(mypid, syscall(__NR_getpid)); 1468 } 1469 1470 TEST_F(precedence, log_is_fifth_in_any_order) 1471 { 1472 pid_t mypid, parent; 1473 long ret; 1474 1475 mypid = getpid(); 1476 parent = getppid(); 1477 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1478 ASSERT_EQ(0, ret); 1479 1480 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log); 1481 ASSERT_EQ(0, ret); 1482 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); 1483 ASSERT_EQ(0, ret); 1484 /* Should work just fine. */ 1485 EXPECT_EQ(parent, syscall(__NR_getppid)); 1486 /* Should also work just fine */ 1487 EXPECT_EQ(mypid, syscall(__NR_getpid)); 1488 } 1489 1490 #ifndef PTRACE_O_TRACESECCOMP 1491 #define PTRACE_O_TRACESECCOMP 0x00000080 1492 #endif 1493 1494 /* Catch the Ubuntu 12.04 value error. */ 1495 #if PTRACE_EVENT_SECCOMP != 7 1496 #undef PTRACE_EVENT_SECCOMP 1497 #endif 1498 1499 #ifndef PTRACE_EVENT_SECCOMP 1500 #define PTRACE_EVENT_SECCOMP 7 1501 #endif 1502 1503 #define PTRACE_EVENT_MASK(status) ((status) >> 16) 1504 bool tracer_running; 1505 void tracer_stop(int sig) 1506 { 1507 tracer_running = false; 1508 } 1509 1510 typedef void tracer_func_t(struct __test_metadata *_metadata, 1511 pid_t tracee, int status, void *args); 1512 1513 void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee, 1514 tracer_func_t tracer_func, void *args, bool ptrace_syscall) 1515 { 1516 int ret = -1; 1517 struct sigaction action = { 1518 .sa_handler = tracer_stop, 1519 }; 1520 1521 /* Allow external shutdown. */ 1522 tracer_running = true; 1523 ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL)); 1524 1525 errno = 0; 1526 while (ret == -1 && errno != EINVAL) 1527 ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0); 1528 ASSERT_EQ(0, ret) { 1529 kill(tracee, SIGKILL); 1530 } 1531 /* Wait for attach stop */ 1532 wait(NULL); 1533 1534 ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ? 1535 PTRACE_O_TRACESYSGOOD : 1536 PTRACE_O_TRACESECCOMP); 1537 ASSERT_EQ(0, ret) { 1538 TH_LOG("Failed to set PTRACE_O_TRACESECCOMP"); 1539 kill(tracee, SIGKILL); 1540 } 1541 ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT, 1542 tracee, NULL, 0); 1543 ASSERT_EQ(0, ret); 1544 1545 /* Unblock the tracee */ 1546 ASSERT_EQ(1, write(fd, "A", 1)); 1547 ASSERT_EQ(0, close(fd)); 1548 1549 /* Run until we're shut down. Must assert to stop execution. */ 1550 while (tracer_running) { 1551 int status; 1552 1553 if (wait(&status) != tracee) 1554 continue; 1555 1556 if (WIFSIGNALED(status)) { 1557 /* Child caught a fatal signal. */ 1558 return; 1559 } 1560 if (WIFEXITED(status)) { 1561 /* Child exited with code. */ 1562 return; 1563 } 1564 1565 /* Check if we got an expected event. 
*/ 1566 ASSERT_EQ(WIFCONTINUED(status), false); 1567 ASSERT_EQ(WIFSTOPPED(status), true); 1568 ASSERT_EQ(WSTOPSIG(status) & SIGTRAP, SIGTRAP) { 1569 TH_LOG("Unexpected WSTOPSIG: %d", WSTOPSIG(status)); 1570 } 1571 1572 tracer_func(_metadata, tracee, status, args); 1573 1574 ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT, 1575 tracee, NULL, 0); 1576 ASSERT_EQ(0, ret); 1577 } 1578 /* Directly report the status of our test harness results. */ 1579 syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); 1580 } 1581 1582 /* Common tracer setup/teardown functions. */ 1583 void cont_handler(int num) 1584 { } 1585 pid_t setup_trace_fixture(struct __test_metadata *_metadata, 1586 tracer_func_t func, void *args, bool ptrace_syscall) 1587 { 1588 char sync; 1589 int pipefd[2]; 1590 pid_t tracer_pid; 1591 pid_t tracee = getpid(); 1592 1593 /* Setup a pipe for clean synchronization. */ 1594 ASSERT_EQ(0, pipe(pipefd)); 1595 1596 /* Fork a child which we'll promote to tracer */ 1597 tracer_pid = fork(); 1598 ASSERT_LE(0, tracer_pid); 1599 signal(SIGALRM, cont_handler); 1600 if (tracer_pid == 0) { 1601 close(pipefd[0]); 1602 start_tracer(_metadata, pipefd[1], tracee, func, args, 1603 ptrace_syscall); 1604 syscall(__NR_exit, 0); 1605 } 1606 close(pipefd[1]); 1607 prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0); 1608 read(pipefd[0], &sync, 1); 1609 close(pipefd[0]); 1610 1611 return tracer_pid; 1612 } 1613 1614 void teardown_trace_fixture(struct __test_metadata *_metadata, 1615 pid_t tracer) 1616 { 1617 if (tracer) { 1618 int status; 1619 /* 1620 * Extract the exit code from the other process and 1621 * adopt it for ourselves in case its asserts failed. 1622 */ 1623 ASSERT_EQ(0, kill(tracer, SIGUSR1)); 1624 ASSERT_EQ(tracer, waitpid(tracer, &status, 0)); 1625 if (WEXITSTATUS(status)) 1626 _metadata->passed = 0; 1627 } 1628 } 1629 1630 /* "poke" tracer arguments and function. */ 1631 struct tracer_args_poke_t { 1632 unsigned long poke_addr; 1633 }; 1634 1635 void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status, 1636 void *args) 1637 { 1638 int ret; 1639 unsigned long msg; 1640 struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args; 1641 1642 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); 1643 EXPECT_EQ(0, ret); 1644 /* If this fails, don't try to recover. */ 1645 ASSERT_EQ(0x1001, msg) { 1646 kill(tracee, SIGKILL); 1647 } 1648 /* 1649 * Poke in the message. 1650 * Registers are not touched to try to keep this relatively arch 1651 * agnostic. 1652 */ 1653 ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001); 1654 EXPECT_EQ(0, ret); 1655 } 1656 1657 FIXTURE(TRACE_poke) { 1658 struct sock_fprog prog; 1659 pid_t tracer; 1660 long poked; 1661 struct tracer_args_poke_t tracer_args; 1662 }; 1663 1664 FIXTURE_SETUP(TRACE_poke) 1665 { 1666 struct sock_filter filter[] = { 1667 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 1668 offsetof(struct seccomp_data, nr)), 1669 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), 1670 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001), 1671 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 1672 }; 1673 1674 self->poked = 0; 1675 memset(&self->prog, 0, sizeof(self->prog)); 1676 self->prog.filter = malloc(sizeof(filter)); 1677 ASSERT_NE(NULL, self->prog.filter); 1678 memcpy(self->prog.filter, filter, sizeof(filter)); 1679 self->prog.len = (unsigned short)ARRAY_SIZE(filter); 1680 1681 /* Set up tracer args. */ 1682 self->tracer_args.poke_addr = (unsigned long)&self->poked; 1683 1684 /* Launch tracer. 
*/ 1685 self->tracer = setup_trace_fixture(_metadata, tracer_poke, 1686 &self->tracer_args, false); 1687 } 1688 1689 FIXTURE_TEARDOWN(TRACE_poke) 1690 { 1691 teardown_trace_fixture(_metadata, self->tracer); 1692 if (self->prog.filter) 1693 free(self->prog.filter); 1694 } 1695 1696 TEST_F(TRACE_poke, read_has_side_effects) 1697 { 1698 ssize_t ret; 1699 1700 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1701 ASSERT_EQ(0, ret); 1702 1703 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); 1704 ASSERT_EQ(0, ret); 1705 1706 EXPECT_EQ(0, self->poked); 1707 ret = read(-1, NULL, 0); 1708 EXPECT_EQ(-1, ret); 1709 EXPECT_EQ(0x1001, self->poked); 1710 } 1711 1712 TEST_F(TRACE_poke, getpid_runs_normally) 1713 { 1714 long ret; 1715 1716 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 1717 ASSERT_EQ(0, ret); 1718 1719 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); 1720 ASSERT_EQ(0, ret); 1721 1722 EXPECT_EQ(0, self->poked); 1723 EXPECT_NE(0, syscall(__NR_getpid)); 1724 EXPECT_EQ(0, self->poked); 1725 } 1726 1727 #if defined(__x86_64__) 1728 # define ARCH_REGS struct user_regs_struct 1729 # define SYSCALL_NUM(_regs) (_regs).orig_rax 1730 # define SYSCALL_RET(_regs) (_regs).rax 1731 #elif defined(__i386__) 1732 # define ARCH_REGS struct user_regs_struct 1733 # define SYSCALL_NUM(_regs) (_regs).orig_eax 1734 # define SYSCALL_RET(_regs) (_regs).eax 1735 #elif defined(__arm__) 1736 # define ARCH_REGS struct pt_regs 1737 # define SYSCALL_NUM(_regs) (_regs).ARM_r7 1738 # ifndef PTRACE_SET_SYSCALL 1739 # define PTRACE_SET_SYSCALL 23 1740 # endif 1741 # define SYSCALL_NUM_SET(_regs, _nr) \ 1742 EXPECT_EQ(0, ptrace(PTRACE_SET_SYSCALL, tracee, NULL, _nr)) 1743 # define SYSCALL_RET(_regs) (_regs).ARM_r0 1744 #elif defined(__aarch64__) 1745 # define ARCH_REGS struct user_pt_regs 1746 # define SYSCALL_NUM(_regs) (_regs).regs[8] 1747 # ifndef NT_ARM_SYSTEM_CALL 1748 # define NT_ARM_SYSTEM_CALL 0x404 1749 # endif 1750 # define SYSCALL_NUM_SET(_regs, _nr) \ 1751 do { \ 1752 struct iovec __v; \ 1753 typeof(_nr) __nr = (_nr); \ 1754 __v.iov_base = &__nr; \ 1755 __v.iov_len = sizeof(__nr); \ 1756 EXPECT_EQ(0, ptrace(PTRACE_SETREGSET, tracee, \ 1757 NT_ARM_SYSTEM_CALL, &__v)); \ 1758 } while (0) 1759 # define SYSCALL_RET(_regs) (_regs).regs[0] 1760 #elif defined(__loongarch__) 1761 # define ARCH_REGS struct user_pt_regs 1762 # define SYSCALL_NUM(_regs) (_regs).regs[11] 1763 # define SYSCALL_RET(_regs) (_regs).regs[4] 1764 #elif defined(__riscv) && __riscv_xlen == 64 1765 # define ARCH_REGS struct user_regs_struct 1766 # define SYSCALL_NUM(_regs) (_regs).a7 1767 # define SYSCALL_RET(_regs) (_regs).a0 1768 #elif defined(__csky__) 1769 # define ARCH_REGS struct pt_regs 1770 # if defined(__CSKYABIV2__) 1771 # define SYSCALL_NUM(_regs) (_regs).regs[3] 1772 # else 1773 # define SYSCALL_NUM(_regs) (_regs).regs[9] 1774 # endif 1775 # define SYSCALL_RET(_regs) (_regs).a0 1776 #elif defined(__hppa__) 1777 # define ARCH_REGS struct user_regs_struct 1778 # define SYSCALL_NUM(_regs) (_regs).gr[20] 1779 # define SYSCALL_RET(_regs) (_regs).gr[28] 1780 #elif defined(__powerpc__) 1781 # define ARCH_REGS struct pt_regs 1782 # define SYSCALL_NUM(_regs) (_regs).gpr[0] 1783 # define SYSCALL_RET(_regs) (_regs).gpr[3] 1784 # define SYSCALL_RET_SET(_regs, _val) \ 1785 do { \ 1786 typeof(_val) _result = (_val); \ 1787 if ((_regs.trap & 0xfff0) == 0x3000) { \ 1788 /* \ 1789 * scv 0 system call uses -ve result \ 1790 * for error, so no need to adjust. 
\ 1791 */ \ 1792 SYSCALL_RET(_regs) = _result; \ 1793 } else { \ 1794 /* \ 1795 * A syscall error is signaled by the \ 1796 * CR0 SO bit and the code is stored as \ 1797 * a positive value. \ 1798 */ \ 1799 if (_result < 0) { \ 1800 SYSCALL_RET(_regs) = -_result; \ 1801 (_regs).ccr |= 0x10000000; \ 1802 } else { \ 1803 SYSCALL_RET(_regs) = _result; \ 1804 (_regs).ccr &= ~0x10000000; \ 1805 } \ 1806 } \ 1807 } while (0) 1808 # define SYSCALL_RET_SET_ON_PTRACE_EXIT 1809 #elif defined(__s390__) 1810 # define ARCH_REGS s390_regs 1811 # define SYSCALL_NUM(_regs) (_regs).gprs[2] 1812 # define SYSCALL_RET_SET(_regs, _val) \ 1813 TH_LOG("Can't modify syscall return on this architecture") 1814 #elif defined(__mips__) 1815 # include <asm/unistd_nr_n32.h> 1816 # include <asm/unistd_nr_n64.h> 1817 # include <asm/unistd_nr_o32.h> 1818 # define ARCH_REGS struct pt_regs 1819 # define SYSCALL_NUM(_regs) \ 1820 ({ \ 1821 typeof((_regs).regs[2]) _nr; \ 1822 if ((_regs).regs[2] == __NR_O32_Linux) \ 1823 _nr = (_regs).regs[4]; \ 1824 else \ 1825 _nr = (_regs).regs[2]; \ 1826 _nr; \ 1827 }) 1828 # define SYSCALL_NUM_SET(_regs, _nr) \ 1829 do { \ 1830 if ((_regs).regs[2] == __NR_O32_Linux) \ 1831 (_regs).regs[4] = _nr; \ 1832 else \ 1833 (_regs).regs[2] = _nr; \ 1834 } while (0) 1835 # define SYSCALL_RET_SET(_regs, _val) \ 1836 TH_LOG("Can't modify syscall return on this architecture") 1837 #elif defined(__xtensa__) 1838 # define ARCH_REGS struct user_pt_regs 1839 # define SYSCALL_NUM(_regs) (_regs).syscall 1840 /* 1841 * On xtensa syscall return value is in the register 1842 * a2 of the current window which is not fixed. 1843 */ 1844 #define SYSCALL_RET(_regs) (_regs).a[(_regs).windowbase * 4 + 2] 1845 #elif defined(__sh__) 1846 # define ARCH_REGS struct pt_regs 1847 # define SYSCALL_NUM(_regs) (_regs).regs[3] 1848 # define SYSCALL_RET(_regs) (_regs).regs[0] 1849 #elif defined(__mc68000__) 1850 # define ARCH_REGS struct user_regs_struct 1851 # define SYSCALL_NUM(_regs) (_regs).orig_d0 1852 # define SYSCALL_RET(_regs) (_regs).d0 1853 #else 1854 # error "Do not know how to find your architecture's registers and syscalls" 1855 #endif 1856 1857 /* 1858 * Most architectures can change the syscall by just updating the 1859 * associated register. This is the default if not defined above. 1860 */ 1861 #ifndef SYSCALL_NUM_SET 1862 # define SYSCALL_NUM_SET(_regs, _nr) \ 1863 do { \ 1864 SYSCALL_NUM(_regs) = (_nr); \ 1865 } while (0) 1866 #endif 1867 /* 1868 * Most architectures can change the syscall return value by just 1869 * writing to the SYSCALL_RET register. This is the default if not 1870 * defined above. If an architecture cannot set the return value 1871 * (for example when the syscall and return value register is 1872 * shared), report it with TH_LOG() in an arch-specific definition 1873 * of SYSCALL_RET_SET() above, and leave SYSCALL_RET undefined. 1874 */ 1875 #if !defined(SYSCALL_RET) && !defined(SYSCALL_RET_SET) 1876 # error "One of SYSCALL_RET or SYSCALL_RET_SET is needed for this arch" 1877 #endif 1878 #ifndef SYSCALL_RET_SET 1879 # define SYSCALL_RET_SET(_regs, _val) \ 1880 do { \ 1881 SYSCALL_RET(_regs) = (_val); \ 1882 } while (0) 1883 #endif 1884 1885 /* When the syscall return can't be changed, stub out the tests for it. 
*/ 1886 #ifndef SYSCALL_RET 1887 # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action) 1888 #else 1889 # define EXPECT_SYSCALL_RETURN(val, action) \ 1890 do { \ 1891 errno = 0; \ 1892 if (val < 0) { \ 1893 EXPECT_EQ(-1, action); \ 1894 EXPECT_EQ(-(val), errno); \ 1895 } else { \ 1896 EXPECT_EQ(val, action); \ 1897 } \ 1898 } while (0) 1899 #endif 1900 1901 /* 1902 * Some architectures (e.g. powerpc) can only set syscall 1903 * return values on syscall exit during ptrace. 1904 */ 1905 const bool ptrace_entry_set_syscall_nr = true; 1906 const bool ptrace_entry_set_syscall_ret = 1907 #ifndef SYSCALL_RET_SET_ON_PTRACE_EXIT 1908 true; 1909 #else 1910 false; 1911 #endif 1912 1913 /* 1914 * Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for 1915 * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux). 1916 */ 1917 #if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__) 1918 # define ARCH_GETREGS(_regs) ptrace(PTRACE_GETREGS, tracee, 0, &(_regs)) 1919 # define ARCH_SETREGS(_regs) ptrace(PTRACE_SETREGS, tracee, 0, &(_regs)) 1920 #else 1921 # define ARCH_GETREGS(_regs) ({ \ 1922 struct iovec __v; \ 1923 __v.iov_base = &(_regs); \ 1924 __v.iov_len = sizeof(_regs); \ 1925 ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &__v); \ 1926 }) 1927 # define ARCH_SETREGS(_regs) ({ \ 1928 struct iovec __v; \ 1929 __v.iov_base = &(_regs); \ 1930 __v.iov_len = sizeof(_regs); \ 1931 ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &__v); \ 1932 }) 1933 #endif 1934 1935 /* Architecture-specific syscall fetching routine. */ 1936 int get_syscall(struct __test_metadata *_metadata, pid_t tracee) 1937 { 1938 ARCH_REGS regs; 1939 1940 EXPECT_EQ(0, ARCH_GETREGS(regs)) { 1941 return -1; 1942 } 1943 1944 return SYSCALL_NUM(regs); 1945 } 1946 1947 /* Architecture-specific syscall changing routine. */ 1948 void __change_syscall(struct __test_metadata *_metadata, 1949 pid_t tracee, long *syscall, long *ret) 1950 { 1951 ARCH_REGS orig, regs; 1952 1953 /* Do not get/set registers if we have nothing to do. */ 1954 if (!syscall && !ret) 1955 return; 1956 1957 EXPECT_EQ(0, ARCH_GETREGS(regs)) { 1958 return; 1959 } 1960 orig = regs; 1961 1962 if (syscall) 1963 SYSCALL_NUM_SET(regs, *syscall); 1964 1965 if (ret) 1966 SYSCALL_RET_SET(regs, *ret); 1967 1968 /* Flush any register changes made. */ 1969 if (memcmp(&orig, ®s, sizeof(orig)) != 0) 1970 EXPECT_EQ(0, ARCH_SETREGS(regs)); 1971 } 1972 1973 /* Change only syscall number. */ 1974 void change_syscall_nr(struct __test_metadata *_metadata, 1975 pid_t tracee, long syscall) 1976 { 1977 __change_syscall(_metadata, tracee, &syscall, NULL); 1978 } 1979 1980 /* Change syscall return value (and set syscall number to -1). */ 1981 void change_syscall_ret(struct __test_metadata *_metadata, 1982 pid_t tracee, long ret) 1983 { 1984 long syscall = -1; 1985 1986 __change_syscall(_metadata, tracee, &syscall, &ret); 1987 } 1988 1989 void tracer_seccomp(struct __test_metadata *_metadata, pid_t tracee, 1990 int status, void *args) 1991 { 1992 int ret; 1993 unsigned long msg; 1994 1995 EXPECT_EQ(PTRACE_EVENT_MASK(status), PTRACE_EVENT_SECCOMP) { 1996 TH_LOG("Unexpected ptrace event: %d", PTRACE_EVENT_MASK(status)); 1997 return; 1998 } 1999 2000 /* Make sure we got the right message. */ 2001 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); 2002 EXPECT_EQ(0, ret); 2003 2004 /* Validate and take action on expected syscalls. */ 2005 switch (msg) { 2006 case 0x1002: 2007 /* change getpid to getppid. 
*/ 2008 EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); 2009 change_syscall_nr(_metadata, tracee, __NR_getppid); 2010 break; 2011 case 0x1003: 2012 /* skip gettid with valid return code. */ 2013 EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); 2014 change_syscall_ret(_metadata, tracee, 45000); 2015 break; 2016 case 0x1004: 2017 /* skip openat with error. */ 2018 EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee)); 2019 change_syscall_ret(_metadata, tracee, -ESRCH); 2020 break; 2021 case 0x1005: 2022 /* do nothing (allow getppid) */ 2023 EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); 2024 break; 2025 default: 2026 EXPECT_EQ(0, msg) { 2027 TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg); 2028 kill(tracee, SIGKILL); 2029 } 2030 } 2031 2032 } 2033 2034 FIXTURE(TRACE_syscall) { 2035 struct sock_fprog prog; 2036 pid_t tracer, mytid, mypid, parent; 2037 long syscall_nr; 2038 }; 2039 2040 void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, 2041 int status, void *args) 2042 { 2043 int ret; 2044 unsigned long msg; 2045 static bool entry; 2046 long syscall_nr_val, syscall_ret_val; 2047 long *syscall_nr = NULL, *syscall_ret = NULL; 2048 FIXTURE_DATA(TRACE_syscall) *self = args; 2049 2050 EXPECT_EQ(WSTOPSIG(status) & 0x80, 0x80) { 2051 TH_LOG("Unexpected WSTOPSIG: %d", WSTOPSIG(status)); 2052 return; 2053 } 2054 2055 /* 2056 * The traditional way to tell PTRACE_SYSCALL entry/exit 2057 * is by counting. 2058 */ 2059 entry = !entry; 2060 2061 /* Make sure we got an appropriate message. */ 2062 ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); 2063 EXPECT_EQ(0, ret); 2064 EXPECT_EQ(entry ? PTRACE_EVENTMSG_SYSCALL_ENTRY 2065 : PTRACE_EVENTMSG_SYSCALL_EXIT, msg); 2066 2067 /* 2068 * Some architectures only support setting return values during 2069 * syscall exit under ptrace, and on exit the syscall number may 2070 * no longer be available. Therefore, save the initial sycall 2071 * number here, so it can be examined during both entry and exit 2072 * phases. 2073 */ 2074 if (entry) 2075 self->syscall_nr = get_syscall(_metadata, tracee); 2076 2077 /* 2078 * Depending on the architecture's syscall setting abilities, we 2079 * pick which things to set during this phase (entry or exit). 2080 */ 2081 if (entry == ptrace_entry_set_syscall_nr) 2082 syscall_nr = &syscall_nr_val; 2083 if (entry == ptrace_entry_set_syscall_ret) 2084 syscall_ret = &syscall_ret_val; 2085 2086 /* Now handle the actual rewriting cases. */ 2087 switch (self->syscall_nr) { 2088 case __NR_getpid: 2089 syscall_nr_val = __NR_getppid; 2090 /* Never change syscall return for this case. */ 2091 syscall_ret = NULL; 2092 break; 2093 case __NR_gettid: 2094 syscall_nr_val = -1; 2095 syscall_ret_val = 45000; 2096 break; 2097 case __NR_openat: 2098 syscall_nr_val = -1; 2099 syscall_ret_val = -ESRCH; 2100 break; 2101 default: 2102 /* Unhandled, do nothing. */ 2103 return; 2104 } 2105 2106 __change_syscall(_metadata, tracee, syscall_nr, syscall_ret); 2107 } 2108 2109 FIXTURE_VARIANT(TRACE_syscall) { 2110 /* 2111 * All of the SECCOMP_RET_TRACE behaviors can be tested with either 2112 * SECCOMP_RET_TRACE+PTRACE_CONT or plain ptrace()+PTRACE_SYSCALL. 2113 * This indicates if we should use SECCOMP_RET_TRACE (false), or 2114 * ptrace (true). 
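 * Each TEST_F(TRACE_syscall, ...) below is run once per variant, so the
 * same expectations are exercised against both mechanisms.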
2115 */ 2116 bool use_ptrace; 2117 }; 2118 2119 FIXTURE_VARIANT_ADD(TRACE_syscall, ptrace) { 2120 .use_ptrace = true, 2121 }; 2122 2123 FIXTURE_VARIANT_ADD(TRACE_syscall, seccomp) { 2124 .use_ptrace = false, 2125 }; 2126 2127 FIXTURE_SETUP(TRACE_syscall) 2128 { 2129 struct sock_filter filter[] = { 2130 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2131 offsetof(struct seccomp_data, nr)), 2132 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), 2133 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), 2134 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), 2135 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), 2136 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1), 2137 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), 2138 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 2139 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005), 2140 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2141 }; 2142 struct sock_fprog prog = { 2143 .len = (unsigned short)ARRAY_SIZE(filter), 2144 .filter = filter, 2145 }; 2146 long ret; 2147 2148 /* Prepare some testable syscall results. */ 2149 self->mytid = syscall(__NR_gettid); 2150 ASSERT_GT(self->mytid, 0); 2151 ASSERT_NE(self->mytid, 1) { 2152 TH_LOG("Running this test as init is not supported. :)"); 2153 } 2154 2155 self->mypid = getpid(); 2156 ASSERT_GT(self->mypid, 0); 2157 ASSERT_EQ(self->mytid, self->mypid); 2158 2159 self->parent = getppid(); 2160 ASSERT_GT(self->parent, 0); 2161 ASSERT_NE(self->parent, self->mypid); 2162 2163 /* Launch tracer. */ 2164 self->tracer = setup_trace_fixture(_metadata, 2165 variant->use_ptrace ? tracer_ptrace 2166 : tracer_seccomp, 2167 self, variant->use_ptrace); 2168 2169 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 2170 ASSERT_EQ(0, ret); 2171 2172 /* Do not install seccomp rewrite filters, as we'll use ptrace instead. */ 2173 if (variant->use_ptrace) 2174 return; 2175 2176 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2177 ASSERT_EQ(0, ret); 2178 } 2179 2180 FIXTURE_TEARDOWN(TRACE_syscall) 2181 { 2182 teardown_trace_fixture(_metadata, self->tracer); 2183 } 2184 2185 TEST(negative_ENOSYS) 2186 { 2187 /* 2188 * There should be no difference between an "internal" skip 2189 * and userspace asking for syscall "-1". 2190 */ 2191 errno = 0; 2192 EXPECT_EQ(-1, syscall(-1)); 2193 EXPECT_EQ(errno, ENOSYS); 2194 /* And no difference for "still not valid but not -1". */ 2195 errno = 0; 2196 EXPECT_EQ(-1, syscall(-101)); 2197 EXPECT_EQ(errno, ENOSYS); 2198 } 2199 2200 TEST_F(TRACE_syscall, negative_ENOSYS) 2201 { 2202 negative_ENOSYS(_metadata); 2203 } 2204 2205 TEST_F(TRACE_syscall, syscall_allowed) 2206 { 2207 /* getppid works as expected (no changes). */ 2208 EXPECT_EQ(self->parent, syscall(__NR_getppid)); 2209 EXPECT_NE(self->mypid, syscall(__NR_getppid)); 2210 } 2211 2212 TEST_F(TRACE_syscall, syscall_redirected) 2213 { 2214 /* getpid has been redirected to getppid as expected. */ 2215 EXPECT_EQ(self->parent, syscall(__NR_getpid)); 2216 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 2217 } 2218 2219 TEST_F(TRACE_syscall, syscall_errno) 2220 { 2221 /* Tracer should skip the open syscall, resulting in ESRCH. */ 2222 EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat)); 2223 } 2224 2225 TEST_F(TRACE_syscall, syscall_faked) 2226 { 2227 /* Tracer skips the gettid syscall and store altered return value. 
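 * (45000 is just a recognizable magic value substituted by both tracer
 * variants; it only needs to differ from a plausible real tid.)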
*/ 2228 EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid)); 2229 } 2230 2231 TEST_F_SIGNAL(TRACE_syscall, kill_immediate, SIGSYS) 2232 { 2233 struct sock_filter filter[] = { 2234 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2235 offsetof(struct seccomp_data, nr)), 2236 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_mknodat, 0, 1), 2237 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD), 2238 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2239 }; 2240 struct sock_fprog prog = { 2241 .len = (unsigned short)ARRAY_SIZE(filter), 2242 .filter = filter, 2243 }; 2244 long ret; 2245 2246 /* Install "kill on mknodat" filter. */ 2247 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2248 ASSERT_EQ(0, ret); 2249 2250 /* This should immediately die with SIGSYS, regardless of tracer. */ 2251 EXPECT_EQ(-1, syscall(__NR_mknodat, -1, NULL, 0, 0)); 2252 } 2253 2254 TEST_F(TRACE_syscall, skip_after) 2255 { 2256 struct sock_filter filter[] = { 2257 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2258 offsetof(struct seccomp_data, nr)), 2259 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 2260 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM), 2261 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2262 }; 2263 struct sock_fprog prog = { 2264 .len = (unsigned short)ARRAY_SIZE(filter), 2265 .filter = filter, 2266 }; 2267 long ret; 2268 2269 /* Install additional "errno on getppid" filter. */ 2270 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2271 ASSERT_EQ(0, ret); 2272 2273 /* Tracer will redirect getpid to getppid, and we should see EPERM. */ 2274 errno = 0; 2275 EXPECT_EQ(-1, syscall(__NR_getpid)); 2276 EXPECT_EQ(EPERM, errno); 2277 } 2278 2279 TEST_F_SIGNAL(TRACE_syscall, kill_after, SIGSYS) 2280 { 2281 struct sock_filter filter[] = { 2282 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2283 offsetof(struct seccomp_data, nr)), 2284 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), 2285 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2286 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2287 }; 2288 struct sock_fprog prog = { 2289 .len = (unsigned short)ARRAY_SIZE(filter), 2290 .filter = filter, 2291 }; 2292 long ret; 2293 2294 /* Install additional "death on getppid" filter. */ 2295 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2296 ASSERT_EQ(0, ret); 2297 2298 /* Tracer will redirect getpid to getppid, and we should die. */ 2299 EXPECT_NE(self->mypid, syscall(__NR_getpid)); 2300 } 2301 2302 TEST(seccomp_syscall) 2303 { 2304 struct sock_filter filter[] = { 2305 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2306 }; 2307 struct sock_fprog prog = { 2308 .len = (unsigned short)ARRAY_SIZE(filter), 2309 .filter = filter, 2310 }; 2311 long ret; 2312 2313 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 2314 ASSERT_EQ(0, ret) { 2315 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2316 } 2317 2318 /* Reject insane operation. */ 2319 ret = seccomp(-1, 0, &prog); 2320 ASSERT_NE(ENOSYS, errno) { 2321 TH_LOG("Kernel does not support seccomp syscall!"); 2322 } 2323 EXPECT_EQ(EINVAL, errno) { 2324 TH_LOG("Did not reject crazy op value!"); 2325 } 2326 2327 /* Reject strict with flags or pointer. */ 2328 ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL); 2329 EXPECT_EQ(EINVAL, errno) { 2330 TH_LOG("Did not reject mode strict with flags!"); 2331 } 2332 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog); 2333 EXPECT_EQ(EINVAL, errno) { 2334 TH_LOG("Did not reject mode strict with uargs!"); 2335 } 2336 2337 /* Reject insane args for filter. 
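 * Bogus flags (-1) are expected to fail with EINVAL, and a NULL uargs
 * pointer with EFAULT.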
*/ 2338 ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog); 2339 EXPECT_EQ(EINVAL, errno) { 2340 TH_LOG("Did not reject crazy filter flags!"); 2341 } 2342 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL); 2343 EXPECT_EQ(EFAULT, errno) { 2344 TH_LOG("Did not reject NULL filter!"); 2345 } 2346 2347 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2348 EXPECT_EQ(0, errno) { 2349 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s", 2350 strerror(errno)); 2351 } 2352 } 2353 2354 TEST(seccomp_syscall_mode_lock) 2355 { 2356 struct sock_filter filter[] = { 2357 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2358 }; 2359 struct sock_fprog prog = { 2360 .len = (unsigned short)ARRAY_SIZE(filter), 2361 .filter = filter, 2362 }; 2363 long ret; 2364 2365 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); 2366 ASSERT_EQ(0, ret) { 2367 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2368 } 2369 2370 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2371 ASSERT_NE(ENOSYS, errno) { 2372 TH_LOG("Kernel does not support seccomp syscall!"); 2373 } 2374 EXPECT_EQ(0, ret) { 2375 TH_LOG("Could not install filter!"); 2376 } 2377 2378 /* Make sure neither entry point will switch to strict. */ 2379 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0); 2380 EXPECT_EQ(EINVAL, errno) { 2381 TH_LOG("Switched to mode strict!"); 2382 } 2383 2384 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL); 2385 EXPECT_EQ(EINVAL, errno) { 2386 TH_LOG("Switched to mode strict!"); 2387 } 2388 } 2389 2390 /* 2391 * Test detection of known and unknown filter flags. Userspace needs to be able 2392 * to check if a filter flag is supported by the current kernel and a good way 2393 * of doing that is by attempting to enter filter mode, with the flag bit in 2394 * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates 2395 * that the flag is valid and EINVAL indicates that the flag is invalid. 2396 */ 2397 TEST(detect_seccomp_filter_flags) 2398 { 2399 unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, 2400 SECCOMP_FILTER_FLAG_LOG, 2401 SECCOMP_FILTER_FLAG_SPEC_ALLOW, 2402 SECCOMP_FILTER_FLAG_NEW_LISTENER, 2403 SECCOMP_FILTER_FLAG_TSYNC_ESRCH }; 2404 unsigned int exclusive[] = { 2405 SECCOMP_FILTER_FLAG_TSYNC, 2406 SECCOMP_FILTER_FLAG_NEW_LISTENER }; 2407 unsigned int flag, all_flags, exclusive_mask; 2408 int i; 2409 long ret; 2410 2411 /* Test detection of individual known-good filter flags */ 2412 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { 2413 int bits = 0; 2414 2415 flag = flags[i]; 2416 /* Make sure the flag is a single bit! */ 2417 while (flag) { 2418 if (flag & 0x1) 2419 bits ++; 2420 flag >>= 1; 2421 } 2422 ASSERT_EQ(1, bits); 2423 flag = flags[i]; 2424 2425 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2426 ASSERT_NE(ENOSYS, errno) { 2427 TH_LOG("Kernel does not support seccomp syscall!"); 2428 } 2429 EXPECT_EQ(-1, ret); 2430 EXPECT_EQ(EFAULT, errno) { 2431 TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!", 2432 flag); 2433 } 2434 2435 all_flags |= flag; 2436 } 2437 2438 /* 2439 * Test detection of all known-good filter flags combined. But 2440 * for the exclusive flags we need to mask them out and try them 2441 * individually for the "all flags" testing. 
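 * For example, TSYNC and NEW_LISTENER were mutually exclusive before
 * TSYNC_ESRCH existed, so each of them is combined with the non-exclusive
 * flags in turn rather than with each other.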
 */
	exclusive_mask = 0;
	for (i = 0; i < ARRAY_SIZE(exclusive); i++)
		exclusive_mask |= exclusive[i];
	for (i = 0; i < ARRAY_SIZE(exclusive); i++) {
		flag = all_flags & ~exclusive_mask;
		flag |= exclusive[i];

		ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
		EXPECT_EQ(-1, ret);
		EXPECT_EQ(EFAULT, errno) {
			TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
			       flag);
		}
	}

	/* Test detection of unknown filter flags, without exclusives. */
	flag = -1;
	flag &= ~exclusive_mask;
	ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
		       flag);
	}

	/*
	 * Test detection of an unknown filter flag that may simply need to be
	 * added to this test.
	 */
	flag = flags[ARRAY_SIZE(flags) - 1] << 1;
	ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
		       flag);
	}
}

TEST(TSYNC_first)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
		      &prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	EXPECT_EQ(0, ret) {
		TH_LOG("Could not install initial filter with TSYNC!");
	}
}

#define TSYNC_SIBLINGS 2
struct tsync_sibling {
	pthread_t tid;
	pid_t system_tid;
	sem_t *started;
	pthread_cond_t *cond;
	pthread_mutex_t *mutex;
	int diverge;
	int num_waits;
	struct sock_fprog *prog;
	struct __test_metadata *metadata;
};

/*
 * To avoid joining joined threads (which is not allowed by Bionic),
 * make sure we both successfully join and clear the tid to skip a
 * later join attempt during fixture teardown. Any remaining threads
 * will be directly killed during teardown.
2525 */ 2526 #define PTHREAD_JOIN(tid, status) \ 2527 do { \ 2528 int _rc = pthread_join(tid, status); \ 2529 if (_rc) { \ 2530 TH_LOG("pthread_join of tid %u failed: %d\n", \ 2531 (unsigned int)tid, _rc); \ 2532 } else { \ 2533 tid = 0; \ 2534 } \ 2535 } while (0) 2536 2537 FIXTURE(TSYNC) { 2538 struct sock_fprog root_prog, apply_prog; 2539 struct tsync_sibling sibling[TSYNC_SIBLINGS]; 2540 sem_t started; 2541 pthread_cond_t cond; 2542 pthread_mutex_t mutex; 2543 int sibling_count; 2544 }; 2545 2546 FIXTURE_SETUP(TSYNC) 2547 { 2548 struct sock_filter root_filter[] = { 2549 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2550 }; 2551 struct sock_filter apply_filter[] = { 2552 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2553 offsetof(struct seccomp_data, nr)), 2554 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), 2555 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2556 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2557 }; 2558 2559 memset(&self->root_prog, 0, sizeof(self->root_prog)); 2560 memset(&self->apply_prog, 0, sizeof(self->apply_prog)); 2561 memset(&self->sibling, 0, sizeof(self->sibling)); 2562 self->root_prog.filter = malloc(sizeof(root_filter)); 2563 ASSERT_NE(NULL, self->root_prog.filter); 2564 memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter)); 2565 self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter); 2566 2567 self->apply_prog.filter = malloc(sizeof(apply_filter)); 2568 ASSERT_NE(NULL, self->apply_prog.filter); 2569 memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter)); 2570 self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter); 2571 2572 self->sibling_count = 0; 2573 pthread_mutex_init(&self->mutex, NULL); 2574 pthread_cond_init(&self->cond, NULL); 2575 sem_init(&self->started, 0, 0); 2576 self->sibling[0].tid = 0; 2577 self->sibling[0].cond = &self->cond; 2578 self->sibling[0].started = &self->started; 2579 self->sibling[0].mutex = &self->mutex; 2580 self->sibling[0].diverge = 0; 2581 self->sibling[0].num_waits = 1; 2582 self->sibling[0].prog = &self->root_prog; 2583 self->sibling[0].metadata = _metadata; 2584 self->sibling[1].tid = 0; 2585 self->sibling[1].cond = &self->cond; 2586 self->sibling[1].started = &self->started; 2587 self->sibling[1].mutex = &self->mutex; 2588 self->sibling[1].diverge = 0; 2589 self->sibling[1].prog = &self->root_prog; 2590 self->sibling[1].num_waits = 1; 2591 self->sibling[1].metadata = _metadata; 2592 } 2593 2594 FIXTURE_TEARDOWN(TSYNC) 2595 { 2596 int sib = 0; 2597 2598 if (self->root_prog.filter) 2599 free(self->root_prog.filter); 2600 if (self->apply_prog.filter) 2601 free(self->apply_prog.filter); 2602 2603 for ( ; sib < self->sibling_count; ++sib) { 2604 struct tsync_sibling *s = &self->sibling[sib]; 2605 2606 if (!s->tid) 2607 continue; 2608 /* 2609 * If a thread is still running, it may be stuck, so hit 2610 * it over the head really hard. 2611 */ 2612 pthread_kill(s->tid, 9); 2613 } 2614 pthread_mutex_destroy(&self->mutex); 2615 pthread_cond_destroy(&self->cond); 2616 sem_destroy(&self->started); 2617 } 2618 2619 void *tsync_sibling(void *data) 2620 { 2621 long ret = 0; 2622 struct tsync_sibling *me = data; 2623 2624 me->system_tid = syscall(__NR_gettid); 2625 2626 pthread_mutex_lock(me->mutex); 2627 if (me->diverge) { 2628 /* Just re-apply the root prog to fork the tree */ 2629 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 2630 me->prog, 0, 0); 2631 } 2632 sem_post(me->started); 2633 /* Return outside of started so parent notices failures. 
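 * i.e. sem_post() above always runs so the parent's sibling_count keeps
 * advancing, and a prctl() failure is reported through the thread's exit
 * value (SIBLING_EXIT_FAILURE) instead.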
*/ 2634 if (ret) { 2635 pthread_mutex_unlock(me->mutex); 2636 return (void *)SIBLING_EXIT_FAILURE; 2637 } 2638 do { 2639 pthread_cond_wait(me->cond, me->mutex); 2640 me->num_waits = me->num_waits - 1; 2641 } while (me->num_waits); 2642 pthread_mutex_unlock(me->mutex); 2643 2644 ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); 2645 if (!ret) 2646 return (void *)SIBLING_EXIT_NEWPRIVS; 2647 read(-1, NULL, 0); 2648 return (void *)SIBLING_EXIT_UNKILLED; 2649 } 2650 2651 void tsync_start_sibling(struct tsync_sibling *sibling) 2652 { 2653 pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling); 2654 } 2655 2656 TEST_F(TSYNC, siblings_fail_prctl) 2657 { 2658 long ret; 2659 void *status; 2660 struct sock_filter filter[] = { 2661 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2662 offsetof(struct seccomp_data, nr)), 2663 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1), 2664 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL), 2665 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2666 }; 2667 struct sock_fprog prog = { 2668 .len = (unsigned short)ARRAY_SIZE(filter), 2669 .filter = filter, 2670 }; 2671 2672 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2673 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2674 } 2675 2676 /* Check prctl failure detection by requesting sib 0 diverge. */ 2677 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2678 ASSERT_NE(ENOSYS, errno) { 2679 TH_LOG("Kernel does not support seccomp syscall!"); 2680 } 2681 ASSERT_EQ(0, ret) { 2682 TH_LOG("setting filter failed"); 2683 } 2684 2685 self->sibling[0].diverge = 1; 2686 tsync_start_sibling(&self->sibling[0]); 2687 tsync_start_sibling(&self->sibling[1]); 2688 2689 while (self->sibling_count < TSYNC_SIBLINGS) { 2690 sem_wait(&self->started); 2691 self->sibling_count++; 2692 } 2693 2694 /* Signal the threads to clean up*/ 2695 pthread_mutex_lock(&self->mutex); 2696 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2697 TH_LOG("cond broadcast non-zero"); 2698 } 2699 pthread_mutex_unlock(&self->mutex); 2700 2701 /* Ensure diverging sibling failed to call prctl. */ 2702 PTHREAD_JOIN(self->sibling[0].tid, &status); 2703 EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status); 2704 PTHREAD_JOIN(self->sibling[1].tid, &status); 2705 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2706 } 2707 2708 TEST_F(TSYNC, two_siblings_with_ancestor) 2709 { 2710 long ret; 2711 void *status; 2712 2713 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2714 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2715 } 2716 2717 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2718 ASSERT_NE(ENOSYS, errno) { 2719 TH_LOG("Kernel does not support seccomp syscall!"); 2720 } 2721 ASSERT_EQ(0, ret) { 2722 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2723 } 2724 tsync_start_sibling(&self->sibling[0]); 2725 tsync_start_sibling(&self->sibling[1]); 2726 2727 while (self->sibling_count < TSYNC_SIBLINGS) { 2728 sem_wait(&self->started); 2729 self->sibling_count++; 2730 } 2731 2732 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2733 &self->apply_prog); 2734 ASSERT_EQ(0, ret) { 2735 TH_LOG("Could install filter on all threads!"); 2736 } 2737 /* Tell the siblings to test the policy */ 2738 pthread_mutex_lock(&self->mutex); 2739 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2740 TH_LOG("cond broadcast non-zero"); 2741 } 2742 pthread_mutex_unlock(&self->mutex); 2743 /* Ensure they are both killed and don't exit cleanly. 
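 * A sibling killed by the TSYNC'd filter's RET_KILL on read() never reaches
 * its normal return path, so its join status is 0 here rather than
 * SIBLING_EXIT_UNKILLED.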
*/ 2744 PTHREAD_JOIN(self->sibling[0].tid, &status); 2745 EXPECT_EQ(0x0, (long)status); 2746 PTHREAD_JOIN(self->sibling[1].tid, &status); 2747 EXPECT_EQ(0x0, (long)status); 2748 } 2749 2750 TEST_F(TSYNC, two_sibling_want_nnp) 2751 { 2752 void *status; 2753 2754 /* start siblings before any prctl() operations */ 2755 tsync_start_sibling(&self->sibling[0]); 2756 tsync_start_sibling(&self->sibling[1]); 2757 while (self->sibling_count < TSYNC_SIBLINGS) { 2758 sem_wait(&self->started); 2759 self->sibling_count++; 2760 } 2761 2762 /* Tell the siblings to test no policy */ 2763 pthread_mutex_lock(&self->mutex); 2764 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2765 TH_LOG("cond broadcast non-zero"); 2766 } 2767 pthread_mutex_unlock(&self->mutex); 2768 2769 /* Ensure they are both upset about lacking nnp. */ 2770 PTHREAD_JOIN(self->sibling[0].tid, &status); 2771 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); 2772 PTHREAD_JOIN(self->sibling[1].tid, &status); 2773 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); 2774 } 2775 2776 TEST_F(TSYNC, two_siblings_with_no_filter) 2777 { 2778 long ret; 2779 void *status; 2780 2781 /* start siblings before any prctl() operations */ 2782 tsync_start_sibling(&self->sibling[0]); 2783 tsync_start_sibling(&self->sibling[1]); 2784 while (self->sibling_count < TSYNC_SIBLINGS) { 2785 sem_wait(&self->started); 2786 self->sibling_count++; 2787 } 2788 2789 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2790 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2791 } 2792 2793 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2794 &self->apply_prog); 2795 ASSERT_NE(ENOSYS, errno) { 2796 TH_LOG("Kernel does not support seccomp syscall!"); 2797 } 2798 ASSERT_EQ(0, ret) { 2799 TH_LOG("Could install filter on all threads!"); 2800 } 2801 2802 /* Tell the siblings to test the policy */ 2803 pthread_mutex_lock(&self->mutex); 2804 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2805 TH_LOG("cond broadcast non-zero"); 2806 } 2807 pthread_mutex_unlock(&self->mutex); 2808 2809 /* Ensure they are both killed and don't exit cleanly. */ 2810 PTHREAD_JOIN(self->sibling[0].tid, &status); 2811 EXPECT_EQ(0x0, (long)status); 2812 PTHREAD_JOIN(self->sibling[1].tid, &status); 2813 EXPECT_EQ(0x0, (long)status); 2814 } 2815 2816 TEST_F(TSYNC, two_siblings_with_one_divergence) 2817 { 2818 long ret; 2819 void *status; 2820 2821 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2822 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2823 } 2824 2825 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2826 ASSERT_NE(ENOSYS, errno) { 2827 TH_LOG("Kernel does not support seccomp syscall!"); 2828 } 2829 ASSERT_EQ(0, ret) { 2830 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2831 } 2832 self->sibling[0].diverge = 1; 2833 tsync_start_sibling(&self->sibling[0]); 2834 tsync_start_sibling(&self->sibling[1]); 2835 2836 while (self->sibling_count < TSYNC_SIBLINGS) { 2837 sem_wait(&self->started); 2838 self->sibling_count++; 2839 } 2840 2841 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2842 &self->apply_prog); 2843 ASSERT_EQ(self->sibling[0].system_tid, ret) { 2844 TH_LOG("Did not fail on diverged sibling."); 2845 } 2846 2847 /* Wake the threads */ 2848 pthread_mutex_lock(&self->mutex); 2849 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2850 TH_LOG("cond broadcast non-zero"); 2851 } 2852 pthread_mutex_unlock(&self->mutex); 2853 2854 /* Ensure they are both unkilled. 
*/ 2855 PTHREAD_JOIN(self->sibling[0].tid, &status); 2856 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2857 PTHREAD_JOIN(self->sibling[1].tid, &status); 2858 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2859 } 2860 2861 TEST_F(TSYNC, two_siblings_with_one_divergence_no_tid_in_err) 2862 { 2863 long ret, flags; 2864 void *status; 2865 2866 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2867 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2868 } 2869 2870 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2871 ASSERT_NE(ENOSYS, errno) { 2872 TH_LOG("Kernel does not support seccomp syscall!"); 2873 } 2874 ASSERT_EQ(0, ret) { 2875 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2876 } 2877 self->sibling[0].diverge = 1; 2878 tsync_start_sibling(&self->sibling[0]); 2879 tsync_start_sibling(&self->sibling[1]); 2880 2881 while (self->sibling_count < TSYNC_SIBLINGS) { 2882 sem_wait(&self->started); 2883 self->sibling_count++; 2884 } 2885 2886 flags = SECCOMP_FILTER_FLAG_TSYNC | \ 2887 SECCOMP_FILTER_FLAG_TSYNC_ESRCH; 2888 ret = seccomp(SECCOMP_SET_MODE_FILTER, flags, &self->apply_prog); 2889 ASSERT_EQ(ESRCH, errno) { 2890 TH_LOG("Did not return ESRCH for diverged sibling."); 2891 } 2892 ASSERT_EQ(-1, ret) { 2893 TH_LOG("Did not fail on diverged sibling."); 2894 } 2895 2896 /* Wake the threads */ 2897 pthread_mutex_lock(&self->mutex); 2898 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2899 TH_LOG("cond broadcast non-zero"); 2900 } 2901 pthread_mutex_unlock(&self->mutex); 2902 2903 /* Ensure they are both unkilled. */ 2904 PTHREAD_JOIN(self->sibling[0].tid, &status); 2905 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2906 PTHREAD_JOIN(self->sibling[1].tid, &status); 2907 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2908 } 2909 2910 TEST_F(TSYNC, two_siblings_not_under_filter) 2911 { 2912 long ret, sib; 2913 void *status; 2914 struct timespec delay = { .tv_nsec = 100000000 }; 2915 2916 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2917 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2918 } 2919 2920 /* 2921 * Sibling 0 will have its own seccomp policy 2922 * and Sibling 1 will not be under seccomp at 2923 * all. Sibling 1 will enter seccomp and 0 2924 * will cause failure. 2925 */ 2926 self->sibling[0].diverge = 1; 2927 tsync_start_sibling(&self->sibling[0]); 2928 tsync_start_sibling(&self->sibling[1]); 2929 2930 while (self->sibling_count < TSYNC_SIBLINGS) { 2931 sem_wait(&self->started); 2932 self->sibling_count++; 2933 } 2934 2935 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2936 ASSERT_NE(ENOSYS, errno) { 2937 TH_LOG("Kernel does not support seccomp syscall!"); 2938 } 2939 ASSERT_EQ(0, ret) { 2940 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2941 } 2942 2943 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2944 &self->apply_prog); 2945 ASSERT_EQ(ret, self->sibling[0].system_tid) { 2946 TH_LOG("Did not fail on diverged sibling."); 2947 } 2948 sib = 1; 2949 if (ret == self->sibling[0].system_tid) 2950 sib = 0; 2951 2952 pthread_mutex_lock(&self->mutex); 2953 2954 /* Increment the other siblings num_waits so we can clean up 2955 * the one we just saw. 
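 * (the extra count keeps the remaining sibling parked on the condition
 * variable through this broadcast, so only the already-seen sibling exits
 * now)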
2956 */ 2957 self->sibling[!sib].num_waits += 1; 2958 2959 /* Signal the thread to clean up*/ 2960 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2961 TH_LOG("cond broadcast non-zero"); 2962 } 2963 pthread_mutex_unlock(&self->mutex); 2964 PTHREAD_JOIN(self->sibling[sib].tid, &status); 2965 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2966 /* Poll for actual task death. pthread_join doesn't guarantee it. */ 2967 while (!kill(self->sibling[sib].system_tid, 0)) 2968 nanosleep(&delay, NULL); 2969 /* Switch to the remaining sibling */ 2970 sib = !sib; 2971 2972 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2973 &self->apply_prog); 2974 ASSERT_EQ(0, ret) { 2975 TH_LOG("Expected the remaining sibling to sync"); 2976 }; 2977 2978 pthread_mutex_lock(&self->mutex); 2979 2980 /* If remaining sibling didn't have a chance to wake up during 2981 * the first broadcast, manually reduce the num_waits now. 2982 */ 2983 if (self->sibling[sib].num_waits > 1) 2984 self->sibling[sib].num_waits = 1; 2985 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2986 TH_LOG("cond broadcast non-zero"); 2987 } 2988 pthread_mutex_unlock(&self->mutex); 2989 PTHREAD_JOIN(self->sibling[sib].tid, &status); 2990 EXPECT_EQ(0, (long)status); 2991 /* Poll for actual task death. pthread_join doesn't guarantee it. */ 2992 while (!kill(self->sibling[sib].system_tid, 0)) 2993 nanosleep(&delay, NULL); 2994 2995 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2996 &self->apply_prog); 2997 ASSERT_EQ(0, ret); /* just us chickens */ 2998 } 2999 3000 /* Make sure restarted syscalls are seen directly as "restart_syscall". */ 3001 TEST(syscall_restart) 3002 { 3003 long ret; 3004 unsigned long msg; 3005 pid_t child_pid; 3006 int pipefd[2]; 3007 int status; 3008 siginfo_t info = { }; 3009 struct sock_filter filter[] = { 3010 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 3011 offsetof(struct seccomp_data, nr)), 3012 3013 #ifdef __NR_sigreturn 3014 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 7, 0), 3015 #endif 3016 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 6, 0), 3017 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 5, 0), 3018 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 4, 0), 3019 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 5, 0), 3020 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_clock_nanosleep, 4, 0), 3021 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0), 3022 3023 /* Allow __NR_write for easy logging. */ 3024 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1), 3025 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3026 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 3027 /* The nanosleep jump target. */ 3028 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100), 3029 /* The restart_syscall jump target. */ 3030 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200), 3031 }; 3032 struct sock_fprog prog = { 3033 .len = (unsigned short)ARRAY_SIZE(filter), 3034 .filter = filter, 3035 }; 3036 #if defined(__arm__) 3037 struct utsname utsbuf; 3038 #endif 3039 3040 ASSERT_EQ(0, pipe(pipefd)); 3041 3042 child_pid = fork(); 3043 ASSERT_LE(0, child_pid); 3044 if (child_pid == 0) { 3045 /* Child uses EXPECT not ASSERT to deliver status correctly. */ 3046 char buf = ' '; 3047 struct timespec timeout = { }; 3048 3049 /* Attach parent as tracer and stop. 
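 * The PTRACE_TRACEME + raise(SIGSTOP) handshake lets the parent set its
 * ptrace options (PTRACE_O_TRACESECCOMP) before the filter is installed
 * and the traced syscalls run.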
*/ 3050 EXPECT_EQ(0, ptrace(PTRACE_TRACEME)); 3051 EXPECT_EQ(0, raise(SIGSTOP)); 3052 3053 EXPECT_EQ(0, close(pipefd[1])); 3054 3055 EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 3056 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3057 } 3058 3059 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 3060 EXPECT_EQ(0, ret) { 3061 TH_LOG("Failed to install filter!"); 3062 } 3063 3064 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { 3065 TH_LOG("Failed to read() sync from parent"); 3066 } 3067 EXPECT_EQ('.', buf) { 3068 TH_LOG("Failed to get sync data from read()"); 3069 } 3070 3071 /* Start nanosleep to be interrupted. */ 3072 timeout.tv_sec = 1; 3073 errno = 0; 3074 EXPECT_EQ(0, nanosleep(&timeout, NULL)) { 3075 TH_LOG("Call to nanosleep() failed (errno %d)", errno); 3076 } 3077 3078 /* Read final sync from parent. */ 3079 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { 3080 TH_LOG("Failed final read() from parent"); 3081 } 3082 EXPECT_EQ('!', buf) { 3083 TH_LOG("Failed to get final data from read()"); 3084 } 3085 3086 /* Directly report the status of our test harness results. */ 3087 syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS 3088 : EXIT_FAILURE); 3089 } 3090 EXPECT_EQ(0, close(pipefd[0])); 3091 3092 /* Attach to child, setup options, and release. */ 3093 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3094 ASSERT_EQ(true, WIFSTOPPED(status)); 3095 ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL, 3096 PTRACE_O_TRACESECCOMP)); 3097 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3098 ASSERT_EQ(1, write(pipefd[1], ".", 1)); 3099 3100 /* Wait for nanosleep() to start. */ 3101 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3102 ASSERT_EQ(true, WIFSTOPPED(status)); 3103 ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); 3104 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); 3105 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); 3106 ASSERT_EQ(0x100, msg); 3107 ret = get_syscall(_metadata, child_pid); 3108 EXPECT_TRUE(ret == __NR_nanosleep || ret == __NR_clock_nanosleep); 3109 3110 /* Might as well check siginfo for sanity while we're here. */ 3111 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); 3112 ASSERT_EQ(SIGTRAP, info.si_signo); 3113 ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code); 3114 EXPECT_EQ(0, info.si_errno); 3115 EXPECT_EQ(getuid(), info.si_uid); 3116 /* Verify signal delivery came from child (seccomp-triggered). */ 3117 EXPECT_EQ(child_pid, info.si_pid); 3118 3119 /* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */ 3120 ASSERT_EQ(0, kill(child_pid, SIGSTOP)); 3121 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3122 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3123 ASSERT_EQ(true, WIFSTOPPED(status)); 3124 ASSERT_EQ(SIGSTOP, WSTOPSIG(status)); 3125 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); 3126 /* 3127 * There is no siginfo on SIGSTOP any more, so we can't verify 3128 * signal delivery came from parent now (getpid() == info.si_pid). 3129 * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com 3130 * At least verify the SIGSTOP via PTRACE_GETSIGINFO. 3131 */ 3132 EXPECT_EQ(SIGSTOP, info.si_signo); 3133 3134 /* Restart nanosleep with SIGCONT, which triggers restart_syscall. 
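 * The filter above tags nanosleep/clock_nanosleep with SECCOMP_RET_TRACE|0x100
 * and restart_syscall with SECCOMP_RET_TRACE|0x200, so the next seccomp stop
 * is expected to report event message 0x200.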
*/ 3135 ASSERT_EQ(0, kill(child_pid, SIGCONT)); 3136 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3137 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3138 ASSERT_EQ(true, WIFSTOPPED(status)); 3139 ASSERT_EQ(SIGCONT, WSTOPSIG(status)); 3140 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3141 3142 /* Wait for restart_syscall() to start. */ 3143 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3144 ASSERT_EQ(true, WIFSTOPPED(status)); 3145 ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); 3146 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); 3147 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); 3148 3149 ASSERT_EQ(0x200, msg); 3150 ret = get_syscall(_metadata, child_pid); 3151 #if defined(__arm__) 3152 /* 3153 * FIXME: 3154 * - native ARM registers do NOT expose true syscall. 3155 * - compat ARM registers on ARM64 DO expose true syscall. 3156 */ 3157 ASSERT_EQ(0, uname(&utsbuf)); 3158 if (strncmp(utsbuf.machine, "arm", 3) == 0) { 3159 EXPECT_EQ(__NR_nanosleep, ret); 3160 } else 3161 #endif 3162 { 3163 EXPECT_EQ(__NR_restart_syscall, ret); 3164 } 3165 3166 /* Write again to end test. */ 3167 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 3168 ASSERT_EQ(1, write(pipefd[1], "!", 1)); 3169 EXPECT_EQ(0, close(pipefd[1])); 3170 3171 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 3172 if (WIFSIGNALED(status) || WEXITSTATUS(status)) 3173 _metadata->passed = 0; 3174 } 3175 3176 TEST_SIGNAL(filter_flag_log, SIGSYS) 3177 { 3178 struct sock_filter allow_filter[] = { 3179 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3180 }; 3181 struct sock_filter kill_filter[] = { 3182 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 3183 offsetof(struct seccomp_data, nr)), 3184 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), 3185 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 3186 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3187 }; 3188 struct sock_fprog allow_prog = { 3189 .len = (unsigned short)ARRAY_SIZE(allow_filter), 3190 .filter = allow_filter, 3191 }; 3192 struct sock_fprog kill_prog = { 3193 .len = (unsigned short)ARRAY_SIZE(kill_filter), 3194 .filter = kill_filter, 3195 }; 3196 long ret; 3197 pid_t parent = getppid(); 3198 3199 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3200 ASSERT_EQ(0, ret); 3201 3202 /* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */ 3203 ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG, 3204 &allow_prog); 3205 ASSERT_NE(ENOSYS, errno) { 3206 TH_LOG("Kernel does not support seccomp syscall!"); 3207 } 3208 EXPECT_NE(0, ret) { 3209 TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!"); 3210 } 3211 EXPECT_EQ(EINVAL, errno) { 3212 TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!"); 3213 } 3214 3215 /* Verify that a simple, permissive filter can be added with no flags */ 3216 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog); 3217 EXPECT_EQ(0, ret); 3218 3219 /* See if the same filter can be added with the FILTER_FLAG_LOG flag */ 3220 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, 3221 &allow_prog); 3222 ASSERT_NE(EINVAL, errno) { 3223 TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!"); 3224 } 3225 EXPECT_EQ(0, ret); 3226 3227 /* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */ 3228 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, 3229 &kill_prog); 3230 EXPECT_EQ(0, ret); 3231 3232 EXPECT_EQ(parent, syscall(__NR_getppid)); 3233 /* getpid() should never return. 
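 * It trips the SECCOMP_RET_KILL rule in kill_prog installed above, so the
 * SIGSYS expected by TEST_SIGNAL() is delivered instead.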
*/ 3234 EXPECT_EQ(0, syscall(__NR_getpid)); 3235 } 3236 3237 TEST(get_action_avail) 3238 { 3239 __u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP, 3240 SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE, 3241 SECCOMP_RET_LOG, SECCOMP_RET_ALLOW }; 3242 __u32 unknown_action = 0x10000000U; 3243 int i; 3244 long ret; 3245 3246 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]); 3247 ASSERT_NE(ENOSYS, errno) { 3248 TH_LOG("Kernel does not support seccomp syscall!"); 3249 } 3250 ASSERT_NE(EINVAL, errno) { 3251 TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!"); 3252 } 3253 EXPECT_EQ(ret, 0); 3254 3255 for (i = 0; i < ARRAY_SIZE(actions); i++) { 3256 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]); 3257 EXPECT_EQ(ret, 0) { 3258 TH_LOG("Expected action (0x%X) not available!", 3259 actions[i]); 3260 } 3261 } 3262 3263 /* Check that an unknown action is handled properly (EOPNOTSUPP) */ 3264 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action); 3265 EXPECT_EQ(ret, -1); 3266 EXPECT_EQ(errno, EOPNOTSUPP); 3267 } 3268 3269 TEST(get_metadata) 3270 { 3271 pid_t pid; 3272 int pipefd[2]; 3273 char buf; 3274 struct seccomp_metadata md; 3275 long ret; 3276 3277 /* Only real root can get metadata. */ 3278 if (geteuid()) { 3279 SKIP(return, "get_metadata requires real root"); 3280 return; 3281 } 3282 3283 ASSERT_EQ(0, pipe(pipefd)); 3284 3285 pid = fork(); 3286 ASSERT_GE(pid, 0); 3287 if (pid == 0) { 3288 struct sock_filter filter[] = { 3289 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3290 }; 3291 struct sock_fprog prog = { 3292 .len = (unsigned short)ARRAY_SIZE(filter), 3293 .filter = filter, 3294 }; 3295 3296 /* one with log, one without */ 3297 EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 3298 SECCOMP_FILTER_FLAG_LOG, &prog)); 3299 EXPECT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog)); 3300 3301 EXPECT_EQ(0, close(pipefd[0])); 3302 ASSERT_EQ(1, write(pipefd[1], "1", 1)); 3303 ASSERT_EQ(0, close(pipefd[1])); 3304 3305 while (1) 3306 sleep(100); 3307 } 3308 3309 ASSERT_EQ(0, close(pipefd[1])); 3310 ASSERT_EQ(1, read(pipefd[0], &buf, 1)); 3311 3312 ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid)); 3313 ASSERT_EQ(pid, waitpid(pid, NULL, 0)); 3314 3315 /* Past here must not use ASSERT or child process is never killed. 
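 * (a failing ASSERT aborts this test function immediately, which would skip
 * the kill(pid, SIGKILL) at the end and leave the sleeping child behind)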
*/ 3316 3317 md.filter_off = 0; 3318 errno = 0; 3319 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md); 3320 EXPECT_EQ(sizeof(md), ret) { 3321 if (errno == EINVAL) 3322 SKIP(goto skip, "Kernel does not support PTRACE_SECCOMP_GET_METADATA (missing CONFIG_CHECKPOINT_RESTORE?)"); 3323 } 3324 3325 EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG); 3326 EXPECT_EQ(md.filter_off, 0); 3327 3328 md.filter_off = 1; 3329 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md); 3330 EXPECT_EQ(sizeof(md), ret); 3331 EXPECT_EQ(md.flags, 0); 3332 EXPECT_EQ(md.filter_off, 1); 3333 3334 skip: 3335 ASSERT_EQ(0, kill(pid, SIGKILL)); 3336 } 3337 3338 static int user_notif_syscall(int nr, unsigned int flags) 3339 { 3340 struct sock_filter filter[] = { 3341 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 3342 offsetof(struct seccomp_data, nr)), 3343 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, nr, 0, 1), 3344 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_USER_NOTIF), 3345 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3346 }; 3347 3348 struct sock_fprog prog = { 3349 .len = (unsigned short)ARRAY_SIZE(filter), 3350 .filter = filter, 3351 }; 3352 3353 return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog); 3354 } 3355 3356 #define USER_NOTIF_MAGIC INT_MAX 3357 TEST(user_notification_basic) 3358 { 3359 pid_t pid; 3360 long ret; 3361 int status, listener; 3362 struct seccomp_notif req = {}; 3363 struct seccomp_notif_resp resp = {}; 3364 struct pollfd pollfd; 3365 3366 struct sock_filter filter[] = { 3367 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3368 }; 3369 struct sock_fprog prog = { 3370 .len = (unsigned short)ARRAY_SIZE(filter), 3371 .filter = filter, 3372 }; 3373 3374 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3375 ASSERT_EQ(0, ret) { 3376 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3377 } 3378 3379 pid = fork(); 3380 ASSERT_GE(pid, 0); 3381 3382 /* Check that we get -ENOSYS with no listener attached */ 3383 if (pid == 0) { 3384 if (user_notif_syscall(__NR_getppid, 0) < 0) 3385 exit(1); 3386 ret = syscall(__NR_getppid); 3387 exit(ret >= 0 || errno != ENOSYS); 3388 } 3389 3390 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3391 EXPECT_EQ(true, WIFEXITED(status)); 3392 EXPECT_EQ(0, WEXITSTATUS(status)); 3393 3394 /* Add some no-op filters for grins. */ 3395 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3396 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3397 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3398 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3399 3400 /* Check that the basic notification machinery works */ 3401 listener = user_notif_syscall(__NR_getppid, 3402 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3403 ASSERT_GE(listener, 0); 3404 3405 /* Installing a second listener in the chain should EBUSY */ 3406 EXPECT_EQ(user_notif_syscall(__NR_getppid, 3407 SECCOMP_FILTER_FLAG_NEW_LISTENER), 3408 -1); 3409 EXPECT_EQ(errno, EBUSY); 3410 3411 pid = fork(); 3412 ASSERT_GE(pid, 0); 3413 3414 if (pid == 0) { 3415 ret = syscall(__NR_getppid); 3416 exit(ret != USER_NOTIF_MAGIC); 3417 } 3418 3419 pollfd.fd = listener; 3420 pollfd.events = POLLIN | POLLOUT; 3421 3422 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3423 EXPECT_EQ(pollfd.revents, POLLIN); 3424 3425 /* Test that we can't pass garbage to the kernel. 
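 * SECCOMP_IOCTL_NOTIF_RECV expects a zeroed struct seccomp_notif, so the
 * pre-set pid field below should be rejected with EINVAL.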
*/ 3426 memset(&req, 0, sizeof(req)); 3427 req.pid = -1; 3428 errno = 0; 3429 ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req); 3430 EXPECT_EQ(-1, ret); 3431 EXPECT_EQ(EINVAL, errno); 3432 3433 if (ret) { 3434 req.pid = 0; 3435 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3436 } 3437 3438 pollfd.fd = listener; 3439 pollfd.events = POLLIN | POLLOUT; 3440 3441 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3442 EXPECT_EQ(pollfd.revents, POLLOUT); 3443 3444 EXPECT_EQ(req.data.nr, __NR_getppid); 3445 3446 resp.id = req.id; 3447 resp.error = 0; 3448 resp.val = USER_NOTIF_MAGIC; 3449 3450 /* check that we make sure flags == 0 */ 3451 resp.flags = 1; 3452 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3453 EXPECT_EQ(errno, EINVAL); 3454 3455 resp.flags = 0; 3456 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3457 3458 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3459 EXPECT_EQ(true, WIFEXITED(status)); 3460 EXPECT_EQ(0, WEXITSTATUS(status)); 3461 } 3462 3463 TEST(user_notification_with_tsync) 3464 { 3465 int ret; 3466 unsigned int flags; 3467 3468 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3469 ASSERT_EQ(0, ret) { 3470 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3471 } 3472 3473 /* these were exclusive */ 3474 flags = SECCOMP_FILTER_FLAG_NEW_LISTENER | 3475 SECCOMP_FILTER_FLAG_TSYNC; 3476 ASSERT_EQ(-1, user_notif_syscall(__NR_getppid, flags)); 3477 ASSERT_EQ(EINVAL, errno); 3478 3479 /* but now they're not */ 3480 flags |= SECCOMP_FILTER_FLAG_TSYNC_ESRCH; 3481 ret = user_notif_syscall(__NR_getppid, flags); 3482 close(ret); 3483 ASSERT_LE(0, ret); 3484 } 3485 3486 TEST(user_notification_kill_in_middle) 3487 { 3488 pid_t pid; 3489 long ret; 3490 int listener; 3491 struct seccomp_notif req = {}; 3492 struct seccomp_notif_resp resp = {}; 3493 3494 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3495 ASSERT_EQ(0, ret) { 3496 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3497 } 3498 3499 listener = user_notif_syscall(__NR_getppid, 3500 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3501 ASSERT_GE(listener, 0); 3502 3503 /* 3504 * Check that nothing bad happens when we kill the task in the middle 3505 * of a syscall. 
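 * Once the child is SIGKILLed, its pending notification id becomes stale:
 * NOTIF_ID_VALID should fail, and NOTIF_SEND should fail with ENOENT.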
3506 */ 3507 pid = fork(); 3508 ASSERT_GE(pid, 0); 3509 3510 if (pid == 0) { 3511 ret = syscall(__NR_getppid); 3512 exit(ret != USER_NOTIF_MAGIC); 3513 } 3514 3515 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3516 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), 0); 3517 3518 EXPECT_EQ(kill(pid, SIGKILL), 0); 3519 EXPECT_EQ(waitpid(pid, NULL, 0), pid); 3520 3521 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), -1); 3522 3523 resp.id = req.id; 3524 ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp); 3525 EXPECT_EQ(ret, -1); 3526 EXPECT_EQ(errno, ENOENT); 3527 } 3528 3529 static int handled = -1; 3530 3531 static void signal_handler(int signal) 3532 { 3533 if (write(handled, "c", 1) != 1) 3534 perror("write from signal"); 3535 } 3536 3537 TEST(user_notification_signal) 3538 { 3539 pid_t pid; 3540 long ret; 3541 int status, listener, sk_pair[2]; 3542 struct seccomp_notif req = {}; 3543 struct seccomp_notif_resp resp = {}; 3544 char c; 3545 3546 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3547 ASSERT_EQ(0, ret) { 3548 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3549 } 3550 3551 ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0); 3552 3553 listener = user_notif_syscall(__NR_gettid, 3554 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3555 ASSERT_GE(listener, 0); 3556 3557 pid = fork(); 3558 ASSERT_GE(pid, 0); 3559 3560 if (pid == 0) { 3561 close(sk_pair[0]); 3562 handled = sk_pair[1]; 3563 if (signal(SIGUSR1, signal_handler) == SIG_ERR) { 3564 perror("signal"); 3565 exit(1); 3566 } 3567 /* 3568 * ERESTARTSYS behavior is a bit hard to test, because we need 3569 * to rely on a signal that has not yet been handled. Let's at 3570 * least check that the error code gets propagated through, and 3571 * hope that it doesn't break when there is actually a signal :) 3572 */ 3573 ret = syscall(__NR_gettid); 3574 exit(!(ret == -1 && errno == 512)); 3575 } 3576 3577 close(sk_pair[1]); 3578 3579 memset(&req, 0, sizeof(req)); 3580 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3581 3582 EXPECT_EQ(kill(pid, SIGUSR1), 0); 3583 3584 /* 3585 * Make sure the signal really is delivered, which means we're not 3586 * stuck in the user notification code any more and the notification 3587 * should be dead. 3588 */ 3589 EXPECT_EQ(read(sk_pair[0], &c, 1), 1); 3590 3591 resp.id = req.id; 3592 resp.error = -EPERM; 3593 resp.val = 0; 3594 3595 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3596 EXPECT_EQ(errno, ENOENT); 3597 3598 memset(&req, 0, sizeof(req)); 3599 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3600 3601 resp.id = req.id; 3602 resp.error = -512; /* -ERESTARTSYS */ 3603 resp.val = 0; 3604 3605 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3606 3607 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3608 EXPECT_EQ(true, WIFEXITED(status)); 3609 EXPECT_EQ(0, WEXITSTATUS(status)); 3610 } 3611 3612 TEST(user_notification_closed_listener) 3613 { 3614 pid_t pid; 3615 long ret; 3616 int status, listener; 3617 3618 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3619 ASSERT_EQ(0, ret) { 3620 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3621 } 3622 3623 listener = user_notif_syscall(__NR_getppid, 3624 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3625 ASSERT_GE(listener, 0); 3626 3627 /* 3628 * Check that we get an ENOSYS when the listener is closed. 
3629 */ 3630 pid = fork(); 3631 ASSERT_GE(pid, 0); 3632 if (pid == 0) { 3633 close(listener); 3634 ret = syscall(__NR_getppid); 3635 exit(ret != -1 && errno != ENOSYS); 3636 } 3637 3638 close(listener); 3639 3640 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3641 EXPECT_EQ(true, WIFEXITED(status)); 3642 EXPECT_EQ(0, WEXITSTATUS(status)); 3643 } 3644 3645 /* 3646 * Check that a pid in a child namespace still shows up as valid in ours. 3647 */ 3648 TEST(user_notification_child_pid_ns) 3649 { 3650 pid_t pid; 3651 int status, listener; 3652 struct seccomp_notif req = {}; 3653 struct seccomp_notif_resp resp = {}; 3654 3655 ASSERT_EQ(unshare(CLONE_NEWUSER | CLONE_NEWPID), 0) { 3656 if (errno == EINVAL) 3657 SKIP(return, "kernel missing CLONE_NEWUSER support"); 3658 }; 3659 3660 listener = user_notif_syscall(__NR_getppid, 3661 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3662 ASSERT_GE(listener, 0); 3663 3664 pid = fork(); 3665 ASSERT_GE(pid, 0); 3666 3667 if (pid == 0) 3668 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 3669 3670 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3671 EXPECT_EQ(req.pid, pid); 3672 3673 resp.id = req.id; 3674 resp.error = 0; 3675 resp.val = USER_NOTIF_MAGIC; 3676 3677 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3678 3679 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3680 EXPECT_EQ(true, WIFEXITED(status)); 3681 EXPECT_EQ(0, WEXITSTATUS(status)); 3682 close(listener); 3683 } 3684 3685 /* 3686 * Check that a pid in a sibling (i.e. unrelated) namespace shows up as 0, i.e. 3687 * invalid. 3688 */ 3689 TEST(user_notification_sibling_pid_ns) 3690 { 3691 pid_t pid, pid2; 3692 int status, listener; 3693 struct seccomp_notif req = {}; 3694 struct seccomp_notif_resp resp = {}; 3695 3696 ASSERT_EQ(prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0), 0) { 3697 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3698 } 3699 3700 listener = user_notif_syscall(__NR_getppid, 3701 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3702 ASSERT_GE(listener, 0); 3703 3704 pid = fork(); 3705 ASSERT_GE(pid, 0); 3706 3707 if (pid == 0) { 3708 ASSERT_EQ(unshare(CLONE_NEWPID), 0); 3709 3710 pid2 = fork(); 3711 ASSERT_GE(pid2, 0); 3712 3713 if (pid2 == 0) 3714 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 3715 3716 EXPECT_EQ(waitpid(pid2, &status, 0), pid2); 3717 EXPECT_EQ(true, WIFEXITED(status)); 3718 EXPECT_EQ(0, WEXITSTATUS(status)); 3719 exit(WEXITSTATUS(status)); 3720 } 3721 3722 /* Create the sibling ns, and sibling in it. */ 3723 ASSERT_EQ(unshare(CLONE_NEWPID), 0) { 3724 if (errno == EPERM) 3725 SKIP(return, "CLONE_NEWPID requires CAP_SYS_ADMIN"); 3726 } 3727 ASSERT_EQ(errno, 0); 3728 3729 pid2 = fork(); 3730 ASSERT_GE(pid2, 0); 3731 3732 if (pid2 == 0) { 3733 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3734 /* 3735 * The pid should be 0, i.e. the task is in some namespace that 3736 * we can't "see". 
3737 */ 3738 EXPECT_EQ(req.pid, 0); 3739 3740 resp.id = req.id; 3741 resp.error = 0; 3742 resp.val = USER_NOTIF_MAGIC; 3743 3744 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3745 exit(0); 3746 } 3747 3748 close(listener); 3749 3750 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3751 EXPECT_EQ(true, WIFEXITED(status)); 3752 EXPECT_EQ(0, WEXITSTATUS(status)); 3753 3754 EXPECT_EQ(waitpid(pid2, &status, 0), pid2); 3755 EXPECT_EQ(true, WIFEXITED(status)); 3756 EXPECT_EQ(0, WEXITSTATUS(status)); 3757 } 3758 3759 TEST(user_notification_fault_recv) 3760 { 3761 pid_t pid; 3762 int status, listener; 3763 struct seccomp_notif req = {}; 3764 struct seccomp_notif_resp resp = {}; 3765 3766 ASSERT_EQ(unshare(CLONE_NEWUSER), 0) { 3767 if (errno == EINVAL) 3768 SKIP(return, "kernel missing CLONE_NEWUSER support"); 3769 } 3770 3771 listener = user_notif_syscall(__NR_getppid, 3772 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3773 ASSERT_GE(listener, 0); 3774 3775 pid = fork(); 3776 ASSERT_GE(pid, 0); 3777 3778 if (pid == 0) 3779 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 3780 3781 /* Do a bad recv() */ 3782 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1); 3783 EXPECT_EQ(errno, EFAULT); 3784 3785 /* We should still be able to receive this notification, though. */ 3786 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3787 EXPECT_EQ(req.pid, pid); 3788 3789 resp.id = req.id; 3790 resp.error = 0; 3791 resp.val = USER_NOTIF_MAGIC; 3792 3793 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3794 3795 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3796 EXPECT_EQ(true, WIFEXITED(status)); 3797 EXPECT_EQ(0, WEXITSTATUS(status)); 3798 } 3799 3800 TEST(seccomp_get_notif_sizes) 3801 { 3802 struct seccomp_notif_sizes sizes; 3803 3804 ASSERT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); 3805 EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif)); 3806 EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp)); 3807 } 3808 3809 TEST(user_notification_continue) 3810 { 3811 pid_t pid; 3812 long ret; 3813 int status, listener; 3814 struct seccomp_notif req = {}; 3815 struct seccomp_notif_resp resp = {}; 3816 struct pollfd pollfd; 3817 3818 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3819 ASSERT_EQ(0, ret) { 3820 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3821 } 3822 3823 listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER); 3824 ASSERT_GE(listener, 0); 3825 3826 pid = fork(); 3827 ASSERT_GE(pid, 0); 3828 3829 if (pid == 0) { 3830 int dup_fd, pipe_fds[2]; 3831 pid_t self; 3832 3833 ASSERT_GE(pipe(pipe_fds), 0); 3834 3835 dup_fd = dup(pipe_fds[0]); 3836 ASSERT_GE(dup_fd, 0); 3837 EXPECT_NE(pipe_fds[0], dup_fd); 3838 3839 self = getpid(); 3840 ASSERT_EQ(filecmp(self, self, pipe_fds[0], dup_fd), 0); 3841 exit(0); 3842 } 3843 3844 pollfd.fd = listener; 3845 pollfd.events = POLLIN | POLLOUT; 3846 3847 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3848 EXPECT_EQ(pollfd.revents, POLLIN); 3849 3850 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3851 3852 pollfd.fd = listener; 3853 pollfd.events = POLLIN | POLLOUT; 3854 3855 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3856 EXPECT_EQ(pollfd.revents, POLLOUT); 3857 3858 EXPECT_EQ(req.data.nr, __NR_dup); 3859 3860 resp.id = req.id; 3861 resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE; 3862 3863 /* 3864 * Verify that setting SECCOMP_USER_NOTIF_FLAG_CONTINUE enforces other 3865 * args be set to 0. 
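 * A non-zero error or val alongside the flag should each be rejected with
 * EINVAL before the plain "continue" response is finally accepted.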
3866 */ 3867 resp.error = 0; 3868 resp.val = USER_NOTIF_MAGIC; 3869 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3870 EXPECT_EQ(errno, EINVAL); 3871 3872 resp.error = USER_NOTIF_MAGIC; 3873 resp.val = 0; 3874 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3875 EXPECT_EQ(errno, EINVAL); 3876 3877 resp.error = 0; 3878 resp.val = 0; 3879 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0) { 3880 if (errno == EINVAL) 3881 SKIP(goto skip, "Kernel does not support SECCOMP_USER_NOTIF_FLAG_CONTINUE"); 3882 } 3883 3884 skip: 3885 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3886 EXPECT_EQ(true, WIFEXITED(status)); 3887 EXPECT_EQ(0, WEXITSTATUS(status)) { 3888 if (WEXITSTATUS(status) == 2) { 3889 SKIP(return, "Kernel does not support kcmp() syscall"); 3890 return; 3891 } 3892 } 3893 } 3894 3895 TEST(user_notification_filter_empty) 3896 { 3897 pid_t pid; 3898 long ret; 3899 int status; 3900 struct pollfd pollfd; 3901 struct __clone_args args = { 3902 .flags = CLONE_FILES, 3903 .exit_signal = SIGCHLD, 3904 }; 3905 3906 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3907 ASSERT_EQ(0, ret) { 3908 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3909 } 3910 3911 pid = sys_clone3(&args, sizeof(args)); 3912 ASSERT_GE(pid, 0); 3913 3914 if (pid == 0) { 3915 int listener; 3916 3917 listener = user_notif_syscall(__NR_mknodat, SECCOMP_FILTER_FLAG_NEW_LISTENER); 3918 if (listener < 0) 3919 _exit(EXIT_FAILURE); 3920 3921 if (dup2(listener, 200) != 200) 3922 _exit(EXIT_FAILURE); 3923 3924 close(listener); 3925 3926 _exit(EXIT_SUCCESS); 3927 } 3928 3929 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3930 EXPECT_EQ(true, WIFEXITED(status)); 3931 EXPECT_EQ(0, WEXITSTATUS(status)); 3932 3933 /* 3934 * The seccomp filter has become unused so we should be notified once 3935 * the kernel gets around to cleaning up task struct. 
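 * A POLLHUP on the listener fd is the expected indication that the filter no longer has any users.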
3936 */ 3937 pollfd.fd = 200; 3938 pollfd.events = POLLHUP; 3939 3940 EXPECT_GT(poll(&pollfd, 1, 2000), 0); 3941 EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0); 3942 } 3943 3944 static void *do_thread(void *data) 3945 { 3946 return NULL; 3947 } 3948 3949 TEST(user_notification_filter_empty_threaded) 3950 { 3951 pid_t pid; 3952 long ret; 3953 int status; 3954 struct pollfd pollfd; 3955 struct __clone_args args = { 3956 .flags = CLONE_FILES, 3957 .exit_signal = SIGCHLD, 3958 }; 3959 3960 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 3961 ASSERT_EQ(0, ret) { 3962 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 3963 } 3964 3965 pid = sys_clone3(&args, sizeof(args)); 3966 ASSERT_GE(pid, 0); 3967 3968 if (pid == 0) { 3969 pid_t pid1, pid2; 3970 int listener, status; 3971 pthread_t thread; 3972 3973 listener = user_notif_syscall(__NR_dup, SECCOMP_FILTER_FLAG_NEW_LISTENER); 3974 if (listener < 0) 3975 _exit(EXIT_FAILURE); 3976 3977 if (dup2(listener, 200) != 200) 3978 _exit(EXIT_FAILURE); 3979 3980 close(listener); 3981 3982 pid1 = fork(); 3983 if (pid1 < 0) 3984 _exit(EXIT_FAILURE); 3985 3986 if (pid1 == 0) 3987 _exit(EXIT_SUCCESS); 3988 3989 pid2 = fork(); 3990 if (pid2 < 0) 3991 _exit(EXIT_FAILURE); 3992 3993 if (pid2 == 0) 3994 _exit(EXIT_SUCCESS); 3995 3996 if (pthread_create(&thread, NULL, do_thread, NULL) || 3997 pthread_join(thread, NULL)) 3998 _exit(EXIT_FAILURE); 3999 4000 if (pthread_create(&thread, NULL, do_thread, NULL) || 4001 pthread_join(thread, NULL)) 4002 _exit(EXIT_FAILURE); 4003 4004 if (waitpid(pid1, &status, 0) != pid1 || !WIFEXITED(status) || 4005 WEXITSTATUS(status)) 4006 _exit(EXIT_FAILURE); 4007 4008 if (waitpid(pid2, &status, 0) != pid2 || !WIFEXITED(status) || 4009 WEXITSTATUS(status)) 4010 _exit(EXIT_FAILURE); 4011 4012 exit(EXIT_SUCCESS); 4013 } 4014 4015 EXPECT_EQ(waitpid(pid, &status, 0), pid); 4016 EXPECT_EQ(true, WIFEXITED(status)); 4017 EXPECT_EQ(0, WEXITSTATUS(status)); 4018 4019 /* 4020 * The seccomp filter has become unused so we should be notified once 4021 * the kernel gets around to cleaning up task struct. 4022 */ 4023 pollfd.fd = 200; 4024 pollfd.events = POLLHUP; 4025 4026 EXPECT_GT(poll(&pollfd, 1, 2000), 0); 4027 EXPECT_GT((pollfd.revents & POLLHUP) ?: 0, 0); 4028 } 4029 4030 TEST(user_notification_addfd) 4031 { 4032 pid_t pid; 4033 long ret; 4034 int status, listener, memfd, fd, nextfd; 4035 struct seccomp_notif_addfd addfd = {}; 4036 struct seccomp_notif_addfd_small small = {}; 4037 struct seccomp_notif_addfd_big big = {}; 4038 struct seccomp_notif req = {}; 4039 struct seccomp_notif_resp resp = {}; 4040 /* 100 ms */ 4041 struct timespec delay = { .tv_nsec = 100000000 }; 4042 4043 /* There may be arbitrary already-open fds at test start. */ 4044 memfd = memfd_create("test", 0); 4045 ASSERT_GE(memfd, 0); 4046 nextfd = memfd + 1; 4047 4048 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 4049 ASSERT_EQ(0, ret) { 4050 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 4051 } 4052 4053 /* fd: 4 */ 4054 /* Check that the basic notification machinery works */ 4055 listener = user_notif_syscall(__NR_getppid, 4056 SECCOMP_FILTER_FLAG_NEW_LISTENER); 4057 ASSERT_EQ(listener, nextfd++); 4058 4059 pid = fork(); 4060 ASSERT_GE(pid, 0); 4061 4062 if (pid == 0) { 4063 /* fds will be added and this value is expected */ 4064 if (syscall(__NR_getppid) != USER_NOTIF_MAGIC) 4065 exit(1); 4066 4067 /* Atomic addfd+send is received here. 
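With SECCOMP_ADDFD_FLAG_SEND the intercepted getppid returns the number of the newly added fd.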
Check it is a valid fd */ 4068 if (fcntl(syscall(__NR_getppid), F_GETFD) == -1) 4069 exit(1); 4070 4071 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 4072 } 4073 4074 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 4075 4076 addfd.srcfd = memfd; 4077 addfd.newfd = 0; 4078 addfd.id = req.id; 4079 addfd.flags = 0x0; 4080 4081 /* Verify bad newfd_flags cannot be set */ 4082 addfd.newfd_flags = ~O_CLOEXEC; 4083 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1); 4084 EXPECT_EQ(errno, EINVAL); 4085 addfd.newfd_flags = O_CLOEXEC; 4086 4087 /* Verify bad flags cannot be set */ 4088 addfd.flags = 0xff; 4089 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1); 4090 EXPECT_EQ(errno, EINVAL); 4091 addfd.flags = 0; 4092 4093 /* Verify that remote_fd cannot be set without setting flags */ 4094 addfd.newfd = 1; 4095 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1); 4096 EXPECT_EQ(errno, EINVAL); 4097 addfd.newfd = 0; 4098 4099 /* Verify small size cannot be set */ 4100 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_SMALL, &small), -1); 4101 EXPECT_EQ(errno, EINVAL); 4102 4103 /* Verify we can't send bits filled in unknown buffer area */ 4104 memset(&big, 0xAA, sizeof(big)); 4105 big.addfd = addfd; 4106 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big), -1); 4107 EXPECT_EQ(errno, E2BIG); 4108 4109 4110 /* Verify we can set an arbitrary remote fd */ 4111 fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd); 4112 EXPECT_EQ(fd, nextfd++); 4113 EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0); 4114 4115 /* Verify we can set an arbitrary remote fd with large size */ 4116 memset(&big, 0x0, sizeof(big)); 4117 big.addfd = addfd; 4118 fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD_BIG, &big); 4119 EXPECT_EQ(fd, nextfd++); 4120 4121 /* Verify we can set a specific remote fd */ 4122 addfd.newfd = 42; 4123 addfd.flags = SECCOMP_ADDFD_FLAG_SETFD; 4124 fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd); 4125 EXPECT_EQ(fd, 42); 4126 EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0); 4127 4128 /* Resume syscall */ 4129 resp.id = req.id; 4130 resp.error = 0; 4131 resp.val = USER_NOTIF_MAGIC; 4132 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 4133 4134 /* 4135 * This sets the ID of the ADD FD to the last request plus 1. The 4136 * notification ID increments 1 per notification. 4137 */ 4138 addfd.id = req.id + 1; 4139 4140 /* This spins until the underlying notification is generated */ 4141 while (ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd) != -1 && 4142 errno != -EINPROGRESS) 4143 nanosleep(&delay, NULL); 4144 4145 memset(&req, 0, sizeof(req)); 4146 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 4147 ASSERT_EQ(addfd.id, req.id); 4148 4149 /* Verify we can do an atomic addfd and send */ 4150 addfd.newfd = 0; 4151 addfd.flags = SECCOMP_ADDFD_FLAG_SEND; 4152 fd = ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd); 4153 /* 4154 * Child has earlier "low" fds and now 42, so we expect the next 4155 * lowest available fd to be assigned here. 4156 */ 4157 EXPECT_EQ(fd, nextfd++); 4158 ASSERT_EQ(filecmp(getpid(), pid, memfd, fd), 0); 4159 4160 /* 4161 * This sets the ID of the ADD FD to the last request plus 1. The 4162 * notification ID increments 1 per notification. 
4163 */ 4164 addfd.id = req.id + 1; 4165 4166 /* This spins until the underlying notification is generated */ 4167 while (ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd) != -1 && 4168 errno != -EINPROGRESS) 4169 nanosleep(&delay, NULL); 4170 4171 memset(&req, 0, sizeof(req)); 4172 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 4173 ASSERT_EQ(addfd.id, req.id); 4174 4175 resp.id = req.id; 4176 resp.error = 0; 4177 resp.val = USER_NOTIF_MAGIC; 4178 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 4179 4180 /* Wait for child to finish. */ 4181 EXPECT_EQ(waitpid(pid, &status, 0), pid); 4182 EXPECT_EQ(true, WIFEXITED(status)); 4183 EXPECT_EQ(0, WEXITSTATUS(status)); 4184 4185 close(memfd); 4186 } 4187 4188 TEST(user_notification_addfd_rlimit) 4189 { 4190 pid_t pid; 4191 long ret; 4192 int status, listener, memfd; 4193 struct seccomp_notif_addfd addfd = {}; 4194 struct seccomp_notif req = {}; 4195 struct seccomp_notif_resp resp = {}; 4196 const struct rlimit lim = { 4197 .rlim_cur = 0, 4198 .rlim_max = 0, 4199 }; 4200 4201 memfd = memfd_create("test", 0); 4202 ASSERT_GE(memfd, 0); 4203 4204 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 4205 ASSERT_EQ(0, ret) { 4206 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 4207 } 4208 4209 /* Check that the basic notification machinery works */ 4210 listener = user_notif_syscall(__NR_getppid, 4211 SECCOMP_FILTER_FLAG_NEW_LISTENER); 4212 ASSERT_GE(listener, 0); 4213 4214 pid = fork(); 4215 ASSERT_GE(pid, 0); 4216 4217 if (pid == 0) 4218 exit(syscall(__NR_getppid) != USER_NOTIF_MAGIC); 4219 4220 4221 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 4222 4223 ASSERT_EQ(prlimit(pid, RLIMIT_NOFILE, &lim, NULL), 0); 4224 4225 addfd.srcfd = memfd; 4226 addfd.newfd_flags = O_CLOEXEC; 4227 addfd.newfd = 0; 4228 addfd.id = req.id; 4229 addfd.flags = 0; 4230 4231 /* Should probably spot check /proc/sys/fs/file-nr */ 4232 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1); 4233 EXPECT_EQ(errno, EMFILE); 4234 4235 addfd.flags = SECCOMP_ADDFD_FLAG_SEND; 4236 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1); 4237 EXPECT_EQ(errno, EMFILE); 4238 4239 addfd.newfd = 100; 4240 addfd.flags = SECCOMP_ADDFD_FLAG_SETFD; 4241 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ADDFD, &addfd), -1); 4242 EXPECT_EQ(errno, EBADF); 4243 4244 resp.id = req.id; 4245 resp.error = 0; 4246 resp.val = USER_NOTIF_MAGIC; 4247 4248 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 4249 4250 /* Wait for child to finish. */ 4251 EXPECT_EQ(waitpid(pid, &status, 0), pid); 4252 EXPECT_EQ(true, WIFEXITED(status)); 4253 EXPECT_EQ(0, WEXITSTATUS(status)); 4254 4255 close(memfd); 4256 } 4257 4258 /* Make sure PTRACE_O_SUSPEND_SECCOMP requires CAP_SYS_ADMIN. 
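 * The fixture below drops CAP_SYS_ADMIN, installs a seccomp filter, and forks a paused child to trace.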
*/ 4259 FIXTURE(O_SUSPEND_SECCOMP) { 4260 pid_t pid; 4261 }; 4262 4263 FIXTURE_SETUP(O_SUSPEND_SECCOMP) 4264 { 4265 ERRNO_FILTER(block_read, E2BIG); 4266 cap_value_t cap_list[] = { CAP_SYS_ADMIN }; 4267 cap_t caps; 4268 4269 self->pid = 0; 4270 4271 /* make sure we don't have CAP_SYS_ADMIN */ 4272 caps = cap_get_proc(); 4273 ASSERT_NE(NULL, caps); 4274 ASSERT_EQ(0, cap_set_flag(caps, CAP_EFFECTIVE, 1, cap_list, CAP_CLEAR)); 4275 ASSERT_EQ(0, cap_set_proc(caps)); 4276 cap_free(caps); 4277 4278 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)); 4279 ASSERT_EQ(0, prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_block_read)); 4280 4281 self->pid = fork(); 4282 ASSERT_GE(self->pid, 0); 4283 4284 if (self->pid == 0) { 4285 while (1) 4286 pause(); 4287 _exit(127); 4288 } 4289 } 4290 4291 FIXTURE_TEARDOWN(O_SUSPEND_SECCOMP) 4292 { 4293 if (self->pid) 4294 kill(self->pid, SIGKILL); 4295 } 4296 4297 TEST_F(O_SUSPEND_SECCOMP, setoptions) 4298 { 4299 int wstatus; 4300 4301 ASSERT_EQ(0, ptrace(PTRACE_ATTACH, self->pid, NULL, 0)); 4302 ASSERT_EQ(self->pid, wait(&wstatus)); 4303 ASSERT_EQ(-1, ptrace(PTRACE_SETOPTIONS, self->pid, NULL, PTRACE_O_SUSPEND_SECCOMP)); 4304 if (errno == EINVAL) 4305 SKIP(return, "Kernel does not support PTRACE_O_SUSPEND_SECCOMP (missing CONFIG_CHECKPOINT_RESTORE?)"); 4306 ASSERT_EQ(EPERM, errno); 4307 } 4308 4309 TEST_F(O_SUSPEND_SECCOMP, seize) 4310 { 4311 int ret; 4312 4313 ret = ptrace(PTRACE_SEIZE, self->pid, NULL, PTRACE_O_SUSPEND_SECCOMP); 4314 ASSERT_EQ(-1, ret); 4315 if (errno == EINVAL) 4316 SKIP(return, "Kernel does not support PTRACE_O_SUSPEND_SECCOMP (missing CONFIG_CHECKPOINT_RESTORE?)"); 4317 ASSERT_EQ(EPERM, errno); 4318 } 4319 4320 /* 4321 * get_nth - Get the nth, space-separated entry in a file. 4322 * 4323 * Returns the length of the read field. 4324 * Throws an error if the field is zero-length. 4325 */ 4326 static ssize_t get_nth(struct __test_metadata *_metadata, const char *path, 4327 const unsigned int position, char **entry) 4328 { 4329 char *line = NULL; 4330 unsigned int i; 4331 ssize_t nread; 4332 size_t len = 0; 4333 FILE *f; 4334 4335 f = fopen(path, "r"); 4336 ASSERT_NE(f, NULL) { 4337 TH_LOG("Could not open %s: %s", path, strerror(errno)); 4338 } 4339 4340 for (i = 0; i < position; i++) { 4341 nread = getdelim(&line, &len, ' ', f); 4342 ASSERT_GE(nread, 0) { 4343 TH_LOG("Failed to read entry %d in file %s", i, path); 4344 } 4345 } 4346 fclose(f); 4347 4348 ASSERT_GT(nread, 0) { 4349 TH_LOG("Entry in file %s had zero length", path); 4350 } 4351 4352 *entry = line; 4353 return nread - 1; 4354 } 4355 4356 /* For a given PID, get the task state (D, R, etc...)
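 * The state is read as the third space-separated field of /proc/<pid>/stat.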
*/ 4357 static char get_proc_stat(struct __test_metadata *_metadata, pid_t pid) 4358 { 4359 char proc_path[100] = {0}; 4360 char status; 4361 char *line; 4362 4363 snprintf(proc_path, sizeof(proc_path), "/proc/%d/stat", pid); 4364 ASSERT_EQ(get_nth(_metadata, proc_path, 3, &line), 1); 4365 4366 status = *line; 4367 free(line); 4368 4369 return status; 4370 } 4371 4372 TEST(user_notification_fifo) 4373 { 4374 struct seccomp_notif_resp resp = {}; 4375 struct seccomp_notif req = {}; 4376 int i, status, listener; 4377 pid_t pid, pids[3]; 4378 __u64 baseid; 4379 long ret; 4380 /* 100 ms */ 4381 struct timespec delay = { .tv_nsec = 100000000 }; 4382 4383 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 4384 ASSERT_EQ(0, ret) { 4385 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 4386 } 4387 4388 /* Setup a listener */ 4389 listener = user_notif_syscall(__NR_getppid, 4390 SECCOMP_FILTER_FLAG_NEW_LISTENER); 4391 ASSERT_GE(listener, 0); 4392 4393 pid = fork(); 4394 ASSERT_GE(pid, 0); 4395 4396 if (pid == 0) { 4397 ret = syscall(__NR_getppid); 4398 exit(ret != USER_NOTIF_MAGIC); 4399 } 4400 4401 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 4402 baseid = req.id + 1; 4403 4404 resp.id = req.id; 4405 resp.error = 0; 4406 resp.val = USER_NOTIF_MAGIC; 4407 4408 /* Send a plain response (flags == 0) and check that it is accepted */ 4409 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 4410 4411 EXPECT_EQ(waitpid(pid, &status, 0), pid); 4412 EXPECT_EQ(true, WIFEXITED(status)); 4413 EXPECT_EQ(0, WEXITSTATUS(status)); 4414 4415 /* Start children, and generate notifications */ 4416 for (i = 0; i < ARRAY_SIZE(pids); i++) { 4417 pid = fork(); 4418 if (pid == 0) { 4419 ret = syscall(__NR_getppid); 4420 exit(ret != USER_NOTIF_MAGIC); 4421 } 4422 pids[i] = pid; 4423 } 4424 4425 /* This spins until all of the children are sleeping */ 4426 restart_wait: 4427 for (i = 0; i < ARRAY_SIZE(pids); i++) { 4428 if (get_proc_stat(_metadata, pids[i]) != 'S') { 4429 nanosleep(&delay, NULL); 4430 goto restart_wait; 4431 } 4432 } 4433 4434 /* Read the notifications in order (and respond) */ 4435 for (i = 0; i < ARRAY_SIZE(pids); i++) { 4436 memset(&req, 0, sizeof(req)); 4437 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 4438 EXPECT_EQ(req.id, baseid + i); 4439 resp.id = req.id; 4440 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 4441 } 4442 4443 /* Make sure notifications were received */ 4444 for (i = 0; i < ARRAY_SIZE(pids); i++) { 4445 EXPECT_EQ(waitpid(pids[i], &status, 0), pids[i]); 4446 EXPECT_EQ(true, WIFEXITED(status)); 4447 EXPECT_EQ(0, WEXITSTATUS(status)); 4448 } 4449 } 4450 4451 /* get_proc_syscall - Get the syscall in progress for a given pid 4452 * 4453 * Returns the current syscall number for a given process 4454 * Returns -1 if not in syscall (running or blocked) 4455 */ 4456 static long get_proc_syscall(struct __test_metadata *_metadata, int pid) 4457 { 4458 char proc_path[100] = {0}; 4459 long ret = -1; 4460 ssize_t nread; 4461 char *line; 4462 4463 snprintf(proc_path, sizeof(proc_path), "/proc/%d/syscall", pid); 4464 nread = get_nth(_metadata, proc_path, 1, &line); 4465 ASSERT_GT(nread, 0); 4466 4467 if (!strncmp("running", line, MIN(7, nread))) 4468 ret = strtol(line, NULL, 16); 4469 4470 free(line); 4471 return ret; 4472 } 4473 4474 /* Ensure non-fatal signals prior to receive are not blocked */ 4475 TEST(user_notification_wait_killable_pre_notification) 4476 { 4477 struct sigaction new_action = { 4478 .sa_handler = signal_handler, 4479 }; 4480 int listener, status,
sk_pair[2]; 4481 pid_t pid; 4482 long ret; 4483 char c; 4484 /* 100 ms */ 4485 struct timespec delay = { .tv_nsec = 100000000 }; 4486 4487 ASSERT_EQ(sigemptyset(&new_action.sa_mask), 0); 4488 4489 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 4490 ASSERT_EQ(0, ret) 4491 { 4492 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 4493 } 4494 4495 ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0); 4496 4497 listener = user_notif_syscall( 4498 __NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER | 4499 SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV); 4500 ASSERT_GE(listener, 0); 4501 4502 /* 4503 * Check that we can kill the process with SIGUSR1 prior to receiving 4504 * the notification. SIGUSR1 is wired up to a custom signal handler; 4505 * make sure it gets called. 4506 */ 4507 pid = fork(); 4508 ASSERT_GE(pid, 0); 4509 4510 if (pid == 0) { 4511 close(sk_pair[0]); 4512 handled = sk_pair[1]; 4513 4514 /* Setup the non-fatal sigaction without SA_RESTART */ 4515 if (sigaction(SIGUSR1, &new_action, NULL)) { 4516 perror("sigaction"); 4517 exit(1); 4518 } 4519 4520 ret = syscall(__NR_getppid); 4521 /* Make sure the syscall was interrupted by the signal (EINTR) */ 4522 exit(ret != -1 || errno != EINTR); 4523 } 4524 4525 /* 4526 * Make sure we've gotten to the seccomp user notification wait 4527 * from getppid prior to sending any signals 4528 */ 4529 while (get_proc_syscall(_metadata, pid) != __NR_getppid && 4530 get_proc_stat(_metadata, pid) != 'S') 4531 nanosleep(&delay, NULL); 4532 4533 /* Send non-fatal kill signal */ 4534 EXPECT_EQ(kill(pid, SIGUSR1), 0); 4535 4536 /* wait for process to exit (exit checks for EINTR) */ 4537 EXPECT_EQ(waitpid(pid, &status, 0), pid); 4538 EXPECT_EQ(true, WIFEXITED(status)); 4539 EXPECT_EQ(0, WEXITSTATUS(status)); 4540 4541 EXPECT_EQ(read(sk_pair[0], &c, 1), 1); 4542 } 4543 4544 /* Ensure non-fatal signals after receive are blocked */ 4545 TEST(user_notification_wait_killable) 4546 { 4547 struct sigaction new_action = { 4548 .sa_handler = signal_handler, 4549 }; 4550 struct seccomp_notif_resp resp = {}; 4551 struct seccomp_notif req = {}; 4552 int listener, status, sk_pair[2]; 4553 pid_t pid; 4554 long ret; 4555 char c; 4556 /* 100 ms */ 4557 struct timespec delay = { .tv_nsec = 100000000 }; 4558 4559 ASSERT_EQ(sigemptyset(&new_action.sa_mask), 0); 4560 4561 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 4562 ASSERT_EQ(0, ret) 4563 { 4564 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 4565 } 4566 4567 ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0); 4568 4569 listener = user_notif_syscall( 4570 __NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER | 4571 SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV); 4572 ASSERT_GE(listener, 0); 4573 4574 pid = fork(); 4575 ASSERT_GE(pid, 0); 4576 4577 if (pid == 0) { 4578 close(sk_pair[0]); 4579 handled = sk_pair[1]; 4580 4581 /* Setup the sigaction without SA_RESTART */ 4582 if (sigaction(SIGUSR1, &new_action, NULL)) { 4583 perror("sigaction"); 4584 exit(1); 4585 } 4586 4587 /* Make sure that the syscall is completed (no EINTR) */ 4588 ret = syscall(__NR_getppid); 4589 exit(ret != USER_NOTIF_MAGIC); 4590 } 4591 4592 /* 4593 * Get the notification, to move the notifying process into a 4594 * non-interruptible (TASK_KILLABLE) state.
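 * Once the notification has been received, only fatal signals may interrupt the wait.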
4595 */ 4596 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 4597 /* Send non-fatal kill signal */ 4598 EXPECT_EQ(kill(pid, SIGUSR1), 0); 4599 4600 /* 4601 * Make sure the task moves to TASK_KILLABLE by waiting for the 4602 * D (Disk Sleep) state after receiving the non-fatal signal. 4603 */ 4604 while (get_proc_stat(_metadata, pid) != 'D') 4605 nanosleep(&delay, NULL); 4606 4607 resp.id = req.id; 4608 resp.val = USER_NOTIF_MAGIC; 4609 /* Make sure the notification is found and able to be replied to */ 4610 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 4611 4612 /* 4613 * Make sure that the signal handler does get called once we're back in 4614 * userspace. 4615 */ 4616 EXPECT_EQ(read(sk_pair[0], &c, 1), 1); 4617 /* wait for process to exit (exit checks for USER_NOTIF_MAGIC) */ 4618 EXPECT_EQ(waitpid(pid, &status, 0), pid); 4619 EXPECT_EQ(true, WIFEXITED(status)); 4620 EXPECT_EQ(0, WEXITSTATUS(status)); 4621 } 4622 4623 /* Ensure fatal signals after receive are not blocked */ 4624 TEST(user_notification_wait_killable_fatal) 4625 { 4626 struct seccomp_notif req = {}; 4627 int listener, status; 4628 pid_t pid; 4629 long ret; 4630 /* 100 ms */ 4631 struct timespec delay = { .tv_nsec = 100000000 }; 4632 4633 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 4634 ASSERT_EQ(0, ret) 4635 { 4636 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 4637 } 4638 4639 listener = user_notif_syscall( 4640 __NR_getppid, SECCOMP_FILTER_FLAG_NEW_LISTENER | 4641 SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV); 4642 ASSERT_GE(listener, 0); 4643 4644 pid = fork(); 4645 ASSERT_GE(pid, 0); 4646 4647 if (pid == 0) { 4648 /* This should never complete as it should get a SIGTERM */ 4649 syscall(__NR_getppid); 4650 exit(1); 4651 } 4652 4653 while (get_proc_stat(_metadata, pid) != 'S') 4654 nanosleep(&delay, NULL); 4655 4656 /* 4657 * Get the notification, to move the notifying process into a 4658 * non-interruptible (TASK_KILLABLE) state. 4659 */ 4660 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 4661 /* Kill the process with a fatal signal */ 4662 EXPECT_EQ(kill(pid, SIGTERM), 0); 4663 4664 /* 4665 * Wait for the process to exit, and make sure the process terminated 4666 * due to the SIGTERM signal. 4667 */ 4668 EXPECT_EQ(waitpid(pid, &status, 0), pid); 4669 EXPECT_EQ(true, WIFSIGNALED(status)); 4670 EXPECT_EQ(SIGTERM, WTERMSIG(status)); 4671 } 4672 4673 /* 4674 * TODO: 4675 * - expand NNP testing 4676 * - better arch-specific TRACE and TRAP handlers. 4677 * - endianness checking when appropriate 4678 * - 64-bit arg prodding 4679 * - arch value testing (x86 modes especially) 4680 * - verify that FILTER_FLAG_LOG filters generate log messages 4681 * - verify that RET_LOG generates log messages 4682 */ 4683 4684 TEST_HARNESS_MAIN 4685