/*
 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by the GPLv2 license.
 *
 * Test code for seccomp bpf.
 */

#define _GNU_SOURCE
#include <sys/types.h>

/*
 * glibc 2.26 and later have SIGSYS in siginfo_t. Before that,
 * we need to use the kernel's siginfo.h file and trick glibc
 * into accepting it.
 */
#if !__GLIBC_PREREQ(2, 26)
# include <asm/siginfo.h>
# define __have_siginfo_t 1
# define __have_sigval_t 1
# define __have_sigevent_t 1
#endif

#include <errno.h>
#include <linux/filter.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/user.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/seccomp.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <linux/elf.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/fcntl.h>
#include <sys/mman.h>
#include <sys/times.h>
#include <sys/socket.h>
#include <sys/ioctl.h>

#include <unistd.h>
#include <sys/syscall.h>
#include <poll.h>

#include "../kselftest_harness.h"

#ifndef PR_SET_PTRACER
# define PR_SET_PTRACER 0x59616d61
#endif

#ifndef PR_SET_NO_NEW_PRIVS
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
#endif

#ifndef PR_SECCOMP_EXT
#define PR_SECCOMP_EXT 43
#endif

#ifndef SECCOMP_EXT_ACT
#define SECCOMP_EXT_ACT 1
#endif

#ifndef SECCOMP_EXT_ACT_TSYNC
#define SECCOMP_EXT_ACT_TSYNC 1
#endif

#ifndef SECCOMP_MODE_STRICT
#define SECCOMP_MODE_STRICT 1
#endif

#ifndef SECCOMP_MODE_FILTER
#define SECCOMP_MODE_FILTER 2
#endif

#ifndef SECCOMP_RET_ALLOW
struct seccomp_data {
	int nr;
	__u32 arch;
	__u64 instruction_pointer;
	__u64 args[6];
};
#endif

#ifndef SECCOMP_RET_KILL_PROCESS
#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
#define SECCOMP_RET_KILL_THREAD	 0x00000000U /* kill the thread */
#endif
#ifndef SECCOMP_RET_KILL
#define SECCOMP_RET_KILL	SECCOMP_RET_KILL_THREAD
#define SECCOMP_RET_TRAP	0x00030000U /* disallow and force a SIGSYS */
#define SECCOMP_RET_ERRNO	0x00050000U /* returns an errno */
#define SECCOMP_RET_TRACE	0x7ff00000U /* pass to a tracer or disallow */
#define SECCOMP_RET_ALLOW	0x7fff0000U /* allow */
#endif
#ifndef SECCOMP_RET_LOG
#define SECCOMP_RET_LOG		0x7ffc0000U /* allow after logging */
#endif

#ifndef __NR_seccomp
# if defined(__i386__)
#  define __NR_seccomp 354
# elif defined(__x86_64__)
#  define __NR_seccomp 317
# elif defined(__arm__)
#  define __NR_seccomp 383
# elif defined(__aarch64__)
#  define __NR_seccomp 277
# elif defined(__hppa__)
#  define __NR_seccomp 338
# elif defined(__powerpc__)
#  define __NR_seccomp 358
# elif defined(__s390__)
#  define __NR_seccomp 348
# else
#  warning "seccomp syscall number unknown for this architecture"
#  define __NR_seccomp 0xffff
# endif
#endif

#ifndef SECCOMP_SET_MODE_STRICT
#define SECCOMP_SET_MODE_STRICT 0
#endif

#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif

#ifndef SECCOMP_GET_ACTION_AVAIL
#define SECCOMP_GET_ACTION_AVAIL 2
#endif

#ifndef SECCOMP_GET_NOTIF_SIZES
#define SECCOMP_GET_NOTIF_SIZES 3
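/*
 * Note: the #ifndef fallbacks in this header block mirror the UAPI
 * constants and structures from <linux/seccomp.h> (values assumed to
 * match the kernel ABI) so the test still builds against older headers.
 */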
#endif

#ifndef SECCOMP_FILTER_FLAG_TSYNC
#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
#endif

#ifndef SECCOMP_FILTER_FLAG_LOG
#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
#endif

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif

#ifndef PTRACE_SECCOMP_GET_METADATA
#define PTRACE_SECCOMP_GET_METADATA 0x420d

struct seccomp_metadata {
	__u64 filter_off;	/* Input: which filter */
	__u64 flags;		/* Output: filter's flags */
};
#endif

#ifndef SECCOMP_FILTER_FLAG_NEW_LISTENER
#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)

#define SECCOMP_RET_USER_NOTIF 0x7fc00000U

#define SECCOMP_IOC_MAGIC		'!'
#define SECCOMP_IO(nr)			_IO(SECCOMP_IOC_MAGIC, nr)
#define SECCOMP_IOR(nr, type)		_IOR(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOW(nr, type)		_IOW(SECCOMP_IOC_MAGIC, nr, type)
#define SECCOMP_IOWR(nr, type)		_IOWR(SECCOMP_IOC_MAGIC, nr, type)

/* Flags for seccomp notification fd ioctl. */
#define SECCOMP_IOCTL_NOTIF_RECV	SECCOMP_IOWR(0, struct seccomp_notif)
#define SECCOMP_IOCTL_NOTIF_SEND	SECCOMP_IOWR(1, \
						struct seccomp_notif_resp)
#define SECCOMP_IOCTL_NOTIF_ID_VALID	SECCOMP_IOR(2, __u64)

struct seccomp_notif {
	__u64 id;
	__u32 pid;
	__u32 flags;
	struct seccomp_data data;
};

struct seccomp_notif_resp {
	__u64 id;
	__s64 val;
	__s32 error;
	__u32 flags;
};

struct seccomp_notif_sizes {
	__u16 seccomp_notif;
	__u16 seccomp_notif_resp;
	__u16 seccomp_data;
};
#endif

#ifndef seccomp
int seccomp(unsigned int op, unsigned int flags, void *args)
{
	errno = 0;
	return syscall(__NR_seccomp, op, flags, args);
}
#endif

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]))
#elif __BYTE_ORDER == __BIG_ENDIAN
#define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32))
#else
#error "wut? Unknown __BYTE_ORDER?!"
#endif

#define SIBLING_EXIT_UNKILLED	0xbadbeef
#define SIBLING_EXIT_FAILURE	0xbadface
#define SIBLING_EXIT_NEWPRIVS	0xbadfeed

TEST(mode_strict_support)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	syscall(__NR_exit, 0);
}

TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL)
{
	long ret;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP");
	}
	syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER,
		NULL, NULL, NULL);
	EXPECT_FALSE(true) {
		TH_LOG("Unreachable!");
	}
}

/* Note! This doesn't test no new privs behavior */
TEST(no_new_privs_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	EXPECT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
}

/* Tests kernel support by checking for a copy_from_user() fault on NULL. */
TEST(mode_filter_support)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EFAULT, errno) {
		TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!");
	}
}

TEST(mode_filter_without_nnp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0);
	ASSERT_LE(0, ret) {
		TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS");
	}
	errno = 0;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	/* Succeeds with CAP_SYS_ADMIN, fails without */
	/* TODO(wad) check caps not euid */
	if (geteuid()) {
		EXPECT_EQ(-1, ret);
		EXPECT_EQ(EACCES, errno);
	} else {
		EXPECT_EQ(0, ret);
	}
}

#define MAX_INSNS_PER_PATH 32768

TEST(filter_size_limits)
{
	int i;
	int count = BPF_MAXINSNS + 1;
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);

	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	prog.len = count;

	/* Too many filter instructions in a single filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_NE(0, ret) {
		TH_LOG("Installing %d insn filter was allowed", prog.len);
	}

	/* One less is okay, though. */
	prog.len -= 1;
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Installing %d insn filter wasn't allowed", prog.len);
	}
}

TEST(filter_chain_limits)
{
	int i;
	int count = BPF_MAXINSNS;
	struct sock_filter allow[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter *filter;
	struct sock_fprog prog = { };
	long ret;

	filter = calloc(count, sizeof(*filter));
	ASSERT_NE(NULL, filter);

	for (i = 0; i < count; i++)
		filter[i] = allow[0];

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	prog.filter = filter;
	prog.len = 1;

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	prog.len = count;

	/* Too many total filter instructions. */
	for (i = 0; i < MAX_INSNS_PER_PATH; i++) {
		ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
		if (ret != 0)
			break;
	}
	ASSERT_NE(0, ret) {
		TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)",
		       i, count, i * (count + 4));
	}
}

TEST(mode_filter_cannot_move_to_strict)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}


TEST(mode_filter_get_seccomp)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
	EXPECT_EQ(2, ret);
}


TEST(ALLOW_all)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}

TEST(empty_prog)
{
	struct sock_filter filter[] = {
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}

TEST(log_all)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	/* getppid() should succeed and be logged (no check for logging) */
	EXPECT_EQ(parent, syscall(__NR_getppid));
}

TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x10000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}

/* return code >= 0x80000000 is unused. */
TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, 0x90000000U),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
	EXPECT_EQ(0, syscall(__NR_getpid)) {
		TH_LOG("getpid() shouldn't ever return");
	}
}

TEST_SIGNAL(KILL_all, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);
}

TEST_SIGNAL(KILL_one, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_SIGNAL(KILL_one_arg_one, SIGSYS)
{
	void *fatal_address;
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K,
			(unsigned long)&fatal_address, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	struct tms timebuf;
	clock_t clock = times(&timebuf);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_LE(clock, syscall(__NR_times, &timebuf));
	/* times() should never return. */
	EXPECT_EQ(0, syscall(__NR_times, &fatal_address));
}

TEST_SIGNAL(KILL_one_arg_six, SIGSYS)
{
#ifndef __NR_mmap2
	int sysno = __NR_mmap;
#else
	int sysno = __NR_mmap2;
#endif
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		/* Only bother with the lower 32 bits for now. */
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;
	pid_t parent = getppid();
	int fd;
	void *map1, *map2;
	int page_size = sysconf(_SC_PAGESIZE);

	ASSERT_LT(0, page_size);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	ASSERT_EQ(0, ret);

	fd = open("/dev/zero", O_RDONLY);
	ASSERT_NE(-1, fd);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	map1 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);
	EXPECT_NE(MAP_FAILED, map1);
	/* mmap2() should never return. */
	map2 = (void *)syscall(sysno,
		NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);
	EXPECT_EQ(MAP_FAILED, map2);

	/* The test failed, so clean up the resources. */
	munmap(map1, page_size);
	munmap(map2, page_size);
	close(fd);
}

/* This is a thread task to die via seccomp filter violation. */
void *kill_thread(void *data)
{
	bool die = (bool)data;

	if (die) {
		prctl(PR_GET_SECCOMP, 0, 0, 0, 0);
		return (void *)SIBLING_EXIT_FAILURE;
	}

	return (void *)SIBLING_EXIT_UNKILLED;
}

/* Prepare a thread that will kill itself or both of us. */
void kill_thread_or_group(struct __test_metadata *_metadata, bool kill_process)
{
	pthread_t thread;
	void *status;
	/* Kill only when calling __NR_prctl. */
	struct sock_filter filter_thread[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_thread = {
		.len = (unsigned short)ARRAY_SIZE(filter_thread),
		.filter = filter_thread,
	};
	struct sock_filter filter_process[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_PROCESS),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog_process = {
		.len = (unsigned short)ARRAY_SIZE(filter_process),
		.filter = filter_process,
	};

	ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0,
			     kill_process ? &prog_process : &prog_thread));

	/*
	 * Add the KILL_THREAD rule again to make sure that the KILL_PROCESS
	 * flag cannot be downgraded by a new filter.
	 */
	ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog_thread));

	/* Start a thread that will exit immediately. */
	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)false));
	ASSERT_EQ(0, pthread_join(thread, &status));
	ASSERT_EQ(SIBLING_EXIT_UNKILLED, (unsigned long)status);

	/* Start a thread that will die immediately. */
	ASSERT_EQ(0, pthread_create(&thread, NULL, kill_thread, (void *)true));
	ASSERT_EQ(0, pthread_join(thread, &status));
	ASSERT_NE(SIBLING_EXIT_FAILURE, (unsigned long)status);

	/*
	 * If we get here, only the spawned thread died.
	 * Let the parent know the whole process didn't die (i.e. this
	 * thread, the spawner, stayed running).
	 */
	exit(42);
}

TEST(KILL_thread)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, false);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If only the thread was killed, we'll see exit 42. */
	ASSERT_TRUE(WIFEXITED(status));
	ASSERT_EQ(42, WEXITSTATUS(status));
}

TEST(KILL_process)
{
	int status;
	pid_t child_pid;

	child_pid = fork();
	ASSERT_LE(0, child_pid);
	if (child_pid == 0) {
		kill_thread_or_group(_metadata, true);
		_exit(38);
	}

	ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));

	/* If the entire process was killed, we'll see SIGSYS. */
	ASSERT_TRUE(WIFSIGNALED(status));
	ASSERT_EQ(SIGSYS, WTERMSIG(status));
}

/* TODO(wad) add 64-bit versus 32-bit arg tests. */
TEST(arg_out_of_range)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);
}

#define ERRNO_FILTER(name, errno) \
	struct sock_filter _read_filter_##name[] = { \
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS, \
			offsetof(struct seccomp_data, nr)), \
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), \
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | errno), \
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), \
	}; \
	struct sock_fprog prog_##name = { \
		.len = (unsigned short)ARRAY_SIZE(_read_filter_##name), \
		.filter = _read_filter_##name, \
	}

/* Make sure basic errno values are correctly passed through a filter. */
TEST(ERRNO_valid)
{
	ERRNO_FILTER(valid, E2BIG);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_valid);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(E2BIG, errno);
}

/* Make sure an errno of zero is correctly handled by the arch code. */
TEST(ERRNO_zero)
{
	ERRNO_FILTER(zero, 0);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_zero);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* "errno" of 0 is ok. */
	EXPECT_EQ(0, read(0, NULL, 0));
}

/*
 * The SECCOMP_RET_DATA mask is 16 bits wide, but errno is smaller.
 * This tests that the errno value gets capped correctly, fixed by
 * 580c57f10768 ("seccomp: cap SECCOMP_RET_ERRNO data to MAX_ERRNO").
 */
TEST(ERRNO_capped)
{
	ERRNO_FILTER(capped, 4096);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_capped);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(4095, errno);
}

/*
 * Filters are processed in reverse order: last applied is executed first.
 * Since only the SECCOMP_RET_ACTION mask is tested for return values, the
 * SECCOMP_RET_DATA mask results will follow the most recently applied
 * matching filter return (and not the lowest or highest value).
 */
TEST(ERRNO_order)
{
	ERRNO_FILTER(first, 11);
	ERRNO_FILTER(second, 13);
	ERRNO_FILTER(third, 12);
	long ret;
	pid_t parent = getppid();

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_first);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_second);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog_third);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(-1, read(0, NULL, 0));
	EXPECT_EQ(12, errno);
}

FIXTURE_DATA(TRAP) {
	struct sock_fprog prog;
};

FIXTURE_SETUP(TRAP)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);
}

FIXTURE_TEARDOWN(TRAP)
{
	if (self->prog.filter)
		free(self->prog.filter);
}

TEST_F_SIGNAL(TRAP, dfl, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}

/* Ensure that SIGSYS overrides SIG_IGN */
TEST_F_SIGNAL(TRAP, ign, SIGSYS)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	signal(SIGSYS, SIG_IGN);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	syscall(__NR_getpid);
}

static siginfo_t TRAP_info;
static volatile int TRAP_nr;
static void TRAP_action(int nr, siginfo_t *info, void *void_context)
{
	memcpy(&TRAP_info, info, sizeof(TRAP_info));
	TRAP_nr = nr;
}

TEST_F(TRAP, handler)
{
	int ret, test;
	struct sigaction act;
	sigset_t mask;

	memset(&act, 0, sizeof(act));
	sigemptyset(&mask);
	sigaddset(&mask, SIGSYS);

	act.sa_sigaction = &TRAP_action;
	act.sa_flags = SA_SIGINFO;
	ret = sigaction(SIGSYS, &act, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigaction failed");
	}
	ret = sigprocmask(SIG_UNBLOCK, &mask, NULL);
	ASSERT_EQ(0, ret) {
		TH_LOG("sigprocmask failed");
	}

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog);
	ASSERT_EQ(0, ret);
	TRAP_nr = 0;
	memset(&TRAP_info, 0, sizeof(TRAP_info));
	/* Expect the registers to be rolled back. (nr = error) may vary
	 * based on arch. */
	ret = syscall(__NR_getpid);
	/* Silence gcc warning about volatile. */
	test = TRAP_nr;
	EXPECT_EQ(SIGSYS, test);
	struct local_sigsys {
		void *_call_addr;	/* calling user insn */
		int _syscall;		/* triggering system call number */
		unsigned int _arch;	/* AUDIT_ARCH_* of syscall */
	} *sigsys = (struct local_sigsys *)
#ifdef si_syscall
		&(TRAP_info.si_call_addr);
#else
		&TRAP_info.si_pid;
#endif
	EXPECT_EQ(__NR_getpid, sigsys->_syscall);
	/* Make sure arch is non-zero. */
	EXPECT_NE(0, sigsys->_arch);
	EXPECT_NE(0, (unsigned long)sigsys->_call_addr);
}

FIXTURE_DATA(precedence) {
	struct sock_fprog allow;
	struct sock_fprog log;
	struct sock_fprog trace;
	struct sock_fprog error;
	struct sock_fprog trap;
	struct sock_fprog kill;
};

FIXTURE_SETUP(precedence)
{
	struct sock_filter allow_insns[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_filter log_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_LOG),
	};
	struct sock_filter trace_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE),
	};
	struct sock_filter error_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO),
	};
	struct sock_filter trap_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP),
	};
	struct sock_filter kill_insns[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
	};

	memset(self, 0, sizeof(*self));
#define FILTER_ALLOC(_x) \
	self->_x.filter = malloc(sizeof(_x##_insns)); \
	ASSERT_NE(NULL, self->_x.filter); \
	memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \
	self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns)
	FILTER_ALLOC(allow);
	FILTER_ALLOC(log);
	FILTER_ALLOC(trace);
	FILTER_ALLOC(error);
	FILTER_ALLOC(trap);
	FILTER_ALLOC(kill);
}

FIXTURE_TEARDOWN(precedence)
{
#define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter)
	FILTER_FREE(allow);
	FILTER_FREE(log);
	FILTER_FREE(trace);
	FILTER_FREE(error);
	FILTER_FREE(trap);
	FILTER_FREE(kill);
}

TEST_F(precedence, allow_ok)
{
	pid_t parent, res = 0;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	res = syscall(__NR_getppid);
	EXPECT_EQ(parent, res);
}

TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS)
{
	pid_t parent, res = 0;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	res = syscall(__NR_getppid);
	EXPECT_EQ(parent, res);
	/* getpid() should never return. */
	res = syscall(__NR_getpid);
	EXPECT_EQ(0, res);
}

TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* getpid() should never return. */
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F(precedence, errno_is_third)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F(precedence, errno_is_third_in_any_order)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	EXPECT_EQ(0, syscall(__NR_getpid));
}

TEST_F(precedence, trace_is_fourth)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* No ptracer */
	EXPECT_EQ(-1, syscall(__NR_getpid));
}

TEST_F(precedence, trace_is_fourth_in_any_order)
{
	pid_t parent;
	long ret;

	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* No ptracer */
	EXPECT_EQ(-1, syscall(__NR_getpid));
}

TEST_F(precedence, log_is_fifth)
{
	pid_t mypid, parent;
	long ret;

	mypid = getpid();
	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* Should also work just fine */
	EXPECT_EQ(mypid, syscall(__NR_getpid));
}

TEST_F(precedence, log_is_fifth_in_any_order)
{
	pid_t mypid, parent;
	long ret;

	mypid = getpid();
	parent = getppid();
	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->log);
	ASSERT_EQ(0, ret);
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow);
	ASSERT_EQ(0, ret);
	/* Should work just fine. */
	EXPECT_EQ(parent, syscall(__NR_getppid));
	/* Should also work just fine */
	EXPECT_EQ(mypid, syscall(__NR_getpid));
}

#ifndef PTRACE_O_TRACESECCOMP
#define PTRACE_O_TRACESECCOMP	0x00000080
#endif

/* Catch the Ubuntu 12.04 value error. */
#if PTRACE_EVENT_SECCOMP != 7
#undef PTRACE_EVENT_SECCOMP
#endif

#ifndef PTRACE_EVENT_SECCOMP
#define PTRACE_EVENT_SECCOMP 7
#endif

#define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP)
bool tracer_running;
void tracer_stop(int sig)
{
	tracer_running = false;
}

typedef void tracer_func_t(struct __test_metadata *_metadata,
			   pid_t tracee, int status, void *args);

void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee,
		  tracer_func_t tracer_func, void *args, bool ptrace_syscall)
{
	int ret = -1;
	struct sigaction action = {
		.sa_handler = tracer_stop,
	};

	/* Allow external shutdown. */
	tracer_running = true;
	ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL));

	errno = 0;
	while (ret == -1 && errno != EINVAL)
		ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0);
	ASSERT_EQ(0, ret) {
		kill(tracee, SIGKILL);
	}
	/* Wait for attach stop */
	wait(NULL);

	ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ?
						      PTRACE_O_TRACESYSGOOD :
						      PTRACE_O_TRACESECCOMP);
	ASSERT_EQ(0, ret) {
		TH_LOG("Failed to set PTRACE_O_TRACESECCOMP");
		kill(tracee, SIGKILL);
	}
	ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
		     tracee, NULL, 0);
	ASSERT_EQ(0, ret);

	/* Unblock the tracee */
	ASSERT_EQ(1, write(fd, "A", 1));
	ASSERT_EQ(0, close(fd));

	/* Run until we're shut down. Must assert to stop execution. */
	while (tracer_running) {
		int status;

		if (wait(&status) != tracee)
			continue;
		if (WIFSIGNALED(status) || WIFEXITED(status))
			/* Child is dead. Time to go. */
			return;

		/* Check if this is a seccomp event. */
		ASSERT_EQ(!ptrace_syscall, IS_SECCOMP_EVENT(status));

		tracer_func(_metadata, tracee, status, args);

		ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT,
			     tracee, NULL, 0);
		ASSERT_EQ(0, ret);
	}
	/* Directly report the status of our test harness results. */
	syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE);
}

/* Common tracer setup/teardown functions. */
void cont_handler(int num)
{ }
pid_t setup_trace_fixture(struct __test_metadata *_metadata,
			  tracer_func_t func, void *args, bool ptrace_syscall)
{
	char sync;
	int pipefd[2];
	pid_t tracer_pid;
	pid_t tracee = getpid();

	/* Setup a pipe for clean synchronization. */
	ASSERT_EQ(0, pipe(pipefd));

	/* Fork a child which we'll promote to tracer */
	tracer_pid = fork();
	ASSERT_LE(0, tracer_pid);
	signal(SIGALRM, cont_handler);
	if (tracer_pid == 0) {
		close(pipefd[0]);
		start_tracer(_metadata, pipefd[1], tracee, func, args,
			     ptrace_syscall);
		syscall(__NR_exit, 0);
	}
	close(pipefd[1]);
	prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
	read(pipefd[0], &sync, 1);
	close(pipefd[0]);

	return tracer_pid;
}
void teardown_trace_fixture(struct __test_metadata *_metadata,
			    pid_t tracer)
{
	if (tracer) {
		int status;
		/*
		 * Extract the exit code from the other process and
		 * adopt it for ourselves in case its asserts failed.
		 */
		ASSERT_EQ(0, kill(tracer, SIGUSR1));
		ASSERT_EQ(tracer, waitpid(tracer, &status, 0));
		if (WEXITSTATUS(status))
			_metadata->passed = 0;
	}
}

/* "poke" tracer arguments and function. */
struct tracer_args_poke_t {
	unsigned long poke_addr;
};

void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status,
		 void *args)
{
	int ret;
	unsigned long msg;
	struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args;

	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);
	/* If this fails, don't try to recover. */
	ASSERT_EQ(0x1001, msg) {
		kill(tracee, SIGKILL);
	}
	/*
	 * Poke in the message.
	 * Registers are not touched to try to keep this relatively arch
	 * agnostic.
	 */
	ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001);
	EXPECT_EQ(0, ret);
}

FIXTURE_DATA(TRACE_poke) {
	struct sock_fprog prog;
	pid_t tracer;
	long poked;
	struct tracer_args_poke_t tracer_args;
};

FIXTURE_SETUP(TRACE_poke)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	self->poked = 0;
	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);

	/* Set up tracer args. */
	self->tracer_args.poke_addr = (unsigned long)&self->poked;

	/* Launch tracer. */
	self->tracer = setup_trace_fixture(_metadata, tracer_poke,
					   &self->tracer_args, false);
}

FIXTURE_TEARDOWN(TRACE_poke)
{
	teardown_trace_fixture(_metadata, self->tracer);
	if (self->prog.filter)
		free(self->prog.filter);
}

TEST_F(TRACE_poke, read_has_side_effects)
{
	ssize_t ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(0, self->poked);
	ret = read(-1, NULL, 0);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(0x1001, self->poked);
}

TEST_F(TRACE_poke, getpid_runs_normally)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	EXPECT_EQ(0, self->poked);
	EXPECT_NE(0, syscall(__NR_getpid));
	EXPECT_EQ(0, self->poked);
}

#if defined(__x86_64__)
# define ARCH_REGS	struct user_regs_struct
# define SYSCALL_NUM	orig_rax
# define SYSCALL_RET	rax
#elif defined(__i386__)
# define ARCH_REGS	struct user_regs_struct
# define SYSCALL_NUM	orig_eax
# define SYSCALL_RET	eax
#elif defined(__arm__)
# define ARCH_REGS	struct pt_regs
# define SYSCALL_NUM	ARM_r7
# define SYSCALL_RET	ARM_r0
#elif defined(__aarch64__)
# define ARCH_REGS	struct user_pt_regs
# define SYSCALL_NUM	regs[8]
# define SYSCALL_RET	regs[0]
#elif defined(__hppa__)
# define ARCH_REGS	struct user_regs_struct
# define SYSCALL_NUM	gr[20]
# define SYSCALL_RET	gr[28]
#elif defined(__powerpc__)
# define ARCH_REGS	struct pt_regs
# define SYSCALL_NUM	gpr[0]
# define SYSCALL_RET	gpr[3]
#elif defined(__s390__)
# define ARCH_REGS	s390_regs
# define SYSCALL_NUM	gprs[2]
# define SYSCALL_RET	gprs[2]
#elif defined(__mips__)
# define ARCH_REGS	struct pt_regs
# define SYSCALL_NUM	regs[2]
# define SYSCALL_SYSCALL_NUM	regs[4]
# define SYSCALL_RET	regs[2]
# define SYSCALL_NUM_RET_SHARE_REG
#else
# error "Do not know how to find your architecture's registers and syscalls"
#endif

/* When the syscall return can't be changed, stub out the tests for it. */
#ifdef SYSCALL_NUM_RET_SHARE_REG
# define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(-1, action)
#else
# define EXPECT_SYSCALL_RETURN(val, action)	EXPECT_EQ(val, action)
#endif

/* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
 * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
 */
#if defined(__x86_64__) || defined(__i386__) || defined(__mips__)
#define HAVE_GETREGS
#endif

/* Architecture-specific syscall fetching routine. */
int get_syscall(struct __test_metadata *_metadata, pid_t tracee)
{
	ARCH_REGS regs;
#ifdef HAVE_GETREGS
	EXPECT_EQ(0, ptrace(PTRACE_GETREGS, tracee, 0, &regs)) {
		TH_LOG("PTRACE_GETREGS failed");
		return -1;
	}
#else
	struct iovec iov;

	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	EXPECT_EQ(0, ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov)) {
		TH_LOG("PTRACE_GETREGSET failed");
		return -1;
	}
#endif

#if defined(__mips__)
	if (regs.SYSCALL_NUM == __NR_O32_Linux)
		return regs.SYSCALL_SYSCALL_NUM;
#endif
	return regs.SYSCALL_NUM;
}

/* Architecture-specific syscall changing routine. */
void change_syscall(struct __test_metadata *_metadata,
		    pid_t tracee, int syscall)
{
	int ret;
	ARCH_REGS regs;
#ifdef HAVE_GETREGS
	ret = ptrace(PTRACE_GETREGS, tracee, 0, &regs);
#else
	struct iovec iov;
	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov);
#endif
	EXPECT_EQ(0, ret) {}

#if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
	defined(__s390__) || defined(__hppa__)
	{
		regs.SYSCALL_NUM = syscall;
	}
#elif defined(__mips__)
	{
		if (regs.SYSCALL_NUM == __NR_O32_Linux)
			regs.SYSCALL_SYSCALL_NUM = syscall;
		else
			regs.SYSCALL_NUM = syscall;
	}

#elif defined(__arm__)
# ifndef PTRACE_SET_SYSCALL
#  define PTRACE_SET_SYSCALL 23
# endif
	{
		ret = ptrace(PTRACE_SET_SYSCALL, tracee, NULL, syscall);
		EXPECT_EQ(0, ret);
	}

#elif defined(__aarch64__)
# ifndef NT_ARM_SYSTEM_CALL
#  define NT_ARM_SYSTEM_CALL 0x404
# endif
	{
		iov.iov_base = &syscall;
		iov.iov_len = sizeof(syscall);
		ret = ptrace(PTRACE_SETREGSET, tracee, NT_ARM_SYSTEM_CALL,
			     &iov);
		EXPECT_EQ(0, ret);
	}

#else
	ASSERT_EQ(1, 0) {
		TH_LOG("How is the syscall changed on this architecture?");
	}
#endif

	/* If syscall is skipped, change return value. */
	if (syscall == -1)
#ifdef SYSCALL_NUM_RET_SHARE_REG
		TH_LOG("Can't modify syscall return on this architecture");
#else
		regs.SYSCALL_RET = EPERM;
#endif

#ifdef HAVE_GETREGS
	ret = ptrace(PTRACE_SETREGS, tracee, 0, &regs);
#else
	iov.iov_base = &regs;
	iov.iov_len = sizeof(regs);
	ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov);
#endif
	EXPECT_EQ(0, ret);
}

void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee,
		    int status, void *args)
{
	int ret;
	unsigned long msg;

	/* Make sure we got the right message. */
	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);

	/* Validate and take action on expected syscalls. */
	switch (msg) {
	case 0x1002:
		/* change getpid to getppid. */
		EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee));
		change_syscall(_metadata, tracee, __NR_getppid);
		break;
	case 0x1003:
		/* skip gettid. */
		EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee));
		change_syscall(_metadata, tracee, -1);
		break;
	case 0x1004:
		/* do nothing (allow getppid) */
		EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee));
		break;
	default:
		EXPECT_EQ(0, msg) {
			TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg);
			kill(tracee, SIGKILL);
		}
	}

}

void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee,
		   int status, void *args)
{
	int ret, nr;
	unsigned long msg;
	static bool entry;

	/* Make sure we got an empty message. */
	ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, msg);

	/* The only way to tell PTRACE_SYSCALL entry/exit is by counting. */
	entry = !entry;
	if (!entry)
		return;

	nr = get_syscall(_metadata, tracee);

	if (nr == __NR_getpid)
		change_syscall(_metadata, tracee, __NR_getppid);
	if (nr == __NR_openat)
		change_syscall(_metadata, tracee, -1);
}

FIXTURE_DATA(TRACE_syscall) {
	struct sock_fprog prog;
	pid_t tracer, mytid, mypid, parent;
};

FIXTURE_SETUP(TRACE_syscall)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};

	memset(&self->prog, 0, sizeof(self->prog));
	self->prog.filter = malloc(sizeof(filter));
	ASSERT_NE(NULL, self->prog.filter);
	memcpy(self->prog.filter, filter, sizeof(filter));
	self->prog.len = (unsigned short)ARRAY_SIZE(filter);

	/* Prepare some testable syscall results. */
	self->mytid = syscall(__NR_gettid);
	ASSERT_GT(self->mytid, 0);
	ASSERT_NE(self->mytid, 1) {
		TH_LOG("Running this test as init is not supported. :)");
	}

	self->mypid = getpid();
	ASSERT_GT(self->mypid, 0);
	ASSERT_EQ(self->mytid, self->mypid);

	self->parent = getppid();
	ASSERT_GT(self->parent, 0);
	ASSERT_NE(self->parent, self->mypid);

	/* Launch tracer. */
	self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL,
					   false);
}

FIXTURE_TEARDOWN(TRACE_syscall)
{
	teardown_trace_fixture(_metadata, self->tracer);
	if (self->prog.filter)
		free(self->prog.filter);
}

TEST_F(TRACE_syscall, ptrace_syscall_redirected)
{
	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
	teardown_trace_fixture(_metadata, self->tracer);
	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
					   true);

	/* Tracer will redirect getpid to getppid. */
	EXPECT_NE(self->mypid, syscall(__NR_getpid));
}

TEST_F(TRACE_syscall, ptrace_syscall_dropped)
{
	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
	teardown_trace_fixture(_metadata, self->tracer);
	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
					   true);

	/* Tracer should skip the open syscall, resulting in EPERM. */
	EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat));
}

TEST_F(TRACE_syscall, syscall_allowed)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* getppid works as expected (no changes). */
	EXPECT_EQ(self->parent, syscall(__NR_getppid));
	EXPECT_NE(self->mypid, syscall(__NR_getppid));
}

TEST_F(TRACE_syscall, syscall_redirected)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* getpid has been redirected to getppid as expected. */
	EXPECT_EQ(self->parent, syscall(__NR_getpid));
	EXPECT_NE(self->mypid, syscall(__NR_getpid));
}

TEST_F(TRACE_syscall, syscall_dropped)
{
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* gettid has been skipped and an altered return value stored. */
	EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid));
	EXPECT_NE(self->mytid, syscall(__NR_gettid));
}

TEST_F(TRACE_syscall, skip_after_RET_TRACE)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	/* Install fixture filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* Install "errno on getppid" filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* Tracer will redirect getpid to getppid, and we should see EPERM. */
	errno = 0;
	EXPECT_EQ(-1, syscall(__NR_getpid));
	EXPECT_EQ(EPERM, errno);
}

TEST_F_SIGNAL(TRACE_syscall, kill_after_RET_TRACE, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	/* Install fixture filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* Install "death on getppid" filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* Tracer will redirect getpid to getppid, and we should die. */
	EXPECT_NE(self->mypid, syscall(__NR_getpid));
}

TEST_F(TRACE_syscall, skip_after_ptrace)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
	teardown_trace_fixture(_metadata, self->tracer);
	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
					   true);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	/* Install "errno on getppid" filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* Tracer will redirect getpid to getppid, and we should see EPERM. */
	EXPECT_EQ(-1, syscall(__NR_getpid));
	EXPECT_EQ(EPERM, errno);
}

TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_ABS,
			offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	/* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */
	teardown_trace_fixture(_metadata, self->tracer);
	self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL,
					   true);

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret);

	/* Install "death on getppid" filter. */
	ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0);
	ASSERT_EQ(0, ret);

	/* Tracer will redirect getpid to getppid, and we should die. */
	EXPECT_NE(self->mypid, syscall(__NR_getpid));
}

TEST(seccomp_syscall)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short)ARRAY_SIZE(filter),
		.filter = filter,
	};
	long ret;

	ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	ASSERT_EQ(0, ret) {
		TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
	}

	/* Reject insane operation. */
	ret = seccomp(-1, 0, &prog);
	ASSERT_NE(ENOSYS, errno) {
		TH_LOG("Kernel does not support seccomp syscall!");
	}
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Did not reject crazy op value!");
	}

	/* Reject strict with flags or pointer. */
	ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL);
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Did not reject mode strict with flags!");
	}
	ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog);
	EXPECT_EQ(EINVAL, errno) {
		TH_LOG("Did not reject mode strict with uargs!");
	}

	/* Reject insane args for filter. */
*/ 2062 ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog); 2063 EXPECT_EQ(EINVAL, errno) { 2064 TH_LOG("Did not reject crazy filter flags!"); 2065 } 2066 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL); 2067 EXPECT_EQ(EFAULT, errno) { 2068 TH_LOG("Did not reject NULL filter!"); 2069 } 2070 2071 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2072 EXPECT_EQ(0, errno) { 2073 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s", 2074 strerror(errno)); 2075 } 2076 } 2077 2078 TEST(seccomp_syscall_mode_lock) 2079 { 2080 struct sock_filter filter[] = { 2081 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2082 }; 2083 struct sock_fprog prog = { 2084 .len = (unsigned short)ARRAY_SIZE(filter), 2085 .filter = filter, 2086 }; 2087 long ret; 2088 2089 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); 2090 ASSERT_EQ(0, ret) { 2091 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2092 } 2093 2094 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2095 ASSERT_NE(ENOSYS, errno) { 2096 TH_LOG("Kernel does not support seccomp syscall!"); 2097 } 2098 EXPECT_EQ(0, ret) { 2099 TH_LOG("Could not install filter!"); 2100 } 2101 2102 /* Make sure neither entry point will switch to strict. */ 2103 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0); 2104 EXPECT_EQ(EINVAL, errno) { 2105 TH_LOG("Switched to mode strict!"); 2106 } 2107 2108 ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL); 2109 EXPECT_EQ(EINVAL, errno) { 2110 TH_LOG("Switched to mode strict!"); 2111 } 2112 } 2113 2114 /* 2115 * Test detection of known and unknown filter flags. Userspace needs to be able 2116 * to check if a filter flag is supported by the current kernel and a good way 2117 * of doing that is by attempting to enter filter mode, with the flag bit in 2118 * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates 2119 * that the flag is valid and EINVAL indicates that the flag is invalid. 2120 */ 2121 TEST(detect_seccomp_filter_flags) 2122 { 2123 unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC, 2124 SECCOMP_FILTER_FLAG_LOG, 2125 SECCOMP_FILTER_FLAG_SPEC_ALLOW, 2126 SECCOMP_FILTER_FLAG_NEW_LISTENER }; 2127 unsigned int flag, all_flags; 2128 int i; 2129 long ret; 2130 2131 /* Test detection of known-good filter flags */ 2132 for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) { 2133 int bits = 0; 2134 2135 flag = flags[i]; 2136 /* Make sure the flag is a single bit! 
*/ 2137 while (flag) { 2138 if (flag & 0x1) 2139 bits ++; 2140 flag >>= 1; 2141 } 2142 ASSERT_EQ(1, bits); 2143 flag = flags[i]; 2144 2145 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2146 ASSERT_NE(ENOSYS, errno) { 2147 TH_LOG("Kernel does not support seccomp syscall!"); 2148 } 2149 EXPECT_EQ(-1, ret); 2150 EXPECT_EQ(EFAULT, errno) { 2151 TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!", 2152 flag); 2153 } 2154 2155 all_flags |= flag; 2156 } 2157 2158 /* Test detection of all known-good filter flags */ 2159 ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL); 2160 EXPECT_EQ(-1, ret); 2161 EXPECT_EQ(EFAULT, errno) { 2162 TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!", 2163 all_flags); 2164 } 2165 2166 /* Test detection of an unknown filter flag */ 2167 flag = -1; 2168 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2169 EXPECT_EQ(-1, ret); 2170 EXPECT_EQ(EINVAL, errno) { 2171 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!", 2172 flag); 2173 } 2174 2175 /* 2176 * Test detection of an unknown filter flag that may simply need to be 2177 * added to this test 2178 */ 2179 flag = flags[ARRAY_SIZE(flags) - 1] << 1; 2180 ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL); 2181 EXPECT_EQ(-1, ret); 2182 EXPECT_EQ(EINVAL, errno) { 2183 TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?", 2184 flag); 2185 } 2186 } 2187 2188 TEST(TSYNC_first) 2189 { 2190 struct sock_filter filter[] = { 2191 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2192 }; 2193 struct sock_fprog prog = { 2194 .len = (unsigned short)ARRAY_SIZE(filter), 2195 .filter = filter, 2196 }; 2197 long ret; 2198 2199 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); 2200 ASSERT_EQ(0, ret) { 2201 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2202 } 2203 2204 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2205 &prog); 2206 ASSERT_NE(ENOSYS, errno) { 2207 TH_LOG("Kernel does not support seccomp syscall!"); 2208 } 2209 EXPECT_EQ(0, ret) { 2210 TH_LOG("Could not install initial filter with TSYNC!"); 2211 } 2212 } 2213 2214 #define TSYNC_SIBLINGS 2 2215 struct tsync_sibling { 2216 pthread_t tid; 2217 pid_t system_tid; 2218 sem_t *started; 2219 pthread_cond_t *cond; 2220 pthread_mutex_t *mutex; 2221 int diverge; 2222 int num_waits; 2223 struct sock_fprog *prog; 2224 struct __test_metadata *metadata; 2225 }; 2226 2227 /* 2228 * To avoid joining joined threads (which is not allowed by Bionic), 2229 * make sure we both successfully join and clear the tid to skip a 2230 * later join attempt during fixture teardown. Any remaining threads 2231 * will be directly killed during teardown. 
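 */

/*
 * A hedged sketch of the probing technique described in the comment above
 * detect_seccomp_filter_flags(): passing a candidate flag with a NULL uargs
 * pointer yields EFAULT when the kernel understood the flag and EINVAL when
 * it did not. The helper is illustrative only and is not used by the tests
 * (its name is made up here); a caller might use it as, e.g.,
 * seccomp_filter_flag_supported(SECCOMP_FILTER_FLAG_TSYNC).
 */
static inline bool seccomp_filter_flag_supported(unsigned int flag)
{
	/* A recognized flag faults on the NULL filter pointer instead of
	 * being rejected outright.
	 */
	errno = 0;
	seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
	return errno == EFAULT;
}

/*
 * The PTHREAD_JOIN() wrapper below implements the join-and-clear pattern
 * described above.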
2232 */ 2233 #define PTHREAD_JOIN(tid, status) \ 2234 do { \ 2235 int _rc = pthread_join(tid, status); \ 2236 if (_rc) { \ 2237 TH_LOG("pthread_join of tid %u failed: %d\n", \ 2238 (unsigned int)tid, _rc); \ 2239 } else { \ 2240 tid = 0; \ 2241 } \ 2242 } while (0) 2243 2244 FIXTURE_DATA(TSYNC) { 2245 struct sock_fprog root_prog, apply_prog; 2246 struct tsync_sibling sibling[TSYNC_SIBLINGS]; 2247 sem_t started; 2248 pthread_cond_t cond; 2249 pthread_mutex_t mutex; 2250 int sibling_count; 2251 }; 2252 2253 FIXTURE_SETUP(TSYNC) 2254 { 2255 struct sock_filter root_filter[] = { 2256 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2257 }; 2258 struct sock_filter apply_filter[] = { 2259 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2260 offsetof(struct seccomp_data, nr)), 2261 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), 2262 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2263 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2264 }; 2265 2266 memset(&self->root_prog, 0, sizeof(self->root_prog)); 2267 memset(&self->apply_prog, 0, sizeof(self->apply_prog)); 2268 memset(&self->sibling, 0, sizeof(self->sibling)); 2269 self->root_prog.filter = malloc(sizeof(root_filter)); 2270 ASSERT_NE(NULL, self->root_prog.filter); 2271 memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter)); 2272 self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter); 2273 2274 self->apply_prog.filter = malloc(sizeof(apply_filter)); 2275 ASSERT_NE(NULL, self->apply_prog.filter); 2276 memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter)); 2277 self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter); 2278 2279 self->sibling_count = 0; 2280 pthread_mutex_init(&self->mutex, NULL); 2281 pthread_cond_init(&self->cond, NULL); 2282 sem_init(&self->started, 0, 0); 2283 self->sibling[0].tid = 0; 2284 self->sibling[0].cond = &self->cond; 2285 self->sibling[0].started = &self->started; 2286 self->sibling[0].mutex = &self->mutex; 2287 self->sibling[0].diverge = 0; 2288 self->sibling[0].num_waits = 1; 2289 self->sibling[0].prog = &self->root_prog; 2290 self->sibling[0].metadata = _metadata; 2291 self->sibling[1].tid = 0; 2292 self->sibling[1].cond = &self->cond; 2293 self->sibling[1].started = &self->started; 2294 self->sibling[1].mutex = &self->mutex; 2295 self->sibling[1].diverge = 0; 2296 self->sibling[1].prog = &self->root_prog; 2297 self->sibling[1].num_waits = 1; 2298 self->sibling[1].metadata = _metadata; 2299 } 2300 2301 FIXTURE_TEARDOWN(TSYNC) 2302 { 2303 int sib = 0; 2304 2305 if (self->root_prog.filter) 2306 free(self->root_prog.filter); 2307 if (self->apply_prog.filter) 2308 free(self->apply_prog.filter); 2309 2310 for ( ; sib < self->sibling_count; ++sib) { 2311 struct tsync_sibling *s = &self->sibling[sib]; 2312 2313 if (!s->tid) 2314 continue; 2315 /* 2316 * If a thread is still running, it may be stuck, so hit 2317 * it over the head really hard. 2318 */ 2319 pthread_kill(s->tid, 9); 2320 } 2321 pthread_mutex_destroy(&self->mutex); 2322 pthread_cond_destroy(&self->cond); 2323 sem_destroy(&self->started); 2324 } 2325 2326 void *tsync_sibling(void *data) 2327 { 2328 long ret = 0; 2329 struct tsync_sibling *me = data; 2330 2331 me->system_tid = syscall(__NR_gettid); 2332 2333 pthread_mutex_lock(me->mutex); 2334 if (me->diverge) { 2335 /* Just re-apply the root prog to fork the tree */ 2336 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 2337 me->prog, 0, 0); 2338 } 2339 sem_post(me->started); 2340 /* Return outside of started so parent notices failures. 
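In other words, post the semaphore unconditionally first so the parent's sem_wait() never blocks, then report the prctl() failure through the thread's return value, where PTHREAD_JOIN() will see SIBLING_EXIT_FAILURE.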
*/ 2341 if (ret) { 2342 pthread_mutex_unlock(me->mutex); 2343 return (void *)SIBLING_EXIT_FAILURE; 2344 } 2345 do { 2346 pthread_cond_wait(me->cond, me->mutex); 2347 me->num_waits = me->num_waits - 1; 2348 } while (me->num_waits); 2349 pthread_mutex_unlock(me->mutex); 2350 2351 ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); 2352 if (!ret) 2353 return (void *)SIBLING_EXIT_NEWPRIVS; 2354 read(0, NULL, 0); 2355 return (void *)SIBLING_EXIT_UNKILLED; 2356 } 2357 2358 void tsync_start_sibling(struct tsync_sibling *sibling) 2359 { 2360 pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling); 2361 } 2362 2363 TEST_F(TSYNC, siblings_fail_prctl) 2364 { 2365 long ret; 2366 void *status; 2367 struct sock_filter filter[] = { 2368 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2369 offsetof(struct seccomp_data, nr)), 2370 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1), 2371 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL), 2372 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2373 }; 2374 struct sock_fprog prog = { 2375 .len = (unsigned short)ARRAY_SIZE(filter), 2376 .filter = filter, 2377 }; 2378 2379 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2380 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2381 } 2382 2383 /* Check prctl failure detection by requesting sib 0 diverge. */ 2384 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); 2385 ASSERT_NE(ENOSYS, errno) { 2386 TH_LOG("Kernel does not support seccomp syscall!"); 2387 } 2388 ASSERT_EQ(0, ret) { 2389 TH_LOG("setting filter failed"); 2390 } 2391 2392 self->sibling[0].diverge = 1; 2393 tsync_start_sibling(&self->sibling[0]); 2394 tsync_start_sibling(&self->sibling[1]); 2395 2396 while (self->sibling_count < TSYNC_SIBLINGS) { 2397 sem_wait(&self->started); 2398 self->sibling_count++; 2399 } 2400 2401 /* Signal the threads to clean up*/ 2402 pthread_mutex_lock(&self->mutex); 2403 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2404 TH_LOG("cond broadcast non-zero"); 2405 } 2406 pthread_mutex_unlock(&self->mutex); 2407 2408 /* Ensure diverging sibling failed to call prctl. */ 2409 PTHREAD_JOIN(self->sibling[0].tid, &status); 2410 EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status); 2411 PTHREAD_JOIN(self->sibling[1].tid, &status); 2412 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2413 } 2414 2415 TEST_F(TSYNC, two_siblings_with_ancestor) 2416 { 2417 long ret; 2418 void *status; 2419 2420 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2421 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2422 } 2423 2424 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2425 ASSERT_NE(ENOSYS, errno) { 2426 TH_LOG("Kernel does not support seccomp syscall!"); 2427 } 2428 ASSERT_EQ(0, ret) { 2429 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2430 } 2431 tsync_start_sibling(&self->sibling[0]); 2432 tsync_start_sibling(&self->sibling[1]); 2433 2434 while (self->sibling_count < TSYNC_SIBLINGS) { 2435 sem_wait(&self->started); 2436 self->sibling_count++; 2437 } 2438 2439 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2440 &self->apply_prog); 2441 ASSERT_EQ(0, ret) { 2442 TH_LOG("Could install filter on all threads!"); 2443 } 2444 /* Tell the siblings to test the policy */ 2445 pthread_mutex_lock(&self->mutex); 2446 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2447 TH_LOG("cond broadcast non-zero"); 2448 } 2449 pthread_mutex_unlock(&self->mutex); 2450 /* Ensure they are both killed and don't exit cleanly. 
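The apply_prog filter kills any thread that calls read(), and tsync_sibling() finishes with a read(), so both joins should report a status of 0 instead of SIBLING_EXIT_UNKILLED.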
*/ 2451 PTHREAD_JOIN(self->sibling[0].tid, &status); 2452 EXPECT_EQ(0x0, (long)status); 2453 PTHREAD_JOIN(self->sibling[1].tid, &status); 2454 EXPECT_EQ(0x0, (long)status); 2455 } 2456 2457 TEST_F(TSYNC, two_sibling_want_nnp) 2458 { 2459 void *status; 2460 2461 /* start siblings before any prctl() operations */ 2462 tsync_start_sibling(&self->sibling[0]); 2463 tsync_start_sibling(&self->sibling[1]); 2464 while (self->sibling_count < TSYNC_SIBLINGS) { 2465 sem_wait(&self->started); 2466 self->sibling_count++; 2467 } 2468 2469 /* Tell the siblings to test no policy */ 2470 pthread_mutex_lock(&self->mutex); 2471 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2472 TH_LOG("cond broadcast non-zero"); 2473 } 2474 pthread_mutex_unlock(&self->mutex); 2475 2476 /* Ensure they are both upset about lacking nnp. */ 2477 PTHREAD_JOIN(self->sibling[0].tid, &status); 2478 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); 2479 PTHREAD_JOIN(self->sibling[1].tid, &status); 2480 EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); 2481 } 2482 2483 TEST_F(TSYNC, two_siblings_with_no_filter) 2484 { 2485 long ret; 2486 void *status; 2487 2488 /* start siblings before any prctl() operations */ 2489 tsync_start_sibling(&self->sibling[0]); 2490 tsync_start_sibling(&self->sibling[1]); 2491 while (self->sibling_count < TSYNC_SIBLINGS) { 2492 sem_wait(&self->started); 2493 self->sibling_count++; 2494 } 2495 2496 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2497 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2498 } 2499 2500 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2501 &self->apply_prog); 2502 ASSERT_NE(ENOSYS, errno) { 2503 TH_LOG("Kernel does not support seccomp syscall!"); 2504 } 2505 ASSERT_EQ(0, ret) { 2506 TH_LOG("Could install filter on all threads!"); 2507 } 2508 2509 /* Tell the siblings to test the policy */ 2510 pthread_mutex_lock(&self->mutex); 2511 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2512 TH_LOG("cond broadcast non-zero"); 2513 } 2514 pthread_mutex_unlock(&self->mutex); 2515 2516 /* Ensure they are both killed and don't exit cleanly. */ 2517 PTHREAD_JOIN(self->sibling[0].tid, &status); 2518 EXPECT_EQ(0x0, (long)status); 2519 PTHREAD_JOIN(self->sibling[1].tid, &status); 2520 EXPECT_EQ(0x0, (long)status); 2521 } 2522 2523 TEST_F(TSYNC, two_siblings_with_one_divergence) 2524 { 2525 long ret; 2526 void *status; 2527 2528 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2529 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2530 } 2531 2532 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2533 ASSERT_NE(ENOSYS, errno) { 2534 TH_LOG("Kernel does not support seccomp syscall!"); 2535 } 2536 ASSERT_EQ(0, ret) { 2537 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2538 } 2539 self->sibling[0].diverge = 1; 2540 tsync_start_sibling(&self->sibling[0]); 2541 tsync_start_sibling(&self->sibling[1]); 2542 2543 while (self->sibling_count < TSYNC_SIBLINGS) { 2544 sem_wait(&self->started); 2545 self->sibling_count++; 2546 } 2547 2548 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2549 &self->apply_prog); 2550 ASSERT_EQ(self->sibling[0].system_tid, ret) { 2551 TH_LOG("Did not fail on diverged sibling."); 2552 } 2553 2554 /* Wake the threads */ 2555 pthread_mutex_lock(&self->mutex); 2556 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2557 TH_LOG("cond broadcast non-zero"); 2558 } 2559 pthread_mutex_unlock(&self->mutex); 2560 2561 /* Ensure they are both unkilled. 
*/ 2562 PTHREAD_JOIN(self->sibling[0].tid, &status); 2563 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2564 PTHREAD_JOIN(self->sibling[1].tid, &status); 2565 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2566 } 2567 2568 TEST_F(TSYNC, two_siblings_not_under_filter) 2569 { 2570 long ret, sib; 2571 void *status; 2572 2573 ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2574 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2575 } 2576 2577 /* 2578 * Sibling 0 will have its own seccomp policy 2579 * and Sibling 1 will not be under seccomp at 2580 * all. Sibling 1 will enter seccomp and 0 2581 * will cause failure. 2582 */ 2583 self->sibling[0].diverge = 1; 2584 tsync_start_sibling(&self->sibling[0]); 2585 tsync_start_sibling(&self->sibling[1]); 2586 2587 while (self->sibling_count < TSYNC_SIBLINGS) { 2588 sem_wait(&self->started); 2589 self->sibling_count++; 2590 } 2591 2592 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); 2593 ASSERT_NE(ENOSYS, errno) { 2594 TH_LOG("Kernel does not support seccomp syscall!"); 2595 } 2596 ASSERT_EQ(0, ret) { 2597 TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); 2598 } 2599 2600 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2601 &self->apply_prog); 2602 ASSERT_EQ(ret, self->sibling[0].system_tid) { 2603 TH_LOG("Did not fail on diverged sibling."); 2604 } 2605 sib = 1; 2606 if (ret == self->sibling[0].system_tid) 2607 sib = 0; 2608 2609 pthread_mutex_lock(&self->mutex); 2610 2611 /* Increment the other siblings num_waits so we can clean up 2612 * the one we just saw. 2613 */ 2614 self->sibling[!sib].num_waits += 1; 2615 2616 /* Signal the thread to clean up*/ 2617 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2618 TH_LOG("cond broadcast non-zero"); 2619 } 2620 pthread_mutex_unlock(&self->mutex); 2621 PTHREAD_JOIN(self->sibling[sib].tid, &status); 2622 EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); 2623 /* Poll for actual task death. pthread_join doesn't guarantee it. */ 2624 while (!kill(self->sibling[sib].system_tid, 0)) 2625 sleep(0.1); 2626 /* Switch to the remaining sibling */ 2627 sib = !sib; 2628 2629 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2630 &self->apply_prog); 2631 ASSERT_EQ(0, ret) { 2632 TH_LOG("Expected the remaining sibling to sync"); 2633 }; 2634 2635 pthread_mutex_lock(&self->mutex); 2636 2637 /* If remaining sibling didn't have a chance to wake up during 2638 * the first broadcast, manually reduce the num_waits now. 2639 */ 2640 if (self->sibling[sib].num_waits > 1) 2641 self->sibling[sib].num_waits = 1; 2642 ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { 2643 TH_LOG("cond broadcast non-zero"); 2644 } 2645 pthread_mutex_unlock(&self->mutex); 2646 PTHREAD_JOIN(self->sibling[sib].tid, &status); 2647 EXPECT_EQ(0, (long)status); 2648 /* Poll for actual task death. pthread_join doesn't guarantee it. */ 2649 while (!kill(self->sibling[sib].system_tid, 0)) 2650 sleep(0.1); 2651 2652 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, 2653 &self->apply_prog); 2654 ASSERT_EQ(0, ret); /* just us chickens */ 2655 } 2656 2657 /* Make sure restarted syscalls are seen directly as "restart_syscall". 
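 * The filter below tags nanosleep() with SECCOMP_RET_TRACE|0x100 and
 * restart_syscall() with SECCOMP_RET_TRACE|0x200, so the tracing parent can
 * tell which syscall the kernel reports after the sleep is interrupted and
 * resumed.
 */

/*
 * Before that test, a hedged sketch (not used by the tests, name made up
 * here) of the TSYNC return convention the TSYNC tests above rely on: with
 * SECCOMP_FILTER_FLAG_TSYNC, seccomp() either attaches the filter to every
 * thread and returns 0, or returns the thread ID (as from gettid()) of a
 * thread that could not be synchronized, as two_siblings_with_one_divergence
 * asserts.
 */
static inline pid_t seccomp_apply_tsync(struct sock_fprog *fprog)
{
	long ret;

	ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
		      fprog);
	if (ret < 0)
		return -1;	/* errno describes the failure */
	return (pid_t)ret;	/* 0: all threads synced; >0: diverged TID */
}

/*
 * The syscall restart check itself follows.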
*/ 2658 TEST(syscall_restart) 2659 { 2660 long ret; 2661 unsigned long msg; 2662 pid_t child_pid; 2663 int pipefd[2]; 2664 int status; 2665 siginfo_t info = { }; 2666 struct sock_filter filter[] = { 2667 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2668 offsetof(struct seccomp_data, nr)), 2669 2670 #ifdef __NR_sigreturn 2671 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 6, 0), 2672 #endif 2673 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 5, 0), 2674 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 4, 0), 2675 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 3, 0), 2676 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 4, 0), 2677 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0), 2678 2679 /* Allow __NR_write for easy logging. */ 2680 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1), 2681 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2682 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2683 /* The nanosleep jump target. */ 2684 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100), 2685 /* The restart_syscall jump target. */ 2686 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200), 2687 }; 2688 struct sock_fprog prog = { 2689 .len = (unsigned short)ARRAY_SIZE(filter), 2690 .filter = filter, 2691 }; 2692 #if defined(__arm__) 2693 struct utsname utsbuf; 2694 #endif 2695 2696 ASSERT_EQ(0, pipe(pipefd)); 2697 2698 child_pid = fork(); 2699 ASSERT_LE(0, child_pid); 2700 if (child_pid == 0) { 2701 /* Child uses EXPECT not ASSERT to deliver status correctly. */ 2702 char buf = ' '; 2703 struct timespec timeout = { }; 2704 2705 /* Attach parent as tracer and stop. */ 2706 EXPECT_EQ(0, ptrace(PTRACE_TRACEME)); 2707 EXPECT_EQ(0, raise(SIGSTOP)); 2708 2709 EXPECT_EQ(0, close(pipefd[1])); 2710 2711 EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { 2712 TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); 2713 } 2714 2715 ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); 2716 EXPECT_EQ(0, ret) { 2717 TH_LOG("Failed to install filter!"); 2718 } 2719 2720 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { 2721 TH_LOG("Failed to read() sync from parent"); 2722 } 2723 EXPECT_EQ('.', buf) { 2724 TH_LOG("Failed to get sync data from read()"); 2725 } 2726 2727 /* Start nanosleep to be interrupted. */ 2728 timeout.tv_sec = 1; 2729 errno = 0; 2730 EXPECT_EQ(0, nanosleep(&timeout, NULL)) { 2731 TH_LOG("Call to nanosleep() failed (errno %d)", errno); 2732 } 2733 2734 /* Read final sync from parent. */ 2735 EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { 2736 TH_LOG("Failed final read() from parent"); 2737 } 2738 EXPECT_EQ('!', buf) { 2739 TH_LOG("Failed to get final data from read()"); 2740 } 2741 2742 /* Directly report the status of our test harness results. */ 2743 syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS 2744 : EXIT_FAILURE); 2745 } 2746 EXPECT_EQ(0, close(pipefd[0])); 2747 2748 /* Attach to child, setup options, and release. */ 2749 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2750 ASSERT_EQ(true, WIFSTOPPED(status)); 2751 ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL, 2752 PTRACE_O_TRACESECCOMP)); 2753 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2754 ASSERT_EQ(1, write(pipefd[1], ".", 1)); 2755 2756 /* Wait for nanosleep() to start. 
*/ 2757 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2758 ASSERT_EQ(true, WIFSTOPPED(status)); 2759 ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); 2760 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); 2761 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); 2762 ASSERT_EQ(0x100, msg); 2763 EXPECT_EQ(__NR_nanosleep, get_syscall(_metadata, child_pid)); 2764 2765 /* Might as well check siginfo for sanity while we're here. */ 2766 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); 2767 ASSERT_EQ(SIGTRAP, info.si_signo); 2768 ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code); 2769 EXPECT_EQ(0, info.si_errno); 2770 EXPECT_EQ(getuid(), info.si_uid); 2771 /* Verify signal delivery came from child (seccomp-triggered). */ 2772 EXPECT_EQ(child_pid, info.si_pid); 2773 2774 /* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */ 2775 ASSERT_EQ(0, kill(child_pid, SIGSTOP)); 2776 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2777 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2778 ASSERT_EQ(true, WIFSTOPPED(status)); 2779 ASSERT_EQ(SIGSTOP, WSTOPSIG(status)); 2780 ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); 2781 /* 2782 * There is no siginfo on SIGSTOP any more, so we can't verify 2783 * signal delivery came from parent now (getpid() == info.si_pid). 2784 * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com 2785 * At least verify the SIGSTOP via PTRACE_GETSIGINFO. 2786 */ 2787 EXPECT_EQ(SIGSTOP, info.si_signo); 2788 2789 /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */ 2790 ASSERT_EQ(0, kill(child_pid, SIGCONT)); 2791 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2792 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2793 ASSERT_EQ(true, WIFSTOPPED(status)); 2794 ASSERT_EQ(SIGCONT, WSTOPSIG(status)); 2795 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2796 2797 /* Wait for restart_syscall() to start. */ 2798 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2799 ASSERT_EQ(true, WIFSTOPPED(status)); 2800 ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); 2801 ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); 2802 ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); 2803 2804 ASSERT_EQ(0x200, msg); 2805 ret = get_syscall(_metadata, child_pid); 2806 #if defined(__arm__) 2807 /* 2808 * FIXME: 2809 * - native ARM registers do NOT expose true syscall. 2810 * - compat ARM registers on ARM64 DO expose true syscall. 2811 */ 2812 ASSERT_EQ(0, uname(&utsbuf)); 2813 if (strncmp(utsbuf.machine, "arm", 3) == 0) { 2814 EXPECT_EQ(__NR_nanosleep, ret); 2815 } else 2816 #endif 2817 { 2818 EXPECT_EQ(__NR_restart_syscall, ret); 2819 } 2820 2821 /* Write again to end test. 
*/ 2822 ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); 2823 ASSERT_EQ(1, write(pipefd[1], "!", 1)); 2824 EXPECT_EQ(0, close(pipefd[1])); 2825 2826 ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); 2827 if (WIFSIGNALED(status) || WEXITSTATUS(status)) 2828 _metadata->passed = 0; 2829 } 2830 2831 TEST_SIGNAL(filter_flag_log, SIGSYS) 2832 { 2833 struct sock_filter allow_filter[] = { 2834 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2835 }; 2836 struct sock_filter kill_filter[] = { 2837 BPF_STMT(BPF_LD|BPF_W|BPF_ABS, 2838 offsetof(struct seccomp_data, nr)), 2839 BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), 2840 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), 2841 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2842 }; 2843 struct sock_fprog allow_prog = { 2844 .len = (unsigned short)ARRAY_SIZE(allow_filter), 2845 .filter = allow_filter, 2846 }; 2847 struct sock_fprog kill_prog = { 2848 .len = (unsigned short)ARRAY_SIZE(kill_filter), 2849 .filter = kill_filter, 2850 }; 2851 long ret; 2852 pid_t parent = getppid(); 2853 2854 ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); 2855 ASSERT_EQ(0, ret); 2856 2857 /* Verify that the FILTER_FLAG_LOG flag isn't accepted in strict mode */ 2858 ret = seccomp(SECCOMP_SET_MODE_STRICT, SECCOMP_FILTER_FLAG_LOG, 2859 &allow_prog); 2860 ASSERT_NE(ENOSYS, errno) { 2861 TH_LOG("Kernel does not support seccomp syscall!"); 2862 } 2863 EXPECT_NE(0, ret) { 2864 TH_LOG("Kernel accepted FILTER_FLAG_LOG flag in strict mode!"); 2865 } 2866 EXPECT_EQ(EINVAL, errno) { 2867 TH_LOG("Kernel returned unexpected errno for FILTER_FLAG_LOG flag in strict mode!"); 2868 } 2869 2870 /* Verify that a simple, permissive filter can be added with no flags */ 2871 ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &allow_prog); 2872 EXPECT_EQ(0, ret); 2873 2874 /* See if the same filter can be added with the FILTER_FLAG_LOG flag */ 2875 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, 2876 &allow_prog); 2877 ASSERT_NE(EINVAL, errno) { 2878 TH_LOG("Kernel does not support the FILTER_FLAG_LOG flag!"); 2879 } 2880 EXPECT_EQ(0, ret); 2881 2882 /* Ensure that the kill filter works with the FILTER_FLAG_LOG flag */ 2883 ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_LOG, 2884 &kill_prog); 2885 EXPECT_EQ(0, ret); 2886 2887 EXPECT_EQ(parent, syscall(__NR_getppid)); 2888 /* getpid() should never return. 
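kill_prog answers __NR_getpid with SECCOMP_RET_KILL, so this call terminates the process with SIGSYS, the signal TEST_SIGNAL(filter_flag_log, SIGSYS) expects; the EXPECT_EQ(0, ...) below can only be reached if the filter failed to fire.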
*/ 2889 EXPECT_EQ(0, syscall(__NR_getpid)); 2890 } 2891 2892 TEST(get_action_avail) 2893 { 2894 __u32 actions[] = { SECCOMP_RET_KILL_THREAD, SECCOMP_RET_TRAP, 2895 SECCOMP_RET_ERRNO, SECCOMP_RET_TRACE, 2896 SECCOMP_RET_LOG, SECCOMP_RET_ALLOW }; 2897 __u32 unknown_action = 0x10000000U; 2898 int i; 2899 long ret; 2900 2901 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[0]); 2902 ASSERT_NE(ENOSYS, errno) { 2903 TH_LOG("Kernel does not support seccomp syscall!"); 2904 } 2905 ASSERT_NE(EINVAL, errno) { 2906 TH_LOG("Kernel does not support SECCOMP_GET_ACTION_AVAIL operation!"); 2907 } 2908 EXPECT_EQ(ret, 0); 2909 2910 for (i = 0; i < ARRAY_SIZE(actions); i++) { 2911 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &actions[i]); 2912 EXPECT_EQ(ret, 0) { 2913 TH_LOG("Expected action (0x%X) not available!", 2914 actions[i]); 2915 } 2916 } 2917 2918 /* Check that an unknown action is handled properly (EOPNOTSUPP) */ 2919 ret = seccomp(SECCOMP_GET_ACTION_AVAIL, 0, &unknown_action); 2920 EXPECT_EQ(ret, -1); 2921 EXPECT_EQ(errno, EOPNOTSUPP); 2922 } 2923 2924 TEST(get_metadata) 2925 { 2926 pid_t pid; 2927 int pipefd[2]; 2928 char buf; 2929 struct seccomp_metadata md; 2930 long ret; 2931 2932 ASSERT_EQ(0, pipe(pipefd)); 2933 2934 pid = fork(); 2935 ASSERT_GE(pid, 0); 2936 if (pid == 0) { 2937 struct sock_filter filter[] = { 2938 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 2939 }; 2940 struct sock_fprog prog = { 2941 .len = (unsigned short)ARRAY_SIZE(filter), 2942 .filter = filter, 2943 }; 2944 2945 /* one with log, one without */ 2946 ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 2947 SECCOMP_FILTER_FLAG_LOG, &prog)); 2948 ASSERT_EQ(0, seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog)); 2949 2950 ASSERT_EQ(0, close(pipefd[0])); 2951 ASSERT_EQ(1, write(pipefd[1], "1", 1)); 2952 ASSERT_EQ(0, close(pipefd[1])); 2953 2954 while (1) 2955 sleep(100); 2956 } 2957 2958 ASSERT_EQ(0, close(pipefd[1])); 2959 ASSERT_EQ(1, read(pipefd[0], &buf, 1)); 2960 2961 ASSERT_EQ(0, ptrace(PTRACE_ATTACH, pid)); 2962 ASSERT_EQ(pid, waitpid(pid, NULL, 0)); 2963 2964 /* Past here must not use ASSERT or child process is never killed. 
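An ASSERT_* failure aborts the test function on the spot, which would skip the kill(pid, SIGKILL) at the end and leave the forked child sleeping forever; EXPECT_* records the failure and keeps going.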
*/ 2965 2966 md.filter_off = 0; 2967 errno = 0; 2968 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md); 2969 EXPECT_EQ(sizeof(md), ret) { 2970 if (errno == EINVAL) 2971 XFAIL(goto skip, "Kernel does not support PTRACE_SECCOMP_GET_METADATA (missing CONFIG_CHECKPOINT_RESTORE?)"); 2972 } 2973 2974 EXPECT_EQ(md.flags, SECCOMP_FILTER_FLAG_LOG); 2975 EXPECT_EQ(md.filter_off, 0); 2976 2977 md.filter_off = 1; 2978 ret = ptrace(PTRACE_SECCOMP_GET_METADATA, pid, sizeof(md), &md); 2979 EXPECT_EQ(sizeof(md), ret); 2980 EXPECT_EQ(md.flags, 0); 2981 EXPECT_EQ(md.filter_off, 1); 2982 2983 skip: 2984 ASSERT_EQ(0, kill(pid, SIGKILL)); 2985 } 2986 2987 static int user_trap_syscall(int nr, unsigned int flags) 2988 { 2989 struct sock_filter filter[] = { 2990 BPF_STMT(BPF_LD+BPF_W+BPF_ABS, 2991 offsetof(struct seccomp_data, nr)), 2992 BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, nr, 0, 1), 2993 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_USER_NOTIF), 2994 BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW), 2995 }; 2996 2997 struct sock_fprog prog = { 2998 .len = (unsigned short)ARRAY_SIZE(filter), 2999 .filter = filter, 3000 }; 3001 3002 return seccomp(SECCOMP_SET_MODE_FILTER, flags, &prog); 3003 } 3004 3005 #define USER_NOTIF_MAGIC 116983961184613L 3006 TEST(user_notification_basic) 3007 { 3008 pid_t pid; 3009 long ret; 3010 int status, listener; 3011 struct seccomp_notif req = {}; 3012 struct seccomp_notif_resp resp = {}; 3013 struct pollfd pollfd; 3014 3015 struct sock_filter filter[] = { 3016 BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), 3017 }; 3018 struct sock_fprog prog = { 3019 .len = (unsigned short)ARRAY_SIZE(filter), 3020 .filter = filter, 3021 }; 3022 3023 pid = fork(); 3024 ASSERT_GE(pid, 0); 3025 3026 /* Check that we get -ENOSYS with no listener attached */ 3027 if (pid == 0) { 3028 if (user_trap_syscall(__NR_getpid, 0) < 0) 3029 exit(1); 3030 ret = syscall(__NR_getpid); 3031 exit(ret >= 0 || errno != ENOSYS); 3032 } 3033 3034 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3035 EXPECT_EQ(true, WIFEXITED(status)); 3036 EXPECT_EQ(0, WEXITSTATUS(status)); 3037 3038 /* Add some no-op filters so for grins. 
*/ 3039 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3040 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3041 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3042 EXPECT_EQ(seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog), 0); 3043 3044 /* Check that the basic notification machinery works */ 3045 listener = user_trap_syscall(__NR_getpid, 3046 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3047 EXPECT_GE(listener, 0); 3048 3049 /* Installing a second listener in the chain should EBUSY */ 3050 EXPECT_EQ(user_trap_syscall(__NR_getpid, 3051 SECCOMP_FILTER_FLAG_NEW_LISTENER), 3052 -1); 3053 EXPECT_EQ(errno, EBUSY); 3054 3055 pid = fork(); 3056 ASSERT_GE(pid, 0); 3057 3058 if (pid == 0) { 3059 ret = syscall(__NR_getpid); 3060 exit(ret != USER_NOTIF_MAGIC); 3061 } 3062 3063 pollfd.fd = listener; 3064 pollfd.events = POLLIN | POLLOUT; 3065 3066 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3067 EXPECT_EQ(pollfd.revents, POLLIN); 3068 3069 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3070 3071 pollfd.fd = listener; 3072 pollfd.events = POLLIN | POLLOUT; 3073 3074 EXPECT_GT(poll(&pollfd, 1, -1), 0); 3075 EXPECT_EQ(pollfd.revents, POLLOUT); 3076 3077 EXPECT_EQ(req.data.nr, __NR_getpid); 3078 3079 resp.id = req.id; 3080 resp.error = 0; 3081 resp.val = USER_NOTIF_MAGIC; 3082 3083 /* check that we make sure flags == 0 */ 3084 resp.flags = 1; 3085 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3086 EXPECT_EQ(errno, EINVAL); 3087 3088 resp.flags = 0; 3089 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3090 3091 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3092 EXPECT_EQ(true, WIFEXITED(status)); 3093 EXPECT_EQ(0, WEXITSTATUS(status)); 3094 } 3095 3096 TEST(user_notification_kill_in_middle) 3097 { 3098 pid_t pid; 3099 long ret; 3100 int listener; 3101 struct seccomp_notif req = {}; 3102 struct seccomp_notif_resp resp = {}; 3103 3104 listener = user_trap_syscall(__NR_getpid, 3105 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3106 EXPECT_GE(listener, 0); 3107 3108 /* 3109 * Check that nothing bad happens when we kill the task in the middle 3110 * of a syscall. 
3111 */ 3112 pid = fork(); 3113 ASSERT_GE(pid, 0); 3114 3115 if (pid == 0) { 3116 ret = syscall(__NR_getpid); 3117 exit(ret != USER_NOTIF_MAGIC); 3118 } 3119 3120 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3121 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), 0); 3122 3123 EXPECT_EQ(kill(pid, SIGKILL), 0); 3124 EXPECT_EQ(waitpid(pid, NULL, 0), pid); 3125 3126 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id), -1); 3127 3128 resp.id = req.id; 3129 ret = ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp); 3130 EXPECT_EQ(ret, -1); 3131 EXPECT_EQ(errno, ENOENT); 3132 } 3133 3134 static int handled = -1; 3135 3136 static void signal_handler(int signal) 3137 { 3138 if (write(handled, "c", 1) != 1) 3139 perror("write from signal"); 3140 } 3141 3142 TEST(user_notification_signal) 3143 { 3144 pid_t pid; 3145 long ret; 3146 int status, listener, sk_pair[2]; 3147 struct seccomp_notif req = {}; 3148 struct seccomp_notif_resp resp = {}; 3149 char c; 3150 3151 ASSERT_EQ(socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, sk_pair), 0); 3152 3153 listener = user_trap_syscall(__NR_gettid, 3154 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3155 EXPECT_GE(listener, 0); 3156 3157 pid = fork(); 3158 ASSERT_GE(pid, 0); 3159 3160 if (pid == 0) { 3161 close(sk_pair[0]); 3162 handled = sk_pair[1]; 3163 if (signal(SIGUSR1, signal_handler) == SIG_ERR) { 3164 perror("signal"); 3165 exit(1); 3166 } 3167 /* 3168 * ERESTARTSYS behavior is a bit hard to test, because we need 3169 * to rely on a signal that has not yet been handled. Let's at 3170 * least check that the error code gets propagated through, and 3171 * hope that it doesn't break when there is actually a signal :) 3172 */ 3173 ret = syscall(__NR_gettid); 3174 exit(!(ret == -1 && errno == 512)); 3175 } 3176 3177 close(sk_pair[1]); 3178 3179 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3180 3181 EXPECT_EQ(kill(pid, SIGUSR1), 0); 3182 3183 /* 3184 * Make sure the signal really is delivered, which means we're not 3185 * stuck in the user notification code any more and the notification 3186 * should be dead. 3187 */ 3188 EXPECT_EQ(read(sk_pair[0], &c, 1), 1); 3189 3190 resp.id = req.id; 3191 resp.error = -EPERM; 3192 resp.val = 0; 3193 3194 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), -1); 3195 EXPECT_EQ(errno, ENOENT); 3196 3197 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3198 3199 resp.id = req.id; 3200 resp.error = -512; /* -ERESTARTSYS */ 3201 resp.val = 0; 3202 3203 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3204 3205 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3206 EXPECT_EQ(true, WIFEXITED(status)); 3207 EXPECT_EQ(0, WEXITSTATUS(status)); 3208 } 3209 3210 TEST(user_notification_closed_listener) 3211 { 3212 pid_t pid; 3213 long ret; 3214 int status, listener; 3215 3216 listener = user_trap_syscall(__NR_getpid, 3217 SECCOMP_FILTER_FLAG_NEW_LISTENER); 3218 EXPECT_GE(listener, 0); 3219 3220 /* 3221 * Check that we get an ENOSYS when the listener is closed. 3222 */ 3223 pid = fork(); 3224 ASSERT_GE(pid, 0); 3225 if (pid == 0) { 3226 close(listener); 3227 ret = syscall(__NR_getpid); 3228 exit(ret != -1 && errno != ENOSYS); 3229 } 3230 3231 close(listener); 3232 3233 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3234 EXPECT_EQ(true, WIFEXITED(status)); 3235 EXPECT_EQ(0, WEXITSTATUS(status)); 3236 } 3237 3238 /* 3239 * Check that a pid in a child namespace still shows up as valid in ours. 
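 * (That check comes right after the short sketch below.)
 */

/*
 * A hedged illustration, not used by the tests, of the supervisor loop that
 * the user_notification_* tests above drive by hand: receive a request with
 * SECCOMP_IOCTL_NOTIF_RECV, optionally confirm the request is still
 * outstanding with SECCOMP_IOCTL_NOTIF_ID_VALID (the id goes stale if the
 * target dies), then answer it with SECCOMP_IOCTL_NOTIF_SEND. The helper
 * name is made up for illustration.
 */
static inline int user_notif_answer_one(int listener, __s32 error, __s64 val)
{
	struct seccomp_notif req = {};
	struct seccomp_notif_resp resp = {};

	/* Block until a filtered syscall arrives on the listener fd. */
	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
		return -1;

	/* The id becomes invalid if the target died while we deliberated. */
	if (ioctl(listener, SECCOMP_IOCTL_NOTIF_ID_VALID, &req.id) < 0)
		return -1;

	resp.id = req.id;
	resp.error = error;	/* negative errno, or 0 */
	resp.val = val;		/* return value the target will see */
	resp.flags = 0;		/* must be zero (see user_notification_basic) */

	return ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp);
}

/*
 * First, the child pid namespace case: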
3240 */ 3241 TEST(user_notification_child_pid_ns) 3242 { 3243 pid_t pid; 3244 int status, listener; 3245 struct seccomp_notif req = {}; 3246 struct seccomp_notif_resp resp = {}; 3247 3248 ASSERT_EQ(unshare(CLONE_NEWPID), 0); 3249 3250 listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER); 3251 ASSERT_GE(listener, 0); 3252 3253 pid = fork(); 3254 ASSERT_GE(pid, 0); 3255 3256 if (pid == 0) 3257 exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC); 3258 3259 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3260 EXPECT_EQ(req.pid, pid); 3261 3262 resp.id = req.id; 3263 resp.error = 0; 3264 resp.val = USER_NOTIF_MAGIC; 3265 3266 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3267 3268 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3269 EXPECT_EQ(true, WIFEXITED(status)); 3270 EXPECT_EQ(0, WEXITSTATUS(status)); 3271 close(listener); 3272 } 3273 3274 /* 3275 * Check that a pid in a sibling (i.e. unrelated) namespace shows up as 0, i.e. 3276 * invalid. 3277 */ 3278 TEST(user_notification_sibling_pid_ns) 3279 { 3280 pid_t pid, pid2; 3281 int status, listener; 3282 struct seccomp_notif req = {}; 3283 struct seccomp_notif_resp resp = {}; 3284 3285 listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER); 3286 ASSERT_GE(listener, 0); 3287 3288 pid = fork(); 3289 ASSERT_GE(pid, 0); 3290 3291 if (pid == 0) { 3292 ASSERT_EQ(unshare(CLONE_NEWPID), 0); 3293 3294 pid2 = fork(); 3295 ASSERT_GE(pid2, 0); 3296 3297 if (pid2 == 0) 3298 exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC); 3299 3300 EXPECT_EQ(waitpid(pid2, &status, 0), pid2); 3301 EXPECT_EQ(true, WIFEXITED(status)); 3302 EXPECT_EQ(0, WEXITSTATUS(status)); 3303 exit(WEXITSTATUS(status)); 3304 } 3305 3306 /* Create the sibling ns, and sibling in it. */ 3307 EXPECT_EQ(unshare(CLONE_NEWPID), 0); 3308 EXPECT_EQ(errno, 0); 3309 3310 pid2 = fork(); 3311 EXPECT_GE(pid2, 0); 3312 3313 if (pid2 == 0) { 3314 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3315 /* 3316 * The pid should be 0, i.e. the task is in some namespace that 3317 * we can't "see". 3318 */ 3319 ASSERT_EQ(req.pid, 0); 3320 3321 resp.id = req.id; 3322 resp.error = 0; 3323 resp.val = USER_NOTIF_MAGIC; 3324 3325 ASSERT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3326 exit(0); 3327 } 3328 3329 close(listener); 3330 3331 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3332 EXPECT_EQ(true, WIFEXITED(status)); 3333 EXPECT_EQ(0, WEXITSTATUS(status)); 3334 3335 EXPECT_EQ(waitpid(pid2, &status, 0), pid2); 3336 EXPECT_EQ(true, WIFEXITED(status)); 3337 EXPECT_EQ(0, WEXITSTATUS(status)); 3338 } 3339 3340 TEST(user_notification_fault_recv) 3341 { 3342 pid_t pid; 3343 int status, listener; 3344 struct seccomp_notif req = {}; 3345 struct seccomp_notif_resp resp = {}; 3346 3347 listener = user_trap_syscall(__NR_getpid, SECCOMP_FILTER_FLAG_NEW_LISTENER); 3348 ASSERT_GE(listener, 0); 3349 3350 pid = fork(); 3351 ASSERT_GE(pid, 0); 3352 3353 if (pid == 0) 3354 exit(syscall(__NR_getpid) != USER_NOTIF_MAGIC); 3355 3356 /* Do a bad recv() */ 3357 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, NULL), -1); 3358 EXPECT_EQ(errno, EFAULT); 3359 3360 /* We should still be able to receive this notification, though. 
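The EFAULT from the NULL buffer above must not have consumed or dropped the pending request.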
*/ 3361 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_RECV, &req), 0); 3362 EXPECT_EQ(req.pid, pid); 3363 3364 resp.id = req.id; 3365 resp.error = 0; 3366 resp.val = USER_NOTIF_MAGIC; 3367 3368 EXPECT_EQ(ioctl(listener, SECCOMP_IOCTL_NOTIF_SEND, &resp), 0); 3369 3370 EXPECT_EQ(waitpid(pid, &status, 0), pid); 3371 EXPECT_EQ(true, WIFEXITED(status)); 3372 EXPECT_EQ(0, WEXITSTATUS(status)); 3373 } 3374 3375 TEST(seccomp_get_notif_sizes) 3376 { 3377 struct seccomp_notif_sizes sizes; 3378 3379 EXPECT_EQ(seccomp(SECCOMP_GET_NOTIF_SIZES, 0, &sizes), 0); 3380 EXPECT_EQ(sizes.seccomp_notif, sizeof(struct seccomp_notif)); 3381 EXPECT_EQ(sizes.seccomp_notif_resp, sizeof(struct seccomp_notif_resp)); 3382 } 3383 3384 /* 3385 * TODO: 3386 * - add microbenchmarks 3387 * - expand NNP testing 3388 * - better arch-specific TRACE and TRAP handlers. 3389 * - endianness checking when appropriate 3390 * - 64-bit arg prodding 3391 * - arch value testing (x86 modes especially) 3392 * - verify that FILTER_FLAG_LOG filters generate log messages 3393 * - verify that RET_LOG generates log messages 3394 * - ... 3395 */ 3396 3397 TEST_HARNESS_MAIN 3398