/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef MPX_ENABLE_MANAGEMENT
# define MPX_ENABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef MPX_DISABLE_MANAGEMENT
# define MPX_DISABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a, b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID.  As such, this is needed on all architectures.
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * Returns true if current's euid is the same as p's uid or euid,
 * or if current has CAP_SYS_NICE in p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe.
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}
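/*
 * An illustrative, untested userspace sketch of the setpriority() semantics
 * above (not part of this file; assumes the glibc wrappers).  Out-of-range
 * nice values are clamped to [MIN_NICE, MAX_NICE] rather than rejected, and
 * only lowering priority is unprivileged.
 */
#if 0
#include <sys/resource.h>
#include <stdio.h>
#include <errno.h>

int main(void)
{
	/* 100 is clamped to 19 (MAX_NICE); lowering priority always works. */
	if (setpriority(PRIO_PROCESS, 0, 100))
		perror("setpriority");

	/* Raising it back needs CAP_SYS_NICE or RLIMIT_NICE headroom. */
	if (setpriority(PRIO_PROCESS, 0, 0))
		perror("setpriority back to 0");	/* typically EACCES */

	errno = 0;	/* getpriority() may legitimately return -1 */
	printf("nice is now %d\n", getpriority(PRIO_PROCESS, 0));
	return 0;
}
#endif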
/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
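/*
 * An illustrative, untested userspace sketch of the 40..1 offset described
 * above (not part of this file).  The raw syscall returns nice_to_rlimit()'s
 * biased value; the glibc getpriority() wrapper converts it back to -20..19.
 */
#if 0
#include <sys/syscall.h>
#include <sys/resource.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);

	/* raw is 40..1; the real nice value is 20 - raw. */
	printf("raw=%ld nice=%ld glibc=%d\n", raw, 20 - raw,
	       getpriority(PRIO_PROCESS, 0));
	return 0;
}
#endif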
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
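/*
 * An illustrative, untested userspace sketch of the deferred RLIMIT_NPROC
 * check described in set_user() above (not part of this file; must start as
 * root, and uid 1000 is a hypothetical unprivileged user): the set*uid()
 * call itself succeeds, and the limit is enforced at execve() time instead.
 */
#if 0
#include <sys/resource.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };

	setrlimit(RLIMIT_NPROC, &rl);
	/* Succeeds even if uid 1000 is already over the limit... */
	if (setuid(1000))
		perror("setuid");
	/* ...but the deferred check can make execve() fail with EAGAIN. */
	execl("/bin/true", "true", (char *)NULL);
	perror("execve");
	return 1;
}
#endif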
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
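/*
 * An illustrative, untested userspace sketch of the BSD-style drop-and-regain
 * pattern discussed above (not part of this file; assumes a setuid-root
 * binary): setreuid() swaps the real and effective uid, which setuid() alone
 * cannot do for root because it also overwrites the saved uid.
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	uid_t ruid = getuid();		/* the invoking user */
	uid_t euid = geteuid();		/* 0 for a setuid-root binary */

	/* Temporarily drop privilege: swap real and effective. */
	if (setreuid(euid, ruid))
		perror("setreuid drop");

	/* ... do unprivileged work ... */

	/* Regain: swap back, allowed because the real uid is still 0. */
	if (setreuid(ruid, euid))
		perror("setreuid regain");
	return 0;
}
#endif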
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4BSD-compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}
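/*
 * An illustrative, untested userspace sketch of a permanent privilege drop
 * with setresuid() (not part of this file; uid 1000 is hypothetical):
 * setting all three ids at once leaves nothing to regain root from, and
 * getresuid() lets the caller verify the result.
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	uid_t r, e, s, target = 1000;

	if (setresuid(target, target, target)) {
		perror("setresuid");
		return 1;
	}
	getresuid(&r, &e, &s);
	printf("ruid=%u euid=%u suid=%u\n",
	       (unsigned)r, (unsigned)e, (unsigned)s);
	return 0;
}
#endif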
/*
 * Same as above, but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}
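/*
 * An illustrative, untested userspace sketch of the setfsuid() quirk above
 * (not part of this file; uid 1000 is hypothetical): the call only ever
 * returns the previous fsuid, so success has to be probed with a second,
 * no-op call, as the setfsuid(2) man page suggests.
 */
#if 0
#include <sys/fsuid.h>
#include <stdio.h>

int main(void)
{
	uid_t target = 1000;

	setfsuid(target);
	/* -1 is never a valid uid, so this call changes nothing and
	 * reports the fsuid left behind by the call above. */
	if ((uid_t)setfsuid(-1) != target)
		fprintf(stderr, "setfsuid(%u) had no effect\n",
			(unsigned)target);
	return 0;
}
#endif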
/*
 * Same again, but for the fsgid ("Samma på svenska"..).
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}
#endif /* CONFIG_MULTIUSER */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
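/*
 * An illustrative, untested userspace sketch of consuming sys_times() output
 * (not part of this file): every field, and the return value, is in clock
 * ticks, so userspace divides by sysconf(_SC_CLK_TCK) to get seconds.
 */
#if 0
#include <sys/times.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct tms t;
	clock_t elapsed = times(&t);	/* ticks since an arbitrary point */
	long hz = sysconf(_SC_CLK_TCK);	/* clock ticks per second */

	printf("elapsed=%.2fs user=%.2fs sys=%.2fs\n",
	       (double)elapsed / hz,
	       (double)t.tms_utime / hz,
	       (double)t.tms_stime / hz);
	return 0;
}
#endif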
#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}
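/*
 * An illustrative, untested userspace sketch of the usual job-control use of
 * setpgid() (not part of this file): both parent and child make the same
 * call so the new group exists before either side relies on it, whichever
 * runs first.  Once the child has execve()d, the parent's call would fail
 * with -EACCES (the !PF_FORKNOEXEC check above).
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		setpgid(0, 0);		/* child: new group, leader = self */
		_exit(0);
	}
	if (pid > 0 && setpgid(pid, pid) < 0)
		perror("setpgid");
	return 0;
}
#endif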
SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}
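/*
 * An illustrative, untested userspace sketch of the classic daemonizing use
 * of setsid() (not part of this file): a process-group leader may not call
 * setsid() (it fails with -EPERM, see above), which is why daemons fork()
 * first and let the child, which cannot be a group leader, create the
 * session.
 */
#if 0
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	if (fork() > 0)
		_exit(0);	/* parent exits; child is not a group leader */
	if (setsid() < 0)
		perror("setsid");
	printf("sid=%d pgid=%d\n", (int)getsid(0), (int)getpgid(0));
	return 0;
}
#endif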
DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
 * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}

SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}
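/*
 * An illustrative, untested userspace sketch of the UNAME26 mapping above
 * (not part of this file): with the personality flag set, a 4.x kernel
 * reports itself as 2.6.(60+x).
 */
#if 0
#include <sys/personality.h>
#include <sys/utsname.h>
#include <stdio.h>

int main(void)
{
	struct utsname u;

	personality(PER_LINUX | UNAME26);
	uname(&u);
	/* e.g. "2.6.72" on a 4.12 kernel */
	printf("release: %s\n", u.release);
	return 0;
}
#endif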
#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}
SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}
/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/*
		 * Keep the capable check against init_user_ns until
		 * cgroups can contain all limits.
		 */
		if (new_rlim->rlim_max > rlim->rlim_max &&
				!capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead.
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling.  Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is
	 * a very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS))
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid)  &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
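/*
 * An illustrative, untested userspace sketch of the prlimit() wrapper for
 * sys_prlimit64 above (not part of this file; glibc >= 2.13): pid 0 means
 * the calling process, and either rlimit pointer may be NULL to make the
 * call get-only or set-only.
 */
#if 0
#define _GNU_SOURCE
#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
	struct rlimit old, new = { .rlim_cur = 4096, .rlim_max = 4096 };

	if (prlimit(0, RLIMIT_NOFILE, &new, &old))
		perror("prlimit");
	else
		printf("RLIMIT_NOFILE was %llu/%llu\n",
		       (unsigned long long)old.rlim_cur,
		       (unsigned long long)old.rlim_max);
	return 0;
}
#endif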
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases of: current multithreaded, non-current single-threaded,
 * and non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields.  If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting.  So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields.  But we don't do this yet to keep things simple.
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *)r, 0, sizeof(*r));
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	r->ru_utime = ns_to_timeval(utime);
	r->ru_stime = ns_to_timeval(stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
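/*
 * An illustrative, untested userspace sketch of reading the totals that
 * getrusage() accumulates above (not part of this file).  Note ru_maxrss is
 * exported in kilobytes; see the pages-to-KB conversion at the end of
 * getrusage() above.
 */
#if 0
#include <sys/resource.h>
#include <stdio.h>

int main(void)
{
	struct rusage r;

	if (getrusage(RUSAGE_SELF, &r) == 0)
		printf("maxrss=%ldkB minflt=%ld majflt=%ld\n",
		       r.ru_maxrss, r.ru_minflt, r.ru_majflt);
	return 0;
}
#endif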
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
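/*
 * An illustrative, untested userspace sketch of the umask() contract
 * implemented above (not part of this file): the new mask is swapped in
 * atomically (only the rwx bits are kept) and the previous mask is
 * returned; there is no error case.
 */
#if 0
#include <sys/stat.h>
#include <stdio.h>

int main(void)
{
	mode_t old = umask(022);

	printf("previous umask was %03o\n", (unsigned)old);
	umask(old);	/* restore */
	return 0;
}
#endif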
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct file *old_exe, *exe_file;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to an executable file,
	 * make sure that this one is executable as well, to avoid breaking
	 * the overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	/*
	 * Forbid changing mm->exe_file if the old file is still mapped.
	 */
	exe_file = get_mm_exe_file(mm);
	err = -EBUSY;
	if (exe_file) {
		struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &exe_file->f_path))
				goto exit_err;
		}

		up_read(&mm->mmap_sem);
		fput(exe_file);
	}

	err = 0;
	/* set the new file, lockless */
	get_file(exe.file);
	old_exe = xchg(&mm->exe_file, exe.file);
	if (old_exe)
		fput(old_exe);
exit:
	fdput(exe);
	return err;
exit_err:
	up_read(&mm->mmap_sem);
	fput(exe_file);
	goto exit;
}

/*
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	struct mm_struct *mm = current->mm;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of the allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data, <, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * @brk should be after @end_data in traditional maps.
	 */
	if (prctl_map->start_brk <= prctl_map->end_data ||
	    prctl_map->brk <= prctl_map->end_data)
		goto out;

	/*
	 * Nor should we allow overriding the limits, if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	/*
	 * Someone is trying to cheat the auxv vector.
	 */
	if (prctl_map->auxv_size) {
		if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
			goto out;
	}

	/*
	 * Finally, make sure the caller has the rights to
	 * change the /proc/pid/exe link: only local root should
	 * be allowed to.
	 */
	if (prctl_map->exe_fd != (u32)-1) {
		struct user_namespace *ns = current_user_ns();
		const struct cred *cred = current_cred();

		if (!uid_eq(cred->uid, make_kuid(ns, 0)) ||
		    !gid_eq(cred->gid, make_kgid(ns, 0)))
			goto out;
	}

	error = 0;
out:
	return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as the specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	down_write(&mm->mmap_sem);

	/*
	 * We don't validate that these members point to real, present VMAs,
	 * because the application may already have unmapped the corresponding
	 * VMAs, and the kernel uses these members mostly for statistics
	 * output in procfs, except for
	 *
	 *  - @start_brk/@brk, which are used in do_brk; but the kernel looks
	 *    up the VMAs when updating these members, so anything wrong
	 *    written here makes the kernel swear at the userspace program
	 *    but won't lead to any problem in the kernel itself.
	 */

	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;

	/*
	 * Note this update of @saved_auxv is lockless, so if someone reads
	 * this member in procfs while we're updating it, they may get partly
	 * updated results.  It's a known and acceptable trade-off: we leave
	 * it as is to avoid introducing additional locks here, which would
	 * make the kernel more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	up_write(&mm->mmap_sem);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */

static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values.  It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE];

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map;
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	down_write(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	prctl_map.start_code = mm->start_code;
	prctl_map.end_code = mm->end_code;
	prctl_map.start_data = mm->start_data;
	prctl_map.end_data = mm->end_data;
	prctl_map.start_brk = mm->start_brk;
	prctl_map.brk = mm->brk;
	prctl_map.start_stack = mm->start_stack;
	prctl_map.arg_start = mm->arg_start;
	prctl_map.arg_end = mm->arg_end;
	prctl_map.env_start = mm->env_start;
	prctl_map.env_end = mm->env_end;
	prctl_map.auxv = NULL;
	prctl_map.auxv_size = 0;
	prctl_map.exe_fd = -1;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If the command line arguments and environment are placed
	 * somewhere else on the stack, we can set them up here:
	 * ARG_START/END set up the command line arguments and
	 * ENV_START/END the environment.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code = prctl_map.start_code;
	mm->end_code = prctl_map.end_code;
	mm->start_data = prctl_map.start_data;
	mm->end_data = prctl_map.end_data;
	mm->start_brk = prctl_map.start_brk;
	mm->brk = prctl_map.brk;
	mm->start_stack = prctl_map.start_stack;
	mm->arg_start = prctl_map.arg_start;
	mm->arg_end = prctl_map.arg_end;
	mm->env_start = prctl_map.env_start;
	mm->env_end = prctl_map.env_end;

	error = 0;
out:
	up_write(&mm->mmap_sem);
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task already has has_child_subreaper set, all its
	 * descendants have the flag too and new descendants will
	 * inherit it on fork, so skip them.
	 *
	 * If we've found a child_reaper, skip the descendants in its
	 * subtree as they will never get out of their pid namespace.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (down_write_killable(&me->mm->mmap_sem))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		up_write(&me->mm->mmap_sem);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = MPX_ENABLE_MANAGEMENT();
		break;
	case PR_MPX_DISABLE_MANAGEMENT:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = MPX_DISABLE_MANAGEMENT();
		break;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	get_monotonic_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/*
	 * Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */