// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)		(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)		(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)		(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)		(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)		(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)		(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)		(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)		(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)			(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)			(-EINVAL)
#endif
#ifndef MPX_ENABLE_MANAGEMENT
# define MPX_ENABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef MPX_DISABLE_MANAGEMENT
# define MPX_DISABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)			(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a, b)		(-EINVAL)
#endif

/*
 * This is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past.
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
/*
 * The same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures.
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * Returns true if current's euid is the same as p's uid or euid,
 * or if current has CAP_SYS_NICE in p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe.
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid, cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * Set the priority of a task.
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
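/*
 * Worked example of the offset above (illustrative, not kernel code):
 * nice_to_rlimit() maps the nice range [-20, 19] onto [40, 1] as
 * 20 - nice, so task_nice(p) == -20 yields 40 and task_nice(p) == 19
 * yields 1. Assuming the glibc wrapper, userspace never sees this:
 *
 *	errno = 0;
 *	int nice = getpriority(PRIO_PROCESS, 0);	// glibc undoes the
 *	if (nice == -1 && errno)			// offset, returning
 *		perror("getpriority");			// -20..19 again
 */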
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * Change the user struct in a credentials set to match the new UID.
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root. We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}
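/*
 * Illustrative userspace sketch (not kernel code) of the permanent
 * privilege drop that the setregid() comment above describes, assuming
 * the usual libc wrappers:
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) != 0)	// sgid is set to the new egid too
 *		abort();		// refuse to run half-privileged
 */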
/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
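/*
 * Illustrative userspace sketch (not kernel code): with setresuid()
 * below, a setuid-root program can drop privileges temporarily by
 * keeping root in the saved uid, and regain them later:
 *
 *	uid_t ruid = getuid();
 *	setresuid(ruid, ruid, 0);	// drop; saved uid stays 0
 *	...
 *	setresuid(-1, 0, -1);		// regain effective root
 */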
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4BSD-compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access().
 */
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}
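/*
 * Userspace note (illustrative): setfsuid() returns the previous fsuid
 * whether or not the change succeeded, so portable callers detect
 * failure by probing with an invalid uid, as the man page suggests:
 *
 *	setfsuid(uid);
 *	if ((uid_t)setfsuid(-1) != uid)
 *		... the change did not take effect ...
 */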
/*
 * "Samma på svenska" (Swedish for "the same, in Swedish") --
 * setfsgid() mirrors setfsuid() above, but for the filesystem gid.
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}
#endif /* CONFIG_MULTIUSER */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
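/*
 * Illustrative userspace usage (assumes the libc wrapper): the return
 * value is elapsed ticks since an arbitrary point in the past, and the
 * per-process times are scaled by the clock tick:
 *
 *	struct tms t;
 *	clock_t now = times(&t);
 *	long hz = sysconf(_SC_CLK_TCK);	// ticks per second
 *	double cpu_s = (double)(t.tms_utime + t.tms_stime) / hz;
 */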
#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/*
	 * From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}
SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/*
	 * Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}
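/*
 * Illustrative userspace sketch (not kernel code): the classic
 * daemonization idiom. fork() first so the caller is not a process
 * group leader, which is exactly the -EPERM case above:
 *
 *	if (fork() > 0)
 *		_exit(0);		// parent exits; child is no leader
 *	if (setsid() < 0)
 *		perror("setsid");	// child is now session + group leader
 */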
DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40,
 * and we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
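/*
 * Worked example of the mapping above (illustrative; assumes a v4.4
 * kernel with the hypothetical UTS_RELEASE "4.4.0-42-generic"): the
 * scan stops at the '-', so rest == "-42-generic"; the minor version
 * from LINUX_VERSION_CODE is 4, so v == 64 and a UNAME26 task reads
 * "2.6.64-42-generic" instead of the real release string.
 */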
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname().
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}
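/*
 * Illustrative userspace usage (sketch; needs CAP_SYS_ADMIN in the user
 * namespace owning the UTS namespace, per the checks above):
 *
 *	const char name[] = "buildbox";
 *	if (sethostname(name, strlen(name)) != 0)
 *		perror("sethostname");
 */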
SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;

		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}
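/*
 * Example of the conversions above on a 32-bit kernel (illustrative):
 * ULONG_MAX is 2^32 - 1 there, so a userspace rlimit64 value of 2^32
 * satisfies rlim64_is_infinity() and becomes RLIM_INFINITY in struct
 * rlimit rather than being silently truncated to 32 bits.
 */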
/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/*
		 * Keep the capable check against init_user_ns until
		 * cgroups can contain all limits.
		 */
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry. But we use the zero value to mean "it was
			 * never set". So let's cheat and make it one second
			 * instead.
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS))
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid) &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
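/*
 * Illustrative userspace usage of prlimit64() via the glibc prlimit()
 * wrapper (glibc >= 2.13; not part of this file). Per
 * check_prlimit_permission(), this needs matching uids/gids on the
 * target or CAP_SYS_RESOURCE in its user namespace:
 *
 *	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 4096 };
 *	struct rlimit old;
 *	if (prlimit(pid, RLIMIT_NOFILE, &new, &old) != 0)
 *		perror("prlimit");
 */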
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases: current multithreaded, non-current single-threaded,
 * and non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single-threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *)r, 0, sizeof(*r));
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;
		/* fall through: RUSAGE_BOTH also accumulates the self counters */

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	r->ru_utime = ns_to_timeval(utime);
	r->ru_stime = ns_to_timeval(stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}
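/*
 * Illustrative userspace usage (assumes the libc wrapper; note that
 * RUSAGE_BOTH is kernel-internal and rejected by the syscall below):
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("maxrss: %ld kB\n", ru.ru_maxrss);
 */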
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct file *old_exe, *exe_file;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to an executable file,
	 * make sure that this one is executable as well, to avoid breaking
	 * the overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	/*
	 * Forbid mm->exe_file change if the old file is still mapped.
	 */
	exe_file = get_mm_exe_file(mm);
	err = -EBUSY;
	if (exe_file) {
		struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &exe_file->f_path))
				goto exit_err;
		}

		up_read(&mm->mmap_sem);
		fput(exe_file);
	}

	err = 0;
	/* set the new file, lockless */
	get_file(exe.file);
	old_exe = xchg(&mm->exe_file, exe.file);
	if (old_exe)
		fput(old_exe);
exit:
	fdput(exe);
	return err;
exit_err:
	up_read(&mm->mmap_sem);
	fput(exe_file);
	goto exit;
}
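/*
 * Illustrative userspace usage (sketch): retarget /proc/self/exe at an
 * already-open executable regular file. The prctl_set_mm() path below
 * requires CAP_SYS_RESOURCE, and the call fails with -EBUSY while the
 * old exe file is still mapped:
 *
 *	int fd = open("/usr/bin/true", O_RDONLY);
 *	if (prctl(PR_SET_MM, PR_SET_MM_EXE_FILE, fd, 0, 0) != 0)
 *		perror("prctl");
 */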
/*
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	struct mm_struct *mm = current->mm;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of the allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data, <, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * @brk should be after @end_data in traditional maps.
	 */
	if (prctl_map->start_brk <= prctl_map->end_data ||
	    prctl_map->brk <= prctl_map->end_data)
		goto out;

	/*
	 * Nor should we allow the limits to be overridden if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	/*
	 * Someone is trying to cheat the auxv vector.
	 */
	if (prctl_map->auxv_size) {
		if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
			goto out;
	}

	/*
	 * Finally, make sure the caller has the rights to
	 * change the /proc/pid/exe link: only a local sys admin should
	 * be allowed to.
	 */
	if (prctl_map->exe_fd != (u32)-1) {
		if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
			goto out;
	}

	error = 0;
out:
	return error;
}
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as the specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	down_write(&mm->mmap_sem);

	/*
	 * We don't validate that these members point to real, present VMAs:
	 * the application may already have unmapped the corresponding VMAs,
	 * and the kernel mostly uses these members for statistics output in
	 * procfs. The exception:
	 *
	 *  - @start_brk/@brk are used in do_brk, and the kernel does look up
	 *    VMAs when updating these members, so bogus values written here
	 *    make the kernel complain at the userspace program but won't
	 *    lead to any problem in the kernel itself.
	 */

	mm->start_code = prctl_map.start_code;
	mm->end_code = prctl_map.end_code;
	mm->start_data = prctl_map.start_data;
	mm->end_data = prctl_map.end_data;
	mm->start_brk = prctl_map.start_brk;
	mm->brk = prctl_map.brk;
	mm->start_stack = prctl_map.start_stack;
	mm->arg_start = prctl_map.arg_start;
	mm->arg_end = prctl_map.arg_end;
	mm->env_start = prctl_map.env_start;
	mm->env_end = prctl_map.env_end;

	/*
	 * Note this update of @saved_auxv is lockless, thus if someone
	 * reads this member in procfs while we're updating it they may
	 * get partly updated results. It's a known and acceptable trade
	 * off: we leave it as is to avoid introducing additional locks
	 * here, which would make the kernel more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	up_write(&mm->mmap_sem);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */

static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values. It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE];

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}
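/*
 * Illustrative userspace flow for PR_SET_MM_MAP (sketch; needs
 * CONFIG_CHECKPOINT_RESTORE and, per validate_prctl_map(), a consistent
 * layout plus CAP_SYS_ADMIN if exe_fd is set):
 *
 *	struct prctl_mm_map map;
 *	unsigned int size;
 *	prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, (unsigned long)&size, 0, 0);
 *	// fill 'map'; set map.exe_fd = -1 to keep the current exe file
 *	prctl(PR_SET_MM, PR_SET_MM_MAP, (unsigned long)&map, size, 0);
 */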
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map;
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	down_write(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	prctl_map.start_code = mm->start_code;
	prctl_map.end_code = mm->end_code;
	prctl_map.start_data = mm->start_data;
	prctl_map.end_data = mm->end_data;
	prctl_map.start_brk = mm->start_brk;
	prctl_map.brk = mm->brk;
	prctl_map.start_stack = mm->start_stack;
	prctl_map.arg_start = mm->arg_start;
	prctl_map.arg_end = mm->arg_end;
	prctl_map.env_start = mm->env_start;
	prctl_map.env_end = mm->env_end;
	prctl_map.auxv = NULL;
	prctl_map.auxv_size = 0;
	prctl_map.exe_fd = -1;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If command line arguments and environment are placed
	 * somewhere else on the stack, we can set them up here:
	 * ARG_START/END to set up the command line arguments and
	 * ENV_START/END for the environment.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code = prctl_map.start_code;
	mm->end_code = prctl_map.end_code;
	mm->start_data = prctl_map.start_data;
	mm->end_data = prctl_map.end_data;
	mm->start_brk = prctl_map.start_brk;
	mm->brk = prctl_map.brk;
	mm->start_stack = prctl_map.start_stack;
	mm->arg_start = prctl_map.arg_start;
	mm->arg_end = prctl_map.arg_end;
	mm->env_start = prctl_map.env_start;
	mm->env_end = prctl_map.env_end;

	error = 0;
out:
	up_write(&mm->mmap_sem);
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif
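/*
 * Hypothetical userspace sketch (not part of this file): moving the
 * recorded command-line window after relocating the strings, so that
 * /proc/pid/cmdline reports the new copy.  new_args and new_args_end
 * are placeholder addresses inside an existing mapping; as enforced
 * above, these single-field options require CAP_SYS_RESOURCE and the
 * address must point at a mapped VMA.
 *
 *	prctl(PR_SET_MM, PR_SET_MM_ARG_START, new_args, 0, 0);
 *	prctl(PR_SET_MM, PR_SET_MM_ARG_END, new_args_end, 0, 0);
 */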
static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task has has_child_subreaper set, all its descendants
	 * already have this flag too and new descendants will inherit
	 * it on fork, so skip them.
	 *
	 * If we've found a child_reaper, skip the descendants in its
	 * subtree as they will never get out of the pidns.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (down_write_killable(&me->mm->mmap_sem))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		up_write(&me->mm->mmap_sem);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = MPX_ENABLE_MANAGEMENT();
		break;
	case PR_MPX_DISABLE_MANAGEMENT:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = MPX_DISABLE_MANAGEMENT();
		break;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
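/*
 * Hypothetical userspace sketch (not part of this file): two of the
 * most common prctl() uses handled above, naming the calling thread
 * (truncated to 15 bytes plus NUL by PR_SET_NAME) and requesting
 * SIGTERM when the parent dies.  Assumes <sys/prctl.h> and <signal.h>.
 *
 *	prctl(PR_SET_NAME, (unsigned long)"worker/0", 0, 0, 0);
 *	prctl(PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0);
 */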
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	get_monotonic_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
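/*
 * Worked example of the folding above (illustrative numbers only):
 * with mem_unit == 4096 (2^12, a common page size) the loop runs
 * twelve times, doubling mem_total at each step.  If none of those
 * doublings overflows, bitcount ends up as 12, every memory value is
 * shifted left by 12 and mem_unit becomes 1, i.e. all sizes are then
 * reported in bytes.  If any doubling would overflow, we bail out and
 * leave the values in units of the original mem_unit.
 */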
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and
	 * scale down if needed
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */
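/*
 * Hypothetical userspace sketch (not part of this file): consuming the
 * values filled in by do_sysinfo() via the libc wrapper.  Sizes must
 * be scaled by mem_unit, which is 1 (bytes) whenever the totals fit a
 * native unsigned long, as shown above.  Assumes <stdio.h> and
 * <sys/sysinfo.h>.
 *
 *	struct sysinfo si;
 *
 *	if (sysinfo(&si) == 0)
 *		printf("uptime %lds, free %llu bytes\n", si.uptime,
 *		       (unsigned long long)si.freeram * si.mem_unit);
 */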