1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/kernel/sys.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 */ 7 8 #include <linux/export.h> 9 #include <linux/mm.h> 10 #include <linux/utsname.h> 11 #include <linux/mman.h> 12 #include <linux/reboot.h> 13 #include <linux/prctl.h> 14 #include <linux/highuid.h> 15 #include <linux/fs.h> 16 #include <linux/kmod.h> 17 #include <linux/perf_event.h> 18 #include <linux/resource.h> 19 #include <linux/kernel.h> 20 #include <linux/workqueue.h> 21 #include <linux/capability.h> 22 #include <linux/device.h> 23 #include <linux/key.h> 24 #include <linux/times.h> 25 #include <linux/posix-timers.h> 26 #include <linux/security.h> 27 #include <linux/dcookies.h> 28 #include <linux/suspend.h> 29 #include <linux/tty.h> 30 #include <linux/signal.h> 31 #include <linux/cn_proc.h> 32 #include <linux/getcpu.h> 33 #include <linux/task_io_accounting_ops.h> 34 #include <linux/seccomp.h> 35 #include <linux/cpu.h> 36 #include <linux/personality.h> 37 #include <linux/ptrace.h> 38 #include <linux/fs_struct.h> 39 #include <linux/file.h> 40 #include <linux/mount.h> 41 #include <linux/gfp.h> 42 #include <linux/syscore_ops.h> 43 #include <linux/version.h> 44 #include <linux/ctype.h> 45 46 #include <linux/compat.h> 47 #include <linux/syscalls.h> 48 #include <linux/kprobes.h> 49 #include <linux/user_namespace.h> 50 #include <linux/binfmts.h> 51 52 #include <linux/sched.h> 53 #include <linux/sched/autogroup.h> 54 #include <linux/sched/loadavg.h> 55 #include <linux/sched/stat.h> 56 #include <linux/sched/mm.h> 57 #include <linux/sched/coredump.h> 58 #include <linux/sched/task.h> 59 #include <linux/sched/cputime.h> 60 #include <linux/rcupdate.h> 61 #include <linux/uidgid.h> 62 #include <linux/cred.h> 63 64 #include <linux/nospec.h> 65 66 #include <linux/kmsg_dump.h> 67 /* Move somewhere else to avoid recompiling? 
*/ 68 #include <generated/utsrelease.h> 69 70 #include <linux/uaccess.h> 71 #include <asm/io.h> 72 #include <asm/unistd.h> 73 74 #include "uid16.h" 75 76 #ifndef SET_UNALIGN_CTL 77 # define SET_UNALIGN_CTL(a, b) (-EINVAL) 78 #endif 79 #ifndef GET_UNALIGN_CTL 80 # define GET_UNALIGN_CTL(a, b) (-EINVAL) 81 #endif 82 #ifndef SET_FPEMU_CTL 83 # define SET_FPEMU_CTL(a, b) (-EINVAL) 84 #endif 85 #ifndef GET_FPEMU_CTL 86 # define GET_FPEMU_CTL(a, b) (-EINVAL) 87 #endif 88 #ifndef SET_FPEXC_CTL 89 # define SET_FPEXC_CTL(a, b) (-EINVAL) 90 #endif 91 #ifndef GET_FPEXC_CTL 92 # define GET_FPEXC_CTL(a, b) (-EINVAL) 93 #endif 94 #ifndef GET_ENDIAN 95 # define GET_ENDIAN(a, b) (-EINVAL) 96 #endif 97 #ifndef SET_ENDIAN 98 # define SET_ENDIAN(a, b) (-EINVAL) 99 #endif 100 #ifndef GET_TSC_CTL 101 # define GET_TSC_CTL(a) (-EINVAL) 102 #endif 103 #ifndef SET_TSC_CTL 104 # define SET_TSC_CTL(a) (-EINVAL) 105 #endif 106 #ifndef GET_FP_MODE 107 # define GET_FP_MODE(a) (-EINVAL) 108 #endif 109 #ifndef SET_FP_MODE 110 # define SET_FP_MODE(a,b) (-EINVAL) 111 #endif 112 #ifndef SVE_SET_VL 113 # define SVE_SET_VL(a) (-EINVAL) 114 #endif 115 #ifndef SVE_GET_VL 116 # define SVE_GET_VL() (-EINVAL) 117 #endif 118 #ifndef PAC_RESET_KEYS 119 # define PAC_RESET_KEYS(a, b) (-EINVAL) 120 #endif 121 #ifndef SET_TAGGED_ADDR_CTRL 122 # define SET_TAGGED_ADDR_CTRL(a) (-EINVAL) 123 #endif 124 #ifndef GET_TAGGED_ADDR_CTRL 125 # define GET_TAGGED_ADDR_CTRL() (-EINVAL) 126 #endif 127 128 /* 129 * this is where the system-wide overflow UID and GID are defined, for 130 * architectures that now have 32-bit UID/GID but didn't in the past 131 */ 132 133 int overflowuid = DEFAULT_OVERFLOWUID; 134 int overflowgid = DEFAULT_OVERFLOWGID; 135 136 EXPORT_SYMBOL(overflowuid); 137 EXPORT_SYMBOL(overflowgid); 138 139 /* 140 * the same as above, but for filesystems which can only store a 16-bit 141 * UID and GID. as such, this is needed on all architectures 142 */ 143 144 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID; 145 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID; 146 147 EXPORT_SYMBOL(fs_overflowuid); 148 EXPORT_SYMBOL(fs_overflowgid); 149 150 /* 151 * Returns true if current's euid is same as p's uid or euid, 152 * or has CAP_SYS_NICE to p's user_ns. 
153 * 154 * Called with rcu_read_lock, creds are safe 155 */ 156 static bool set_one_prio_perm(struct task_struct *p) 157 { 158 const struct cred *cred = current_cred(), *pcred = __task_cred(p); 159 160 if (uid_eq(pcred->uid, cred->euid) || 161 uid_eq(pcred->euid, cred->euid)) 162 return true; 163 if (ns_capable(pcred->user_ns, CAP_SYS_NICE)) 164 return true; 165 return false; 166 } 167 168 /* 169 * set the priority of a task 170 * - the caller must hold the RCU read lock 171 */ 172 static int set_one_prio(struct task_struct *p, int niceval, int error) 173 { 174 int no_nice; 175 176 if (!set_one_prio_perm(p)) { 177 error = -EPERM; 178 goto out; 179 } 180 if (niceval < task_nice(p) && !can_nice(p, niceval)) { 181 error = -EACCES; 182 goto out; 183 } 184 no_nice = security_task_setnice(p, niceval); 185 if (no_nice) { 186 error = no_nice; 187 goto out; 188 } 189 if (error == -ESRCH) 190 error = 0; 191 set_user_nice(p, niceval); 192 out: 193 return error; 194 } 195 196 SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) 197 { 198 struct task_struct *g, *p; 199 struct user_struct *user; 200 const struct cred *cred = current_cred(); 201 int error = -EINVAL; 202 struct pid *pgrp; 203 kuid_t uid; 204 205 if (which > PRIO_USER || which < PRIO_PROCESS) 206 goto out; 207 208 /* normalize: avoid signed division (rounding problems) */ 209 error = -ESRCH; 210 if (niceval < MIN_NICE) 211 niceval = MIN_NICE; 212 if (niceval > MAX_NICE) 213 niceval = MAX_NICE; 214 215 rcu_read_lock(); 216 read_lock(&tasklist_lock); 217 switch (which) { 218 case PRIO_PROCESS: 219 if (who) 220 p = find_task_by_vpid(who); 221 else 222 p = current; 223 if (p) 224 error = set_one_prio(p, niceval, error); 225 break; 226 case PRIO_PGRP: 227 if (who) 228 pgrp = find_vpid(who); 229 else 230 pgrp = task_pgrp(current); 231 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 232 error = set_one_prio(p, niceval, error); 233 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 234 break; 235 case PRIO_USER: 236 uid = make_kuid(cred->user_ns, who); 237 user = cred->user; 238 if (!who) 239 uid = cred->uid; 240 else if (!uid_eq(uid, cred->uid)) { 241 user = find_user(uid); 242 if (!user) 243 goto out_unlock; /* No processes for this user */ 244 } 245 do_each_thread(g, p) { 246 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) 247 error = set_one_prio(p, niceval, error); 248 } while_each_thread(g, p); 249 if (!uid_eq(uid, cred->uid)) 250 free_uid(user); /* For find_user() */ 251 break; 252 } 253 out_unlock: 254 read_unlock(&tasklist_lock); 255 rcu_read_unlock(); 256 out: 257 return error; 258 } 259 260 /* 261 * Ugh. To avoid negative return values, "getpriority()" will 262 * not return the normal nice-value, but a negated value that 263 * has been offset by 20 (ie it returns 40..1 instead of -20..19) 264 * to stay compatible. 
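 *
 * For example, a task at nice -20 is reported as 40, nice 0 as 20 and
 * nice +19 as 1; nice_to_rlimit(), used below, implements exactly this
 * mapping.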
265 */ 266 SYSCALL_DEFINE2(getpriority, int, which, int, who) 267 { 268 struct task_struct *g, *p; 269 struct user_struct *user; 270 const struct cred *cred = current_cred(); 271 long niceval, retval = -ESRCH; 272 struct pid *pgrp; 273 kuid_t uid; 274 275 if (which > PRIO_USER || which < PRIO_PROCESS) 276 return -EINVAL; 277 278 rcu_read_lock(); 279 read_lock(&tasklist_lock); 280 switch (which) { 281 case PRIO_PROCESS: 282 if (who) 283 p = find_task_by_vpid(who); 284 else 285 p = current; 286 if (p) { 287 niceval = nice_to_rlimit(task_nice(p)); 288 if (niceval > retval) 289 retval = niceval; 290 } 291 break; 292 case PRIO_PGRP: 293 if (who) 294 pgrp = find_vpid(who); 295 else 296 pgrp = task_pgrp(current); 297 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 298 niceval = nice_to_rlimit(task_nice(p)); 299 if (niceval > retval) 300 retval = niceval; 301 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 302 break; 303 case PRIO_USER: 304 uid = make_kuid(cred->user_ns, who); 305 user = cred->user; 306 if (!who) 307 uid = cred->uid; 308 else if (!uid_eq(uid, cred->uid)) { 309 user = find_user(uid); 310 if (!user) 311 goto out_unlock; /* No processes for this user */ 312 } 313 do_each_thread(g, p) { 314 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) { 315 niceval = nice_to_rlimit(task_nice(p)); 316 if (niceval > retval) 317 retval = niceval; 318 } 319 } while_each_thread(g, p); 320 if (!uid_eq(uid, cred->uid)) 321 free_uid(user); /* for find_user() */ 322 break; 323 } 324 out_unlock: 325 read_unlock(&tasklist_lock); 326 rcu_read_unlock(); 327 328 return retval; 329 } 330 331 /* 332 * Unprivileged users may change the real gid to the effective gid 333 * or vice versa. (BSD-style) 334 * 335 * If you set the real gid at all, or set the effective gid to a value not 336 * equal to the real gid, then the saved gid is set to the new effective gid. 337 * 338 * This makes it possible for a setgid program to completely drop its 339 * privileges, which is often a useful assertion to make when you are doing 340 * a security audit over a program. 341 * 342 * The general idea is that a program which uses just setregid() will be 343 * 100% compatible with BSD. A program which uses just setgid() will be 344 * 100% compatible with POSIX with saved IDs. 345 * 346 * SMP: There are not races, the GIDs are checked only by filesystem 347 * operations (as far as semantic preservation is concerned). 
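 *
 * For example, a setgid helper that wants to shed its group privilege
 * for good can call setregid(getgid(), getgid()): the real gid is
 * written, so the saved gid is set to the new effective gid as described
 * above, leaving all three ids at the caller's real gid.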
348 */ 349 #ifdef CONFIG_MULTIUSER 350 long __sys_setregid(gid_t rgid, gid_t egid) 351 { 352 struct user_namespace *ns = current_user_ns(); 353 const struct cred *old; 354 struct cred *new; 355 int retval; 356 kgid_t krgid, kegid; 357 358 krgid = make_kgid(ns, rgid); 359 kegid = make_kgid(ns, egid); 360 361 if ((rgid != (gid_t) -1) && !gid_valid(krgid)) 362 return -EINVAL; 363 if ((egid != (gid_t) -1) && !gid_valid(kegid)) 364 return -EINVAL; 365 366 new = prepare_creds(); 367 if (!new) 368 return -ENOMEM; 369 old = current_cred(); 370 371 retval = -EPERM; 372 if (rgid != (gid_t) -1) { 373 if (gid_eq(old->gid, krgid) || 374 gid_eq(old->egid, krgid) || 375 ns_capable(old->user_ns, CAP_SETGID)) 376 new->gid = krgid; 377 else 378 goto error; 379 } 380 if (egid != (gid_t) -1) { 381 if (gid_eq(old->gid, kegid) || 382 gid_eq(old->egid, kegid) || 383 gid_eq(old->sgid, kegid) || 384 ns_capable(old->user_ns, CAP_SETGID)) 385 new->egid = kegid; 386 else 387 goto error; 388 } 389 390 if (rgid != (gid_t) -1 || 391 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid))) 392 new->sgid = new->egid; 393 new->fsgid = new->egid; 394 395 return commit_creds(new); 396 397 error: 398 abort_creds(new); 399 return retval; 400 } 401 402 SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) 403 { 404 return __sys_setregid(rgid, egid); 405 } 406 407 /* 408 * setgid() is implemented like SysV w/ SAVED_IDS 409 * 410 * SMP: Same implicit races as above. 411 */ 412 long __sys_setgid(gid_t gid) 413 { 414 struct user_namespace *ns = current_user_ns(); 415 const struct cred *old; 416 struct cred *new; 417 int retval; 418 kgid_t kgid; 419 420 kgid = make_kgid(ns, gid); 421 if (!gid_valid(kgid)) 422 return -EINVAL; 423 424 new = prepare_creds(); 425 if (!new) 426 return -ENOMEM; 427 old = current_cred(); 428 429 retval = -EPERM; 430 if (ns_capable(old->user_ns, CAP_SETGID)) 431 new->gid = new->egid = new->sgid = new->fsgid = kgid; 432 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid)) 433 new->egid = new->fsgid = kgid; 434 else 435 goto error; 436 437 return commit_creds(new); 438 439 error: 440 abort_creds(new); 441 return retval; 442 } 443 444 SYSCALL_DEFINE1(setgid, gid_t, gid) 445 { 446 return __sys_setgid(gid); 447 } 448 449 /* 450 * change the user struct in a credentials set to match the new UID 451 */ 452 static int set_user(struct cred *new) 453 { 454 struct user_struct *new_user; 455 456 new_user = alloc_uid(new->uid); 457 if (!new_user) 458 return -EAGAIN; 459 460 /* 461 * We don't fail in case of NPROC limit excess here because too many 462 * poorly written programs don't check set*uid() return code, assuming 463 * it never fails if called by root. We may still enforce NPROC limit 464 * for programs doing set*uid()+execve() by harmlessly deferring the 465 * failure to the execve() stage. 466 */ 467 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) && 468 new_user != INIT_USER) 469 current->flags |= PF_NPROC_EXCEEDED; 470 else 471 current->flags &= ~PF_NPROC_EXCEEDED; 472 473 free_uid(new->user); 474 new->user = new_user; 475 return 0; 476 } 477 478 /* 479 * Unprivileged users may change the real uid to the effective uid 480 * or vice versa. (BSD-style) 481 * 482 * If you set the real uid at all, or set the effective uid to a value not 483 * equal to the real uid, then the saved uid is set to the new effective uid. 
484 * 485 * This makes it possible for a setuid program to completely drop its 486 * privileges, which is often a useful assertion to make when you are doing 487 * a security audit over a program. 488 * 489 * The general idea is that a program which uses just setreuid() will be 490 * 100% compatible with BSD. A program which uses just setuid() will be 491 * 100% compatible with POSIX with saved IDs. 492 */ 493 long __sys_setreuid(uid_t ruid, uid_t euid) 494 { 495 struct user_namespace *ns = current_user_ns(); 496 const struct cred *old; 497 struct cred *new; 498 int retval; 499 kuid_t kruid, keuid; 500 501 kruid = make_kuid(ns, ruid); 502 keuid = make_kuid(ns, euid); 503 504 if ((ruid != (uid_t) -1) && !uid_valid(kruid)) 505 return -EINVAL; 506 if ((euid != (uid_t) -1) && !uid_valid(keuid)) 507 return -EINVAL; 508 509 new = prepare_creds(); 510 if (!new) 511 return -ENOMEM; 512 old = current_cred(); 513 514 retval = -EPERM; 515 if (ruid != (uid_t) -1) { 516 new->uid = kruid; 517 if (!uid_eq(old->uid, kruid) && 518 !uid_eq(old->euid, kruid) && 519 !ns_capable_setid(old->user_ns, CAP_SETUID)) 520 goto error; 521 } 522 523 if (euid != (uid_t) -1) { 524 new->euid = keuid; 525 if (!uid_eq(old->uid, keuid) && 526 !uid_eq(old->euid, keuid) && 527 !uid_eq(old->suid, keuid) && 528 !ns_capable_setid(old->user_ns, CAP_SETUID)) 529 goto error; 530 } 531 532 if (!uid_eq(new->uid, old->uid)) { 533 retval = set_user(new); 534 if (retval < 0) 535 goto error; 536 } 537 if (ruid != (uid_t) -1 || 538 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid))) 539 new->suid = new->euid; 540 new->fsuid = new->euid; 541 542 retval = security_task_fix_setuid(new, old, LSM_SETID_RE); 543 if (retval < 0) 544 goto error; 545 546 return commit_creds(new); 547 548 error: 549 abort_creds(new); 550 return retval; 551 } 552 553 SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) 554 { 555 return __sys_setreuid(ruid, euid); 556 } 557 558 /* 559 * setuid() is implemented like SysV with SAVED_IDS 560 * 561 * Note that SAVED_ID's is deficient in that a setuid root program 562 * like sendmail, for example, cannot set its uid to be a normal 563 * user and then switch back, because if you're root, setuid() sets 564 * the saved uid too. If you don't like this, blame the bright people 565 * in the POSIX committee and/or USG. Note that the BSD-style setreuid() 566 * will allow a root program to temporarily drop privileges and be able to 567 * regain them by swapping the real and effective uid. 
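 *
 * (e.g. a setuid-root program can call setreuid(geteuid(), getuid()) to
 * run with the invoking user's euid for a while and swap the two back
 * when it needs its root privileges again.)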
568 */ 569 long __sys_setuid(uid_t uid) 570 { 571 struct user_namespace *ns = current_user_ns(); 572 const struct cred *old; 573 struct cred *new; 574 int retval; 575 kuid_t kuid; 576 577 kuid = make_kuid(ns, uid); 578 if (!uid_valid(kuid)) 579 return -EINVAL; 580 581 new = prepare_creds(); 582 if (!new) 583 return -ENOMEM; 584 old = current_cred(); 585 586 retval = -EPERM; 587 if (ns_capable_setid(old->user_ns, CAP_SETUID)) { 588 new->suid = new->uid = kuid; 589 if (!uid_eq(kuid, old->uid)) { 590 retval = set_user(new); 591 if (retval < 0) 592 goto error; 593 } 594 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) { 595 goto error; 596 } 597 598 new->fsuid = new->euid = kuid; 599 600 retval = security_task_fix_setuid(new, old, LSM_SETID_ID); 601 if (retval < 0) 602 goto error; 603 604 return commit_creds(new); 605 606 error: 607 abort_creds(new); 608 return retval; 609 } 610 611 SYSCALL_DEFINE1(setuid, uid_t, uid) 612 { 613 return __sys_setuid(uid); 614 } 615 616 617 /* 618 * This function implements a generic ability to update ruid, euid, 619 * and suid. This allows you to implement the 4.4 compatible seteuid(). 620 */ 621 long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) 622 { 623 struct user_namespace *ns = current_user_ns(); 624 const struct cred *old; 625 struct cred *new; 626 int retval; 627 kuid_t kruid, keuid, ksuid; 628 629 kruid = make_kuid(ns, ruid); 630 keuid = make_kuid(ns, euid); 631 ksuid = make_kuid(ns, suid); 632 633 if ((ruid != (uid_t) -1) && !uid_valid(kruid)) 634 return -EINVAL; 635 636 if ((euid != (uid_t) -1) && !uid_valid(keuid)) 637 return -EINVAL; 638 639 if ((suid != (uid_t) -1) && !uid_valid(ksuid)) 640 return -EINVAL; 641 642 new = prepare_creds(); 643 if (!new) 644 return -ENOMEM; 645 646 old = current_cred(); 647 648 retval = -EPERM; 649 if (!ns_capable_setid(old->user_ns, CAP_SETUID)) { 650 if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) && 651 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid)) 652 goto error; 653 if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) && 654 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid)) 655 goto error; 656 if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) && 657 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid)) 658 goto error; 659 } 660 661 if (ruid != (uid_t) -1) { 662 new->uid = kruid; 663 if (!uid_eq(kruid, old->uid)) { 664 retval = set_user(new); 665 if (retval < 0) 666 goto error; 667 } 668 } 669 if (euid != (uid_t) -1) 670 new->euid = keuid; 671 if (suid != (uid_t) -1) 672 new->suid = ksuid; 673 new->fsuid = new->euid; 674 675 retval = security_task_fix_setuid(new, old, LSM_SETID_RES); 676 if (retval < 0) 677 goto error; 678 679 return commit_creds(new); 680 681 error: 682 abort_creds(new); 683 return retval; 684 } 685 686 SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) 687 { 688 return __sys_setresuid(ruid, euid, suid); 689 } 690 691 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp) 692 { 693 const struct cred *cred = current_cred(); 694 int retval; 695 uid_t ruid, euid, suid; 696 697 ruid = from_kuid_munged(cred->user_ns, cred->uid); 698 euid = from_kuid_munged(cred->user_ns, cred->euid); 699 suid = from_kuid_munged(cred->user_ns, cred->suid); 700 701 retval = put_user(ruid, ruidp); 702 if (!retval) { 703 retval = put_user(euid, euidp); 704 if (!retval) 705 return put_user(suid, suidp); 706 } 707 return retval; 708 } 709 710 /* 711 * Same as above, but for rgid, egid, sgid. 
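 *
 * Passing -1 for any of rgid, egid or sgid leaves that particular id
 * unchanged, so e.g. setresgid(-1, gid, -1) changes only the effective
 * gid (and, with it, the fsgid).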
712 */ 713 long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) 714 { 715 struct user_namespace *ns = current_user_ns(); 716 const struct cred *old; 717 struct cred *new; 718 int retval; 719 kgid_t krgid, kegid, ksgid; 720 721 krgid = make_kgid(ns, rgid); 722 kegid = make_kgid(ns, egid); 723 ksgid = make_kgid(ns, sgid); 724 725 if ((rgid != (gid_t) -1) && !gid_valid(krgid)) 726 return -EINVAL; 727 if ((egid != (gid_t) -1) && !gid_valid(kegid)) 728 return -EINVAL; 729 if ((sgid != (gid_t) -1) && !gid_valid(ksgid)) 730 return -EINVAL; 731 732 new = prepare_creds(); 733 if (!new) 734 return -ENOMEM; 735 old = current_cred(); 736 737 retval = -EPERM; 738 if (!ns_capable(old->user_ns, CAP_SETGID)) { 739 if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) && 740 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid)) 741 goto error; 742 if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) && 743 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid)) 744 goto error; 745 if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) && 746 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid)) 747 goto error; 748 } 749 750 if (rgid != (gid_t) -1) 751 new->gid = krgid; 752 if (egid != (gid_t) -1) 753 new->egid = kegid; 754 if (sgid != (gid_t) -1) 755 new->sgid = ksgid; 756 new->fsgid = new->egid; 757 758 return commit_creds(new); 759 760 error: 761 abort_creds(new); 762 return retval; 763 } 764 765 SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) 766 { 767 return __sys_setresgid(rgid, egid, sgid); 768 } 769 770 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp) 771 { 772 const struct cred *cred = current_cred(); 773 int retval; 774 gid_t rgid, egid, sgid; 775 776 rgid = from_kgid_munged(cred->user_ns, cred->gid); 777 egid = from_kgid_munged(cred->user_ns, cred->egid); 778 sgid = from_kgid_munged(cred->user_ns, cred->sgid); 779 780 retval = put_user(rgid, rgidp); 781 if (!retval) { 782 retval = put_user(egid, egidp); 783 if (!retval) 784 retval = put_user(sgid, sgidp); 785 } 786 787 return retval; 788 } 789 790 791 /* 792 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This 793 * is used for "access()" and for the NFS daemon (letting nfsd stay at 794 * whatever uid it wants to). It normally shadows "euid", except when 795 * explicitly set by setfsuid() or for access.. 796 */ 797 long __sys_setfsuid(uid_t uid) 798 { 799 const struct cred *old; 800 struct cred *new; 801 uid_t old_fsuid; 802 kuid_t kuid; 803 804 old = current_cred(); 805 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid); 806 807 kuid = make_kuid(old->user_ns, uid); 808 if (!uid_valid(kuid)) 809 return old_fsuid; 810 811 new = prepare_creds(); 812 if (!new) 813 return old_fsuid; 814 815 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) || 816 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) || 817 ns_capable_setid(old->user_ns, CAP_SETUID)) { 818 if (!uid_eq(kuid, old->fsuid)) { 819 new->fsuid = kuid; 820 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) 821 goto change_okay; 822 } 823 } 824 825 abort_creds(new); 826 return old_fsuid; 827 828 change_okay: 829 commit_creds(new); 830 return old_fsuid; 831 } 832 833 SYSCALL_DEFINE1(setfsuid, uid_t, uid) 834 { 835 return __sys_setfsuid(uid); 836 } 837 838 /* 839 * Samma på svenska.. 
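 * ("The same in Swedish" -- i.e. setfsgid() below mirrors setfsuid()
 * above, just for the filesystem gid.)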
840 */ 841 long __sys_setfsgid(gid_t gid) 842 { 843 const struct cred *old; 844 struct cred *new; 845 gid_t old_fsgid; 846 kgid_t kgid; 847 848 old = current_cred(); 849 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid); 850 851 kgid = make_kgid(old->user_ns, gid); 852 if (!gid_valid(kgid)) 853 return old_fsgid; 854 855 new = prepare_creds(); 856 if (!new) 857 return old_fsgid; 858 859 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) || 860 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) || 861 ns_capable(old->user_ns, CAP_SETGID)) { 862 if (!gid_eq(kgid, old->fsgid)) { 863 new->fsgid = kgid; 864 goto change_okay; 865 } 866 } 867 868 abort_creds(new); 869 return old_fsgid; 870 871 change_okay: 872 commit_creds(new); 873 return old_fsgid; 874 } 875 876 SYSCALL_DEFINE1(setfsgid, gid_t, gid) 877 { 878 return __sys_setfsgid(gid); 879 } 880 #endif /* CONFIG_MULTIUSER */ 881 882 /** 883 * sys_getpid - return the thread group id of the current process 884 * 885 * Note, despite the name, this returns the tgid not the pid. The tgid and 886 * the pid are identical unless CLONE_THREAD was specified on clone() in 887 * which case the tgid is the same in all threads of the same group. 888 * 889 * This is SMP safe as current->tgid does not change. 890 */ 891 SYSCALL_DEFINE0(getpid) 892 { 893 return task_tgid_vnr(current); 894 } 895 896 /* Thread ID - the internal kernel "pid" */ 897 SYSCALL_DEFINE0(gettid) 898 { 899 return task_pid_vnr(current); 900 } 901 902 /* 903 * Accessing ->real_parent is not SMP-safe, it could 904 * change from under us. However, we can use a stale 905 * value of ->real_parent under rcu_read_lock(), see 906 * release_task()->call_rcu(delayed_put_task_struct). 907 */ 908 SYSCALL_DEFINE0(getppid) 909 { 910 int pid; 911 912 rcu_read_lock(); 913 pid = task_tgid_vnr(rcu_dereference(current->real_parent)); 914 rcu_read_unlock(); 915 916 return pid; 917 } 918 919 SYSCALL_DEFINE0(getuid) 920 { 921 /* Only we change this so SMP safe */ 922 return from_kuid_munged(current_user_ns(), current_uid()); 923 } 924 925 SYSCALL_DEFINE0(geteuid) 926 { 927 /* Only we change this so SMP safe */ 928 return from_kuid_munged(current_user_ns(), current_euid()); 929 } 930 931 SYSCALL_DEFINE0(getgid) 932 { 933 /* Only we change this so SMP safe */ 934 return from_kgid_munged(current_user_ns(), current_gid()); 935 } 936 937 SYSCALL_DEFINE0(getegid) 938 { 939 /* Only we change this so SMP safe */ 940 return from_kgid_munged(current_user_ns(), current_egid()); 941 } 942 943 static void do_sys_times(struct tms *tms) 944 { 945 u64 tgutime, tgstime, cutime, cstime; 946 947 thread_group_cputime_adjusted(current, &tgutime, &tgstime); 948 cutime = current->signal->cutime; 949 cstime = current->signal->cstime; 950 tms->tms_utime = nsec_to_clock_t(tgutime); 951 tms->tms_stime = nsec_to_clock_t(tgstime); 952 tms->tms_cutime = nsec_to_clock_t(cutime); 953 tms->tms_cstime = nsec_to_clock_t(cstime); 954 } 955 956 SYSCALL_DEFINE1(times, struct tms __user *, tbuf) 957 { 958 if (tbuf) { 959 struct tms tmp; 960 961 do_sys_times(&tmp); 962 if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) 963 return -EFAULT; 964 } 965 force_successful_syscall_return(); 966 return (long) jiffies_64_to_clock_t(get_jiffies_64()); 967 } 968 969 #ifdef CONFIG_COMPAT 970 static compat_clock_t clock_t_to_compat_clock_t(clock_t x) 971 { 972 return compat_jiffies_to_clock_t(clock_t_to_jiffies(x)); 973 } 974 975 COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf) 976 { 977 if (tbuf) { 978 struct tms tms; 979 struct compat_tms 
tmp; 980 981 do_sys_times(&tms); 982 /* Convert our struct tms to the compat version. */ 983 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime); 984 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime); 985 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime); 986 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime); 987 if (copy_to_user(tbuf, &tmp, sizeof(tmp))) 988 return -EFAULT; 989 } 990 force_successful_syscall_return(); 991 return compat_jiffies_to_clock_t(jiffies); 992 } 993 #endif 994 995 /* 996 * This needs some heavy checking ... 997 * I just haven't the stomach for it. I also don't fully 998 * understand sessions/pgrp etc. Let somebody who does explain it. 999 * 1000 * OK, I think I have the protection semantics right.... this is really 1001 * only important on a multi-user system anyway, to make sure one user 1002 * can't send a signal to a process owned by another. -TYT, 12/12/91 1003 * 1004 * !PF_FORKNOEXEC check to conform completely to POSIX. 1005 */ 1006 SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) 1007 { 1008 struct task_struct *p; 1009 struct task_struct *group_leader = current->group_leader; 1010 struct pid *pgrp; 1011 int err; 1012 1013 if (!pid) 1014 pid = task_pid_vnr(group_leader); 1015 if (!pgid) 1016 pgid = pid; 1017 if (pgid < 0) 1018 return -EINVAL; 1019 rcu_read_lock(); 1020 1021 /* From this point forward we keep holding onto the tasklist lock 1022 * so that our parent does not change from under us. -DaveM 1023 */ 1024 write_lock_irq(&tasklist_lock); 1025 1026 err = -ESRCH; 1027 p = find_task_by_vpid(pid); 1028 if (!p) 1029 goto out; 1030 1031 err = -EINVAL; 1032 if (!thread_group_leader(p)) 1033 goto out; 1034 1035 if (same_thread_group(p->real_parent, group_leader)) { 1036 err = -EPERM; 1037 if (task_session(p) != task_session(group_leader)) 1038 goto out; 1039 err = -EACCES; 1040 if (!(p->flags & PF_FORKNOEXEC)) 1041 goto out; 1042 } else { 1043 err = -ESRCH; 1044 if (p != group_leader) 1045 goto out; 1046 } 1047 1048 err = -EPERM; 1049 if (p->signal->leader) 1050 goto out; 1051 1052 pgrp = task_pid(p); 1053 if (pgid != pid) { 1054 struct task_struct *g; 1055 1056 pgrp = find_vpid(pgid); 1057 g = pid_task(pgrp, PIDTYPE_PGID); 1058 if (!g || task_session(g) != task_session(group_leader)) 1059 goto out; 1060 } 1061 1062 err = security_task_setpgid(p, pgid); 1063 if (err) 1064 goto out; 1065 1066 if (task_pgrp(p) != pgrp) 1067 change_pid(p, PIDTYPE_PGID, pgrp); 1068 1069 err = 0; 1070 out: 1071 /* All paths lead to here, thus we are safe. 
-DaveM */ 1072 write_unlock_irq(&tasklist_lock); 1073 rcu_read_unlock(); 1074 return err; 1075 } 1076 1077 static int do_getpgid(pid_t pid) 1078 { 1079 struct task_struct *p; 1080 struct pid *grp; 1081 int retval; 1082 1083 rcu_read_lock(); 1084 if (!pid) 1085 grp = task_pgrp(current); 1086 else { 1087 retval = -ESRCH; 1088 p = find_task_by_vpid(pid); 1089 if (!p) 1090 goto out; 1091 grp = task_pgrp(p); 1092 if (!grp) 1093 goto out; 1094 1095 retval = security_task_getpgid(p); 1096 if (retval) 1097 goto out; 1098 } 1099 retval = pid_vnr(grp); 1100 out: 1101 rcu_read_unlock(); 1102 return retval; 1103 } 1104 1105 SYSCALL_DEFINE1(getpgid, pid_t, pid) 1106 { 1107 return do_getpgid(pid); 1108 } 1109 1110 #ifdef __ARCH_WANT_SYS_GETPGRP 1111 1112 SYSCALL_DEFINE0(getpgrp) 1113 { 1114 return do_getpgid(0); 1115 } 1116 1117 #endif 1118 1119 SYSCALL_DEFINE1(getsid, pid_t, pid) 1120 { 1121 struct task_struct *p; 1122 struct pid *sid; 1123 int retval; 1124 1125 rcu_read_lock(); 1126 if (!pid) 1127 sid = task_session(current); 1128 else { 1129 retval = -ESRCH; 1130 p = find_task_by_vpid(pid); 1131 if (!p) 1132 goto out; 1133 sid = task_session(p); 1134 if (!sid) 1135 goto out; 1136 1137 retval = security_task_getsid(p); 1138 if (retval) 1139 goto out; 1140 } 1141 retval = pid_vnr(sid); 1142 out: 1143 rcu_read_unlock(); 1144 return retval; 1145 } 1146 1147 static void set_special_pids(struct pid *pid) 1148 { 1149 struct task_struct *curr = current->group_leader; 1150 1151 if (task_session(curr) != pid) 1152 change_pid(curr, PIDTYPE_SID, pid); 1153 1154 if (task_pgrp(curr) != pid) 1155 change_pid(curr, PIDTYPE_PGID, pid); 1156 } 1157 1158 int ksys_setsid(void) 1159 { 1160 struct task_struct *group_leader = current->group_leader; 1161 struct pid *sid = task_pid(group_leader); 1162 pid_t session = pid_vnr(sid); 1163 int err = -EPERM; 1164 1165 write_lock_irq(&tasklist_lock); 1166 /* Fail if I am already a session leader */ 1167 if (group_leader->signal->leader) 1168 goto out; 1169 1170 /* Fail if a process group id already exists that equals the 1171 * proposed session id. 1172 */ 1173 if (pid_task(sid, PIDTYPE_PGID)) 1174 goto out; 1175 1176 group_leader->signal->leader = 1; 1177 set_special_pids(sid); 1178 1179 proc_clear_tty(group_leader); 1180 1181 err = session; 1182 out: 1183 write_unlock_irq(&tasklist_lock); 1184 if (err > 0) { 1185 proc_sid_connector(group_leader); 1186 sched_autogroup_create_attach(group_leader); 1187 } 1188 return err; 1189 } 1190 1191 SYSCALL_DEFINE0(setsid) 1192 { 1193 return ksys_setsid(); 1194 } 1195 1196 DECLARE_RWSEM(uts_sem); 1197 1198 #ifdef COMPAT_UTS_MACHINE 1199 #define override_architecture(name) \ 1200 (personality(current->personality) == PER_LINUX32 && \ 1201 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ 1202 sizeof(COMPAT_UTS_MACHINE))) 1203 #else 1204 #define override_architecture(name) 0 1205 #endif 1206 1207 /* 1208 * Work around broken programs that cannot handle "Linux 3.0". 1209 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 1210 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be 1211 * 2.6.60. 1212 */ 1213 static int override_release(char __user *release, size_t len) 1214 { 1215 int ret = 0; 1216 1217 if (current->personality & UNAME26) { 1218 const char *rest = UTS_RELEASE; 1219 char buf[65] = { 0 }; 1220 int ndots = 0; 1221 unsigned v; 1222 size_t copy; 1223 1224 while (*rest) { 1225 if (*rest == '.' 
&& ++ndots >= 3) 1226 break; 1227 if (!isdigit(*rest) && *rest != '.') 1228 break; 1229 rest++; 1230 } 1231 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60; 1232 copy = clamp_t(size_t, len, 1, sizeof(buf)); 1233 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest); 1234 ret = copy_to_user(release, buf, copy + 1); 1235 } 1236 return ret; 1237 } 1238 1239 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) 1240 { 1241 struct new_utsname tmp; 1242 1243 down_read(&uts_sem); 1244 memcpy(&tmp, utsname(), sizeof(tmp)); 1245 up_read(&uts_sem); 1246 if (copy_to_user(name, &tmp, sizeof(tmp))) 1247 return -EFAULT; 1248 1249 if (override_release(name->release, sizeof(name->release))) 1250 return -EFAULT; 1251 if (override_architecture(name)) 1252 return -EFAULT; 1253 return 0; 1254 } 1255 1256 #ifdef __ARCH_WANT_SYS_OLD_UNAME 1257 /* 1258 * Old cruft 1259 */ 1260 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) 1261 { 1262 struct old_utsname tmp; 1263 1264 if (!name) 1265 return -EFAULT; 1266 1267 down_read(&uts_sem); 1268 memcpy(&tmp, utsname(), sizeof(tmp)); 1269 up_read(&uts_sem); 1270 if (copy_to_user(name, &tmp, sizeof(tmp))) 1271 return -EFAULT; 1272 1273 if (override_release(name->release, sizeof(name->release))) 1274 return -EFAULT; 1275 if (override_architecture(name)) 1276 return -EFAULT; 1277 return 0; 1278 } 1279 1280 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) 1281 { 1282 struct oldold_utsname tmp; 1283 1284 if (!name) 1285 return -EFAULT; 1286 1287 memset(&tmp, 0, sizeof(tmp)); 1288 1289 down_read(&uts_sem); 1290 memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); 1291 memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); 1292 memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN); 1293 memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN); 1294 memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN); 1295 up_read(&uts_sem); 1296 if (copy_to_user(name, &tmp, sizeof(tmp))) 1297 return -EFAULT; 1298 1299 if (override_architecture(name)) 1300 return -EFAULT; 1301 if (override_release(name->release, sizeof(name->release))) 1302 return -EFAULT; 1303 return 0; 1304 } 1305 #endif 1306 1307 SYSCALL_DEFINE2(sethostname, char __user *, name, int, len) 1308 { 1309 int errno; 1310 char tmp[__NEW_UTS_LEN]; 1311 1312 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) 1313 return -EPERM; 1314 1315 if (len < 0 || len > __NEW_UTS_LEN) 1316 return -EINVAL; 1317 errno = -EFAULT; 1318 if (!copy_from_user(tmp, name, len)) { 1319 struct new_utsname *u; 1320 1321 down_write(&uts_sem); 1322 u = utsname(); 1323 memcpy(u->nodename, tmp, len); 1324 memset(u->nodename + len, 0, sizeof(u->nodename) - len); 1325 errno = 0; 1326 uts_proc_notify(UTS_PROC_HOSTNAME); 1327 up_write(&uts_sem); 1328 } 1329 return errno; 1330 } 1331 1332 #ifdef __ARCH_WANT_SYS_GETHOSTNAME 1333 1334 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) 1335 { 1336 int i; 1337 struct new_utsname *u; 1338 char tmp[__NEW_UTS_LEN + 1]; 1339 1340 if (len < 0) 1341 return -EINVAL; 1342 down_read(&uts_sem); 1343 u = utsname(); 1344 i = 1 + strlen(u->nodename); 1345 if (i > len) 1346 i = len; 1347 memcpy(tmp, u->nodename, i); 1348 up_read(&uts_sem); 1349 if (copy_to_user(name, tmp, i)) 1350 return -EFAULT; 1351 return 0; 1352 } 1353 1354 #endif 1355 1356 /* 1357 * Only setdomainname; getdomainname can be implemented by calling 1358 * uname() 1359 */ 1360 SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) 1361 { 1362 int errno; 1363 char tmp[__NEW_UTS_LEN]; 1364 1365 if 
(!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) 1366 return -EPERM; 1367 if (len < 0 || len > __NEW_UTS_LEN) 1368 return -EINVAL; 1369 1370 errno = -EFAULT; 1371 if (!copy_from_user(tmp, name, len)) { 1372 struct new_utsname *u; 1373 1374 down_write(&uts_sem); 1375 u = utsname(); 1376 memcpy(u->domainname, tmp, len); 1377 memset(u->domainname + len, 0, sizeof(u->domainname) - len); 1378 errno = 0; 1379 uts_proc_notify(UTS_PROC_DOMAINNAME); 1380 up_write(&uts_sem); 1381 } 1382 return errno; 1383 } 1384 1385 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) 1386 { 1387 struct rlimit value; 1388 int ret; 1389 1390 ret = do_prlimit(current, resource, NULL, &value); 1391 if (!ret) 1392 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; 1393 1394 return ret; 1395 } 1396 1397 #ifdef CONFIG_COMPAT 1398 1399 COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource, 1400 struct compat_rlimit __user *, rlim) 1401 { 1402 struct rlimit r; 1403 struct compat_rlimit r32; 1404 1405 if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit))) 1406 return -EFAULT; 1407 1408 if (r32.rlim_cur == COMPAT_RLIM_INFINITY) 1409 r.rlim_cur = RLIM_INFINITY; 1410 else 1411 r.rlim_cur = r32.rlim_cur; 1412 if (r32.rlim_max == COMPAT_RLIM_INFINITY) 1413 r.rlim_max = RLIM_INFINITY; 1414 else 1415 r.rlim_max = r32.rlim_max; 1416 return do_prlimit(current, resource, &r, NULL); 1417 } 1418 1419 COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource, 1420 struct compat_rlimit __user *, rlim) 1421 { 1422 struct rlimit r; 1423 int ret; 1424 1425 ret = do_prlimit(current, resource, NULL, &r); 1426 if (!ret) { 1427 struct compat_rlimit r32; 1428 if (r.rlim_cur > COMPAT_RLIM_INFINITY) 1429 r32.rlim_cur = COMPAT_RLIM_INFINITY; 1430 else 1431 r32.rlim_cur = r.rlim_cur; 1432 if (r.rlim_max > COMPAT_RLIM_INFINITY) 1433 r32.rlim_max = COMPAT_RLIM_INFINITY; 1434 else 1435 r32.rlim_max = r.rlim_max; 1436 1437 if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit))) 1438 return -EFAULT; 1439 } 1440 return ret; 1441 } 1442 1443 #endif 1444 1445 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT 1446 1447 /* 1448 * Back compatibility for getrlimit. Needed for some apps. 1449 */ 1450 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, 1451 struct rlimit __user *, rlim) 1452 { 1453 struct rlimit x; 1454 if (resource >= RLIM_NLIMITS) 1455 return -EINVAL; 1456 1457 resource = array_index_nospec(resource, RLIM_NLIMITS); 1458 task_lock(current->group_leader); 1459 x = current->signal->rlim[resource]; 1460 task_unlock(current->group_leader); 1461 if (x.rlim_cur > 0x7FFFFFFF) 1462 x.rlim_cur = 0x7FFFFFFF; 1463 if (x.rlim_max > 0x7FFFFFFF) 1464 x.rlim_max = 0x7FFFFFFF; 1465 return copy_to_user(rlim, &x, sizeof(x)) ? 
-EFAULT : 0; 1466 } 1467 1468 #ifdef CONFIG_COMPAT 1469 COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, 1470 struct compat_rlimit __user *, rlim) 1471 { 1472 struct rlimit r; 1473 1474 if (resource >= RLIM_NLIMITS) 1475 return -EINVAL; 1476 1477 resource = array_index_nospec(resource, RLIM_NLIMITS); 1478 task_lock(current->group_leader); 1479 r = current->signal->rlim[resource]; 1480 task_unlock(current->group_leader); 1481 if (r.rlim_cur > 0x7FFFFFFF) 1482 r.rlim_cur = 0x7FFFFFFF; 1483 if (r.rlim_max > 0x7FFFFFFF) 1484 r.rlim_max = 0x7FFFFFFF; 1485 1486 if (put_user(r.rlim_cur, &rlim->rlim_cur) || 1487 put_user(r.rlim_max, &rlim->rlim_max)) 1488 return -EFAULT; 1489 return 0; 1490 } 1491 #endif 1492 1493 #endif 1494 1495 static inline bool rlim64_is_infinity(__u64 rlim64) 1496 { 1497 #if BITS_PER_LONG < 64 1498 return rlim64 >= ULONG_MAX; 1499 #else 1500 return rlim64 == RLIM64_INFINITY; 1501 #endif 1502 } 1503 1504 static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64) 1505 { 1506 if (rlim->rlim_cur == RLIM_INFINITY) 1507 rlim64->rlim_cur = RLIM64_INFINITY; 1508 else 1509 rlim64->rlim_cur = rlim->rlim_cur; 1510 if (rlim->rlim_max == RLIM_INFINITY) 1511 rlim64->rlim_max = RLIM64_INFINITY; 1512 else 1513 rlim64->rlim_max = rlim->rlim_max; 1514 } 1515 1516 static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim) 1517 { 1518 if (rlim64_is_infinity(rlim64->rlim_cur)) 1519 rlim->rlim_cur = RLIM_INFINITY; 1520 else 1521 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur; 1522 if (rlim64_is_infinity(rlim64->rlim_max)) 1523 rlim->rlim_max = RLIM_INFINITY; 1524 else 1525 rlim->rlim_max = (unsigned long)rlim64->rlim_max; 1526 } 1527 1528 /* make sure you are allowed to change @tsk limits before calling this */ 1529 int do_prlimit(struct task_struct *tsk, unsigned int resource, 1530 struct rlimit *new_rlim, struct rlimit *old_rlim) 1531 { 1532 struct rlimit *rlim; 1533 int retval = 0; 1534 1535 if (resource >= RLIM_NLIMITS) 1536 return -EINVAL; 1537 if (new_rlim) { 1538 if (new_rlim->rlim_cur > new_rlim->rlim_max) 1539 return -EINVAL; 1540 if (resource == RLIMIT_NOFILE && 1541 new_rlim->rlim_max > sysctl_nr_open) 1542 return -EPERM; 1543 } 1544 1545 /* protect tsk->signal and tsk->sighand from disappearing */ 1546 read_lock(&tasklist_lock); 1547 if (!tsk->sighand) { 1548 retval = -ESRCH; 1549 goto out; 1550 } 1551 1552 rlim = tsk->signal->rlim + resource; 1553 task_lock(tsk->group_leader); 1554 if (new_rlim) { 1555 /* Keep the capable check against init_user_ns until 1556 cgroups can contain all limits */ 1557 if (new_rlim->rlim_max > rlim->rlim_max && 1558 !capable(CAP_SYS_RESOURCE)) 1559 retval = -EPERM; 1560 if (!retval) 1561 retval = security_task_setrlimit(tsk, resource, new_rlim); 1562 } 1563 if (!retval) { 1564 if (old_rlim) 1565 *old_rlim = *rlim; 1566 if (new_rlim) 1567 *rlim = *new_rlim; 1568 } 1569 task_unlock(tsk->group_leader); 1570 1571 /* 1572 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not 1573 * infite. In case of RLIM_INFINITY the posix CPU timer code 1574 * ignores the rlimit. 
1575 */ 1576 if (!retval && new_rlim && resource == RLIMIT_CPU && 1577 new_rlim->rlim_cur != RLIM_INFINITY && 1578 IS_ENABLED(CONFIG_POSIX_TIMERS)) 1579 update_rlimit_cpu(tsk, new_rlim->rlim_cur); 1580 out: 1581 read_unlock(&tasklist_lock); 1582 return retval; 1583 } 1584 1585 /* rcu lock must be held */ 1586 static int check_prlimit_permission(struct task_struct *task, 1587 unsigned int flags) 1588 { 1589 const struct cred *cred = current_cred(), *tcred; 1590 bool id_match; 1591 1592 if (current == task) 1593 return 0; 1594 1595 tcred = __task_cred(task); 1596 id_match = (uid_eq(cred->uid, tcred->euid) && 1597 uid_eq(cred->uid, tcred->suid) && 1598 uid_eq(cred->uid, tcred->uid) && 1599 gid_eq(cred->gid, tcred->egid) && 1600 gid_eq(cred->gid, tcred->sgid) && 1601 gid_eq(cred->gid, tcred->gid)); 1602 if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE)) 1603 return -EPERM; 1604 1605 return security_task_prlimit(cred, tcred, flags); 1606 } 1607 1608 SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource, 1609 const struct rlimit64 __user *, new_rlim, 1610 struct rlimit64 __user *, old_rlim) 1611 { 1612 struct rlimit64 old64, new64; 1613 struct rlimit old, new; 1614 struct task_struct *tsk; 1615 unsigned int checkflags = 0; 1616 int ret; 1617 1618 if (old_rlim) 1619 checkflags |= LSM_PRLIMIT_READ; 1620 1621 if (new_rlim) { 1622 if (copy_from_user(&new64, new_rlim, sizeof(new64))) 1623 return -EFAULT; 1624 rlim64_to_rlim(&new64, &new); 1625 checkflags |= LSM_PRLIMIT_WRITE; 1626 } 1627 1628 rcu_read_lock(); 1629 tsk = pid ? find_task_by_vpid(pid) : current; 1630 if (!tsk) { 1631 rcu_read_unlock(); 1632 return -ESRCH; 1633 } 1634 ret = check_prlimit_permission(tsk, checkflags); 1635 if (ret) { 1636 rcu_read_unlock(); 1637 return ret; 1638 } 1639 get_task_struct(tsk); 1640 rcu_read_unlock(); 1641 1642 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, 1643 old_rlim ? &old : NULL); 1644 1645 if (!ret && old_rlim) { 1646 rlim_to_rlim64(&old, &old64); 1647 if (copy_to_user(old_rlim, &old64, sizeof(old64))) 1648 ret = -EFAULT; 1649 } 1650 1651 put_task_struct(tsk); 1652 return ret; 1653 } 1654 1655 SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) 1656 { 1657 struct rlimit new_rlim; 1658 1659 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) 1660 return -EFAULT; 1661 return do_prlimit(current, resource, &new_rlim, NULL); 1662 } 1663 1664 /* 1665 * It would make sense to put struct rusage in the task_struct, 1666 * except that would make the task_struct be *really big*. After 1667 * task_struct gets moved into malloc'ed memory, it would 1668 * make sense to do this. It will make moving the rest of the information 1669 * a lot simpler! (Which we're not doing right now because we're not 1670 * measuring them yet). 1671 * 1672 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have 1673 * races with threads incrementing their own counters. But since word 1674 * reads are atomic, we either get new values or old values and we don't 1675 * care which for the sums. We always take the siglock to protect reading 1676 * the c* fields from p->signal from races with exit.c updating those 1677 * fields when reaping, so a sample either gets all the additions of a 1678 * given child after it's reaped, or none so this sample is before reaping. 1679 * 1680 * Locking: 1681 * We need to take the siglock for CHILDEREN, SELF and BOTH 1682 * for the cases current multithreaded, non-current single threaded 1683 * non-current multithreaded. 
Thread traversal is now safe with 1684 * the siglock held. 1685 * Strictly speaking, we donot need to take the siglock if we are current and 1686 * single threaded, as no one else can take our signal_struct away, no one 1687 * else can reap the children to update signal->c* counters, and no one else 1688 * can race with the signal-> fields. If we do not take any lock, the 1689 * signal-> fields could be read out of order while another thread was just 1690 * exiting. So we should place a read memory barrier when we avoid the lock. 1691 * On the writer side, write memory barrier is implied in __exit_signal 1692 * as __exit_signal releases the siglock spinlock after updating the signal-> 1693 * fields. But we don't do this yet to keep things simple. 1694 * 1695 */ 1696 1697 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r) 1698 { 1699 r->ru_nvcsw += t->nvcsw; 1700 r->ru_nivcsw += t->nivcsw; 1701 r->ru_minflt += t->min_flt; 1702 r->ru_majflt += t->maj_flt; 1703 r->ru_inblock += task_io_get_inblock(t); 1704 r->ru_oublock += task_io_get_oublock(t); 1705 } 1706 1707 void getrusage(struct task_struct *p, int who, struct rusage *r) 1708 { 1709 struct task_struct *t; 1710 unsigned long flags; 1711 u64 tgutime, tgstime, utime, stime; 1712 unsigned long maxrss = 0; 1713 1714 memset((char *)r, 0, sizeof (*r)); 1715 utime = stime = 0; 1716 1717 if (who == RUSAGE_THREAD) { 1718 task_cputime_adjusted(current, &utime, &stime); 1719 accumulate_thread_rusage(p, r); 1720 maxrss = p->signal->maxrss; 1721 goto out; 1722 } 1723 1724 if (!lock_task_sighand(p, &flags)) 1725 return; 1726 1727 switch (who) { 1728 case RUSAGE_BOTH: 1729 case RUSAGE_CHILDREN: 1730 utime = p->signal->cutime; 1731 stime = p->signal->cstime; 1732 r->ru_nvcsw = p->signal->cnvcsw; 1733 r->ru_nivcsw = p->signal->cnivcsw; 1734 r->ru_minflt = p->signal->cmin_flt; 1735 r->ru_majflt = p->signal->cmaj_flt; 1736 r->ru_inblock = p->signal->cinblock; 1737 r->ru_oublock = p->signal->coublock; 1738 maxrss = p->signal->cmaxrss; 1739 1740 if (who == RUSAGE_CHILDREN) 1741 break; 1742 /* fall through */ 1743 1744 case RUSAGE_SELF: 1745 thread_group_cputime_adjusted(p, &tgutime, &tgstime); 1746 utime += tgutime; 1747 stime += tgstime; 1748 r->ru_nvcsw += p->signal->nvcsw; 1749 r->ru_nivcsw += p->signal->nivcsw; 1750 r->ru_minflt += p->signal->min_flt; 1751 r->ru_majflt += p->signal->maj_flt; 1752 r->ru_inblock += p->signal->inblock; 1753 r->ru_oublock += p->signal->oublock; 1754 if (maxrss < p->signal->maxrss) 1755 maxrss = p->signal->maxrss; 1756 t = p; 1757 do { 1758 accumulate_thread_rusage(t, r); 1759 } while_each_thread(p, t); 1760 break; 1761 1762 default: 1763 BUG(); 1764 } 1765 unlock_task_sighand(p, &flags); 1766 1767 out: 1768 r->ru_utime = ns_to_kernel_old_timeval(utime); 1769 r->ru_stime = ns_to_kernel_old_timeval(stime); 1770 1771 if (who != RUSAGE_CHILDREN) { 1772 struct mm_struct *mm = get_task_mm(p); 1773 1774 if (mm) { 1775 setmax_mm_hiwater_rss(&maxrss, mm); 1776 mmput(mm); 1777 } 1778 } 1779 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */ 1780 } 1781 1782 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru) 1783 { 1784 struct rusage r; 1785 1786 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && 1787 who != RUSAGE_THREAD) 1788 return -EINVAL; 1789 1790 getrusage(current, who, &r); 1791 return copy_to_user(ru, &r, sizeof(r)) ? 
		-EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct file *old_exe, *exe_file;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to executable file, make
	 * sure that this one is executable as well, to avoid breaking an
	 * overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	/*
	 * Forbid mm->exe_file change if old file still mapped.
	 */
	exe_file = get_mm_exe_file(mm);
	err = -EBUSY;
	if (exe_file) {
		struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &exe_file->f_path))
				goto exit_err;
		}

		up_read(&mm->mmap_sem);
		fput(exe_file);
	}

	err = 0;
	/* set the new file, lockless */
	get_file(exe.file);
	old_exe = xchg(&mm->exe_file, exe.file);
	if (old_exe)
		fput(old_exe);
exit:
	fdput(exe);
	return err;
exit_err:
	up_read(&mm->mmap_sem);
	fput(exe_file);
	goto exit;
}

/*
 * Check arithmetic relations of passed addresses.
 *
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data, <=, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * @brk should be after @end_data in traditional maps.
	 */
	if (prctl_map->start_brk <= prctl_map->end_data ||
	    prctl_map->brk <= prctl_map->end_data)
		goto out;

	/*
	 * Nor should we allow the limits to be overridden if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	error = 0;
out:
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		/*
		 * Someone is trying to cheat the auxv vector.
		 */
		if (!prctl_map.auxv ||
		    prctl_map.auxv_size > sizeof(mm->saved_auxv))
			return -EINVAL;

		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as the specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		/*
		 * Make sure the caller has the rights to
		 * change the /proc/pid/exe link: only a local sys admin should
		 * be allowed to.
		 */
		if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
			return -EINVAL;

		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	/*
	 * arg_lock protects concurrent updates but we still need mmap_sem for
	 * read to exclude races with sys_brk.
	 */
	down_read(&mm->mmap_sem);

	/*
	 * We don't validate whether these members point to real, present
	 * VMAs, because the application may already have unmapped the
	 * corresponding VMAs and the kernel mostly uses these members for
	 * statistics output in procfs, except for
	 *
	 * - @start_brk/@brk, which are used in do_brk; the kernel does look
	 *   up VMAs when updating these members, so anything bogus written
	 *   here makes the kernel swear at the userspace program but won't
	 *   cause any problem in the kernel itself.
	 */

	spin_lock(&mm->arg_lock);
	mm->start_code = prctl_map.start_code;
	mm->end_code = prctl_map.end_code;
	mm->start_data = prctl_map.start_data;
	mm->end_data = prctl_map.end_data;
	mm->start_brk = prctl_map.start_brk;
	mm->brk = prctl_map.brk;
	mm->start_stack = prctl_map.start_stack;
	mm->arg_start = prctl_map.arg_start;
	mm->arg_end = prctl_map.arg_end;
	mm->env_start = prctl_map.env_start;
	mm->env_end = prctl_map.env_end;
	spin_unlock(&mm->arg_lock);

	/*
	 * Note this update of @saved_auxv is lockless, thus
	 * if someone reads this member in procfs while we're
	 * updating -- it may get partly updated results. It's
	 * a known and acceptable trade-off: we leave it as is to
	 * not introduce additional locks here, making the kernel
	 * more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	up_read(&mm->mmap_sem);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */

static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values. It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
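	 *
	 * The caller may pass at most sizeof(mm->saved_auxv) bytes; whatever
	 * is copied in, the last pair of entries is zeroed (AT_NULL) below so
	 * the vector always stays properly terminated.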
2064 */ 2065 unsigned long user_auxv[AT_VECTOR_SIZE]; 2066 2067 if (len > sizeof(user_auxv)) 2068 return -EINVAL; 2069 2070 if (copy_from_user(user_auxv, (const void __user *)addr, len)) 2071 return -EFAULT; 2072 2073 /* Make sure the last entry is always AT_NULL */ 2074 user_auxv[AT_VECTOR_SIZE - 2] = 0; 2075 user_auxv[AT_VECTOR_SIZE - 1] = 0; 2076 2077 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv)); 2078 2079 task_lock(current); 2080 memcpy(mm->saved_auxv, user_auxv, len); 2081 task_unlock(current); 2082 2083 return 0; 2084 } 2085 2086 static int prctl_set_mm(int opt, unsigned long addr, 2087 unsigned long arg4, unsigned long arg5) 2088 { 2089 struct mm_struct *mm = current->mm; 2090 struct prctl_mm_map prctl_map = { 2091 .auxv = NULL, 2092 .auxv_size = 0, 2093 .exe_fd = -1, 2094 }; 2095 struct vm_area_struct *vma; 2096 int error; 2097 2098 if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV && 2099 opt != PR_SET_MM_MAP && 2100 opt != PR_SET_MM_MAP_SIZE))) 2101 return -EINVAL; 2102 2103 #ifdef CONFIG_CHECKPOINT_RESTORE 2104 if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE) 2105 return prctl_set_mm_map(opt, (const void __user *)addr, arg4); 2106 #endif 2107 2108 if (!capable(CAP_SYS_RESOURCE)) 2109 return -EPERM; 2110 2111 if (opt == PR_SET_MM_EXE_FILE) 2112 return prctl_set_mm_exe_file(mm, (unsigned int)addr); 2113 2114 if (opt == PR_SET_MM_AUXV) 2115 return prctl_set_auxv(mm, addr, arg4); 2116 2117 if (addr >= TASK_SIZE || addr < mmap_min_addr) 2118 return -EINVAL; 2119 2120 error = -EINVAL; 2121 2122 /* 2123 * arg_lock protects concurent updates of arg boundaries, we need 2124 * mmap_sem for a) concurrent sys_brk, b) finding VMA for addr 2125 * validation. 2126 */ 2127 down_read(&mm->mmap_sem); 2128 vma = find_vma(mm, addr); 2129 2130 spin_lock(&mm->arg_lock); 2131 prctl_map.start_code = mm->start_code; 2132 prctl_map.end_code = mm->end_code; 2133 prctl_map.start_data = mm->start_data; 2134 prctl_map.end_data = mm->end_data; 2135 prctl_map.start_brk = mm->start_brk; 2136 prctl_map.brk = mm->brk; 2137 prctl_map.start_stack = mm->start_stack; 2138 prctl_map.arg_start = mm->arg_start; 2139 prctl_map.arg_end = mm->arg_end; 2140 prctl_map.env_start = mm->env_start; 2141 prctl_map.env_end = mm->env_end; 2142 2143 switch (opt) { 2144 case PR_SET_MM_START_CODE: 2145 prctl_map.start_code = addr; 2146 break; 2147 case PR_SET_MM_END_CODE: 2148 prctl_map.end_code = addr; 2149 break; 2150 case PR_SET_MM_START_DATA: 2151 prctl_map.start_data = addr; 2152 break; 2153 case PR_SET_MM_END_DATA: 2154 prctl_map.end_data = addr; 2155 break; 2156 case PR_SET_MM_START_STACK: 2157 prctl_map.start_stack = addr; 2158 break; 2159 case PR_SET_MM_START_BRK: 2160 prctl_map.start_brk = addr; 2161 break; 2162 case PR_SET_MM_BRK: 2163 prctl_map.brk = addr; 2164 break; 2165 case PR_SET_MM_ARG_START: 2166 prctl_map.arg_start = addr; 2167 break; 2168 case PR_SET_MM_ARG_END: 2169 prctl_map.arg_end = addr; 2170 break; 2171 case PR_SET_MM_ENV_START: 2172 prctl_map.env_start = addr; 2173 break; 2174 case PR_SET_MM_ENV_END: 2175 prctl_map.env_end = addr; 2176 break; 2177 default: 2178 goto out; 2179 } 2180 2181 error = validate_prctl_map_addr(&prctl_map); 2182 if (error) 2183 goto out; 2184 2185 switch (opt) { 2186 /* 2187 * If command line arguments and environment 2188 * are placed somewhere else on stack, we can 2189 * set them up here, ARG_START/END to setup 2190 * command line argumets and ENV_START/END 2191 * for environment. 
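	 *
	 * For these five options find_vma() above must also have found a
	 * VMA for @addr, otherwise the !vma check right below fails with
	 * -EFAULT.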
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;

	error = 0;
out:
	spin_unlock(&mm->arg_lock);
	up_read(&mm->mmap_sem);
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task has has_child_subreaper set, all its descendants
	 * already have the flag too and new descendants will inherit it
	 * on fork, so skip them.
	 *
	 * If we've found the child_reaper, skip the descendants in its
	 * subtree as they will never get out of that pidns.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
{
	return -EINVAL;
}

int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
				    unsigned long ctrl)
{
	return -EINVAL;
}

#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LESS_THROTTLE)

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
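	/*
	 * Illustrative only (not kernel documentation): from userspace the
	 * two name cases below are typically exercised as
	 *
	 *	prctl(PR_SET_NAME, (unsigned long)"worker-1", 0, 0, 0);
	 *	prctl(PR_GET_NAME, (unsigned long)buf, 0, 0, 0);
	 *
	 * where buf is assumed to hold at least sizeof(current->comm)
	 * (TASK_COMM_LEN, 16) bytes; longer names are silently truncated.
	 */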
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
					~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
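	/*
	 * Illustrative only: the two THP cases below set and read the
	 * per-mm MMF_DISABLE_THP bit, e.g.
	 *
	 *	prctl(PR_SET_THP_DISABLE, 1, 0, 0, 0);
	 *	prctl(PR_GET_THP_DISABLE, 0, 0, 0, 0);
	 *
	 * where the second call would then return 1.  Unlike the global
	 * sysfs setting, this affects only the calling process's mm.
	 */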
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (down_write_killable(&me->mm->mmap_sem))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		up_write(&me->mm->mmap_sem);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
	case PR_MPX_DISABLE_MANAGEMENT:
		/* No longer implemented: */
		return -EINVAL;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SVE_SET_VL:
		error = SVE_SET_VL(arg2);
		break;
	case PR_SVE_GET_VL:
		error = SVE_GET_VL();
		break;
	case PR_GET_SPECULATION_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_get(me, arg2);
		break;
	case PR_SET_SPECULATION_CTRL:
		if (arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
		break;
	case PR_PAC_RESET_KEYS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_RESET_KEYS(me, arg2);
		break;
	case PR_SET_TAGGED_ADDR_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = SET_TAGGED_ADDR_CTRL(arg2);
		break;
	case PR_GET_TAGGED_ADDR_CTRL:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = GET_TAGGED_ADDR_CTRL();
		break;
	case PR_SET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg3 || arg4 || arg5)
			return -EINVAL;

		if (arg2 == 1)
			current->flags |= PR_IO_FLUSHER;
		else if (!arg2)
			current->flags &= ~PR_IO_FLUSHER;
		else
			return -EINVAL;
		break;
	case PR_GET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;

		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec64 tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_boottime_ts64(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/*
	 * Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */