1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/kernel/sys.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 */ 7 8 #include <linux/export.h> 9 #include <linux/mm.h> 10 #include <linux/mm_inline.h> 11 #include <linux/utsname.h> 12 #include <linux/mman.h> 13 #include <linux/reboot.h> 14 #include <linux/prctl.h> 15 #include <linux/highuid.h> 16 #include <linux/fs.h> 17 #include <linux/kmod.h> 18 #include <linux/perf_event.h> 19 #include <linux/resource.h> 20 #include <linux/kernel.h> 21 #include <linux/workqueue.h> 22 #include <linux/capability.h> 23 #include <linux/device.h> 24 #include <linux/key.h> 25 #include <linux/times.h> 26 #include <linux/posix-timers.h> 27 #include <linux/security.h> 28 #include <linux/random.h> 29 #include <linux/suspend.h> 30 #include <linux/tty.h> 31 #include <linux/signal.h> 32 #include <linux/cn_proc.h> 33 #include <linux/getcpu.h> 34 #include <linux/task_io_accounting_ops.h> 35 #include <linux/seccomp.h> 36 #include <linux/cpu.h> 37 #include <linux/personality.h> 38 #include <linux/ptrace.h> 39 #include <linux/fs_struct.h> 40 #include <linux/file.h> 41 #include <linux/mount.h> 42 #include <linux/gfp.h> 43 #include <linux/syscore_ops.h> 44 #include <linux/version.h> 45 #include <linux/ctype.h> 46 #include <linux/syscall_user_dispatch.h> 47 48 #include <linux/compat.h> 49 #include <linux/syscalls.h> 50 #include <linux/kprobes.h> 51 #include <linux/user_namespace.h> 52 #include <linux/time_namespace.h> 53 #include <linux/binfmts.h> 54 55 #include <linux/sched.h> 56 #include <linux/sched/autogroup.h> 57 #include <linux/sched/loadavg.h> 58 #include <linux/sched/stat.h> 59 #include <linux/sched/mm.h> 60 #include <linux/sched/coredump.h> 61 #include <linux/sched/task.h> 62 #include <linux/sched/cputime.h> 63 #include <linux/rcupdate.h> 64 #include <linux/uidgid.h> 65 #include <linux/cred.h> 66 67 #include <linux/nospec.h> 68 69 #include <linux/kmsg_dump.h> 70 /* Move somewhere else to avoid recompiling? */ 71 #include <generated/utsrelease.h> 72 73 #include <linux/uaccess.h> 74 #include <asm/io.h> 75 #include <asm/unistd.h> 76 77 #include "uid16.h" 78 79 #ifndef SET_UNALIGN_CTL 80 # define SET_UNALIGN_CTL(a, b) (-EINVAL) 81 #endif 82 #ifndef GET_UNALIGN_CTL 83 # define GET_UNALIGN_CTL(a, b) (-EINVAL) 84 #endif 85 #ifndef SET_FPEMU_CTL 86 # define SET_FPEMU_CTL(a, b) (-EINVAL) 87 #endif 88 #ifndef GET_FPEMU_CTL 89 # define GET_FPEMU_CTL(a, b) (-EINVAL) 90 #endif 91 #ifndef SET_FPEXC_CTL 92 # define SET_FPEXC_CTL(a, b) (-EINVAL) 93 #endif 94 #ifndef GET_FPEXC_CTL 95 # define GET_FPEXC_CTL(a, b) (-EINVAL) 96 #endif 97 #ifndef GET_ENDIAN 98 # define GET_ENDIAN(a, b) (-EINVAL) 99 #endif 100 #ifndef SET_ENDIAN 101 # define SET_ENDIAN(a, b) (-EINVAL) 102 #endif 103 #ifndef GET_TSC_CTL 104 # define GET_TSC_CTL(a) (-EINVAL) 105 #endif 106 #ifndef SET_TSC_CTL 107 # define SET_TSC_CTL(a) (-EINVAL) 108 #endif 109 #ifndef GET_FP_MODE 110 # define GET_FP_MODE(a) (-EINVAL) 111 #endif 112 #ifndef SET_FP_MODE 113 # define SET_FP_MODE(a,b) (-EINVAL) 114 #endif 115 #ifndef SVE_SET_VL 116 # define SVE_SET_VL(a) (-EINVAL) 117 #endif 118 #ifndef SVE_GET_VL 119 # define SVE_GET_VL() (-EINVAL) 120 #endif 121 #ifndef SME_SET_VL 122 # define SME_SET_VL(a) (-EINVAL) 123 #endif 124 #ifndef SME_GET_VL 125 # define SME_GET_VL() (-EINVAL) 126 #endif 127 #ifndef PAC_RESET_KEYS 128 # define PAC_RESET_KEYS(a, b) (-EINVAL) 129 #endif 130 #ifndef PAC_SET_ENABLED_KEYS 131 # define PAC_SET_ENABLED_KEYS(a, b, c) (-EINVAL) 132 #endif 133 #ifndef PAC_GET_ENABLED_KEYS 134 # define PAC_GET_ENABLED_KEYS(a) (-EINVAL) 135 #endif 136 #ifndef SET_TAGGED_ADDR_CTRL 137 # define SET_TAGGED_ADDR_CTRL(a) (-EINVAL) 138 #endif 139 #ifndef GET_TAGGED_ADDR_CTRL 140 # define GET_TAGGED_ADDR_CTRL() (-EINVAL) 141 #endif 142 143 /* 144 * this is where the system-wide overflow UID and GID are defined, for 145 * architectures that now have 32-bit UID/GID but didn't in the past 146 */ 147 148 int overflowuid = DEFAULT_OVERFLOWUID; 149 int overflowgid = DEFAULT_OVERFLOWGID; 150 151 EXPORT_SYMBOL(overflowuid); 152 EXPORT_SYMBOL(overflowgid); 153 154 /* 155 * the same as above, but for filesystems which can only store a 16-bit 156 * UID and GID. as such, this is needed on all architectures 157 */ 158 159 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID; 160 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID; 161 162 EXPORT_SYMBOL(fs_overflowuid); 163 EXPORT_SYMBOL(fs_overflowgid); 164 165 /* 166 * Returns true if current's euid is same as p's uid or euid, 167 * or has CAP_SYS_NICE to p's user_ns. 168 * 169 * Called with rcu_read_lock, creds are safe 170 */ 171 static bool set_one_prio_perm(struct task_struct *p) 172 { 173 const struct cred *cred = current_cred(), *pcred = __task_cred(p); 174 175 if (uid_eq(pcred->uid, cred->euid) || 176 uid_eq(pcred->euid, cred->euid)) 177 return true; 178 if (ns_capable(pcred->user_ns, CAP_SYS_NICE)) 179 return true; 180 return false; 181 } 182 183 /* 184 * set the priority of a task 185 * - the caller must hold the RCU read lock 186 */ 187 static int set_one_prio(struct task_struct *p, int niceval, int error) 188 { 189 int no_nice; 190 191 if (!set_one_prio_perm(p)) { 192 error = -EPERM; 193 goto out; 194 } 195 if (niceval < task_nice(p) && !can_nice(p, niceval)) { 196 error = -EACCES; 197 goto out; 198 } 199 no_nice = security_task_setnice(p, niceval); 200 if (no_nice) { 201 error = no_nice; 202 goto out; 203 } 204 if (error == -ESRCH) 205 error = 0; 206 set_user_nice(p, niceval); 207 out: 208 return error; 209 } 210 211 SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval) 212 { 213 struct task_struct *g, *p; 214 struct user_struct *user; 215 const struct cred *cred = current_cred(); 216 int error = -EINVAL; 217 struct pid *pgrp; 218 kuid_t uid; 219 220 if (which > PRIO_USER || which < PRIO_PROCESS) 221 goto out; 222 223 /* normalize: avoid signed division (rounding problems) */ 224 error = -ESRCH; 225 if (niceval < MIN_NICE) 226 niceval = MIN_NICE; 227 if (niceval > MAX_NICE) 228 niceval = MAX_NICE; 229 230 rcu_read_lock(); 231 switch (which) { 232 case PRIO_PROCESS: 233 if (who) 234 p = find_task_by_vpid(who); 235 else 236 p = current; 237 if (p) 238 error = set_one_prio(p, niceval, error); 239 break; 240 case PRIO_PGRP: 241 if (who) 242 pgrp = find_vpid(who); 243 else 244 pgrp = task_pgrp(current); 245 read_lock(&tasklist_lock); 246 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 247 error = set_one_prio(p, niceval, error); 248 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 249 read_unlock(&tasklist_lock); 250 break; 251 case PRIO_USER: 252 uid = make_kuid(cred->user_ns, who); 253 user = cred->user; 254 if (!who) 255 uid = cred->uid; 256 else if (!uid_eq(uid, cred->uid)) { 257 user = find_user(uid); 258 if (!user) 259 goto out_unlock; /* No processes for this user */ 260 } 261 for_each_process_thread(g, p) { 262 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) 263 error = set_one_prio(p, niceval, error); 264 } 265 if (!uid_eq(uid, cred->uid)) 266 free_uid(user); /* For find_user() */ 267 break; 268 } 269 out_unlock: 270 rcu_read_unlock(); 271 out: 272 return error; 273 } 274 275 /* 276 * Ugh. To avoid negative return values, "getpriority()" will 277 * not return the normal nice-value, but a negated value that 278 * has been offset by 20 (ie it returns 40..1 instead of -20..19) 279 * to stay compatible. 280 */ 281 SYSCALL_DEFINE2(getpriority, int, which, int, who) 282 { 283 struct task_struct *g, *p; 284 struct user_struct *user; 285 const struct cred *cred = current_cred(); 286 long niceval, retval = -ESRCH; 287 struct pid *pgrp; 288 kuid_t uid; 289 290 if (which > PRIO_USER || which < PRIO_PROCESS) 291 return -EINVAL; 292 293 rcu_read_lock(); 294 switch (which) { 295 case PRIO_PROCESS: 296 if (who) 297 p = find_task_by_vpid(who); 298 else 299 p = current; 300 if (p) { 301 niceval = nice_to_rlimit(task_nice(p)); 302 if (niceval > retval) 303 retval = niceval; 304 } 305 break; 306 case PRIO_PGRP: 307 if (who) 308 pgrp = find_vpid(who); 309 else 310 pgrp = task_pgrp(current); 311 read_lock(&tasklist_lock); 312 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) { 313 niceval = nice_to_rlimit(task_nice(p)); 314 if (niceval > retval) 315 retval = niceval; 316 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); 317 read_unlock(&tasklist_lock); 318 break; 319 case PRIO_USER: 320 uid = make_kuid(cred->user_ns, who); 321 user = cred->user; 322 if (!who) 323 uid = cred->uid; 324 else if (!uid_eq(uid, cred->uid)) { 325 user = find_user(uid); 326 if (!user) 327 goto out_unlock; /* No processes for this user */ 328 } 329 for_each_process_thread(g, p) { 330 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) { 331 niceval = nice_to_rlimit(task_nice(p)); 332 if (niceval > retval) 333 retval = niceval; 334 } 335 } 336 if (!uid_eq(uid, cred->uid)) 337 free_uid(user); /* for find_user() */ 338 break; 339 } 340 out_unlock: 341 rcu_read_unlock(); 342 343 return retval; 344 } 345 346 /* 347 * Unprivileged users may change the real gid to the effective gid 348 * or vice versa. (BSD-style) 349 * 350 * If you set the real gid at all, or set the effective gid to a value not 351 * equal to the real gid, then the saved gid is set to the new effective gid. 352 * 353 * This makes it possible for a setgid program to completely drop its 354 * privileges, which is often a useful assertion to make when you are doing 355 * a security audit over a program. 356 * 357 * The general idea is that a program which uses just setregid() will be 358 * 100% compatible with BSD. A program which uses just setgid() will be 359 * 100% compatible with POSIX with saved IDs. 360 * 361 * SMP: There are not races, the GIDs are checked only by filesystem 362 * operations (as far as semantic preservation is concerned). 363 */ 364 #ifdef CONFIG_MULTIUSER 365 long __sys_setregid(gid_t rgid, gid_t egid) 366 { 367 struct user_namespace *ns = current_user_ns(); 368 const struct cred *old; 369 struct cred *new; 370 int retval; 371 kgid_t krgid, kegid; 372 373 krgid = make_kgid(ns, rgid); 374 kegid = make_kgid(ns, egid); 375 376 if ((rgid != (gid_t) -1) && !gid_valid(krgid)) 377 return -EINVAL; 378 if ((egid != (gid_t) -1) && !gid_valid(kegid)) 379 return -EINVAL; 380 381 new = prepare_creds(); 382 if (!new) 383 return -ENOMEM; 384 old = current_cred(); 385 386 retval = -EPERM; 387 if (rgid != (gid_t) -1) { 388 if (gid_eq(old->gid, krgid) || 389 gid_eq(old->egid, krgid) || 390 ns_capable_setid(old->user_ns, CAP_SETGID)) 391 new->gid = krgid; 392 else 393 goto error; 394 } 395 if (egid != (gid_t) -1) { 396 if (gid_eq(old->gid, kegid) || 397 gid_eq(old->egid, kegid) || 398 gid_eq(old->sgid, kegid) || 399 ns_capable_setid(old->user_ns, CAP_SETGID)) 400 new->egid = kegid; 401 else 402 goto error; 403 } 404 405 if (rgid != (gid_t) -1 || 406 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid))) 407 new->sgid = new->egid; 408 new->fsgid = new->egid; 409 410 retval = security_task_fix_setgid(new, old, LSM_SETID_RE); 411 if (retval < 0) 412 goto error; 413 414 return commit_creds(new); 415 416 error: 417 abort_creds(new); 418 return retval; 419 } 420 421 SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) 422 { 423 return __sys_setregid(rgid, egid); 424 } 425 426 /* 427 * setgid() is implemented like SysV w/ SAVED_IDS 428 * 429 * SMP: Same implicit races as above. 430 */ 431 long __sys_setgid(gid_t gid) 432 { 433 struct user_namespace *ns = current_user_ns(); 434 const struct cred *old; 435 struct cred *new; 436 int retval; 437 kgid_t kgid; 438 439 kgid = make_kgid(ns, gid); 440 if (!gid_valid(kgid)) 441 return -EINVAL; 442 443 new = prepare_creds(); 444 if (!new) 445 return -ENOMEM; 446 old = current_cred(); 447 448 retval = -EPERM; 449 if (ns_capable_setid(old->user_ns, CAP_SETGID)) 450 new->gid = new->egid = new->sgid = new->fsgid = kgid; 451 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid)) 452 new->egid = new->fsgid = kgid; 453 else 454 goto error; 455 456 retval = security_task_fix_setgid(new, old, LSM_SETID_ID); 457 if (retval < 0) 458 goto error; 459 460 return commit_creds(new); 461 462 error: 463 abort_creds(new); 464 return retval; 465 } 466 467 SYSCALL_DEFINE1(setgid, gid_t, gid) 468 { 469 return __sys_setgid(gid); 470 } 471 472 /* 473 * change the user struct in a credentials set to match the new UID 474 */ 475 static int set_user(struct cred *new) 476 { 477 struct user_struct *new_user; 478 479 new_user = alloc_uid(new->uid); 480 if (!new_user) 481 return -EAGAIN; 482 483 free_uid(new->user); 484 new->user = new_user; 485 return 0; 486 } 487 488 static void flag_nproc_exceeded(struct cred *new) 489 { 490 if (new->ucounts == current_ucounts()) 491 return; 492 493 /* 494 * We don't fail in case of NPROC limit excess here because too many 495 * poorly written programs don't check set*uid() return code, assuming 496 * it never fails if called by root. We may still enforce NPROC limit 497 * for programs doing set*uid()+execve() by harmlessly deferring the 498 * failure to the execve() stage. 499 */ 500 if (is_rlimit_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) && 501 new->user != INIT_USER) 502 current->flags |= PF_NPROC_EXCEEDED; 503 else 504 current->flags &= ~PF_NPROC_EXCEEDED; 505 } 506 507 /* 508 * Unprivileged users may change the real uid to the effective uid 509 * or vice versa. (BSD-style) 510 * 511 * If you set the real uid at all, or set the effective uid to a value not 512 * equal to the real uid, then the saved uid is set to the new effective uid. 513 * 514 * This makes it possible for a setuid program to completely drop its 515 * privileges, which is often a useful assertion to make when you are doing 516 * a security audit over a program. 517 * 518 * The general idea is that a program which uses just setreuid() will be 519 * 100% compatible with BSD. A program which uses just setuid() will be 520 * 100% compatible with POSIX with saved IDs. 521 */ 522 long __sys_setreuid(uid_t ruid, uid_t euid) 523 { 524 struct user_namespace *ns = current_user_ns(); 525 const struct cred *old; 526 struct cred *new; 527 int retval; 528 kuid_t kruid, keuid; 529 530 kruid = make_kuid(ns, ruid); 531 keuid = make_kuid(ns, euid); 532 533 if ((ruid != (uid_t) -1) && !uid_valid(kruid)) 534 return -EINVAL; 535 if ((euid != (uid_t) -1) && !uid_valid(keuid)) 536 return -EINVAL; 537 538 new = prepare_creds(); 539 if (!new) 540 return -ENOMEM; 541 old = current_cred(); 542 543 retval = -EPERM; 544 if (ruid != (uid_t) -1) { 545 new->uid = kruid; 546 if (!uid_eq(old->uid, kruid) && 547 !uid_eq(old->euid, kruid) && 548 !ns_capable_setid(old->user_ns, CAP_SETUID)) 549 goto error; 550 } 551 552 if (euid != (uid_t) -1) { 553 new->euid = keuid; 554 if (!uid_eq(old->uid, keuid) && 555 !uid_eq(old->euid, keuid) && 556 !uid_eq(old->suid, keuid) && 557 !ns_capable_setid(old->user_ns, CAP_SETUID)) 558 goto error; 559 } 560 561 if (!uid_eq(new->uid, old->uid)) { 562 retval = set_user(new); 563 if (retval < 0) 564 goto error; 565 } 566 if (ruid != (uid_t) -1 || 567 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid))) 568 new->suid = new->euid; 569 new->fsuid = new->euid; 570 571 retval = security_task_fix_setuid(new, old, LSM_SETID_RE); 572 if (retval < 0) 573 goto error; 574 575 retval = set_cred_ucounts(new); 576 if (retval < 0) 577 goto error; 578 579 flag_nproc_exceeded(new); 580 return commit_creds(new); 581 582 error: 583 abort_creds(new); 584 return retval; 585 } 586 587 SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) 588 { 589 return __sys_setreuid(ruid, euid); 590 } 591 592 /* 593 * setuid() is implemented like SysV with SAVED_IDS 594 * 595 * Note that SAVED_ID's is deficient in that a setuid root program 596 * like sendmail, for example, cannot set its uid to be a normal 597 * user and then switch back, because if you're root, setuid() sets 598 * the saved uid too. If you don't like this, blame the bright people 599 * in the POSIX committee and/or USG. Note that the BSD-style setreuid() 600 * will allow a root program to temporarily drop privileges and be able to 601 * regain them by swapping the real and effective uid. 602 */ 603 long __sys_setuid(uid_t uid) 604 { 605 struct user_namespace *ns = current_user_ns(); 606 const struct cred *old; 607 struct cred *new; 608 int retval; 609 kuid_t kuid; 610 611 kuid = make_kuid(ns, uid); 612 if (!uid_valid(kuid)) 613 return -EINVAL; 614 615 new = prepare_creds(); 616 if (!new) 617 return -ENOMEM; 618 old = current_cred(); 619 620 retval = -EPERM; 621 if (ns_capable_setid(old->user_ns, CAP_SETUID)) { 622 new->suid = new->uid = kuid; 623 if (!uid_eq(kuid, old->uid)) { 624 retval = set_user(new); 625 if (retval < 0) 626 goto error; 627 } 628 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) { 629 goto error; 630 } 631 632 new->fsuid = new->euid = kuid; 633 634 retval = security_task_fix_setuid(new, old, LSM_SETID_ID); 635 if (retval < 0) 636 goto error; 637 638 retval = set_cred_ucounts(new); 639 if (retval < 0) 640 goto error; 641 642 flag_nproc_exceeded(new); 643 return commit_creds(new); 644 645 error: 646 abort_creds(new); 647 return retval; 648 } 649 650 SYSCALL_DEFINE1(setuid, uid_t, uid) 651 { 652 return __sys_setuid(uid); 653 } 654 655 656 /* 657 * This function implements a generic ability to update ruid, euid, 658 * and suid. This allows you to implement the 4.4 compatible seteuid(). 659 */ 660 long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) 661 { 662 struct user_namespace *ns = current_user_ns(); 663 const struct cred *old; 664 struct cred *new; 665 int retval; 666 kuid_t kruid, keuid, ksuid; 667 bool ruid_new, euid_new, suid_new; 668 669 kruid = make_kuid(ns, ruid); 670 keuid = make_kuid(ns, euid); 671 ksuid = make_kuid(ns, suid); 672 673 if ((ruid != (uid_t) -1) && !uid_valid(kruid)) 674 return -EINVAL; 675 676 if ((euid != (uid_t) -1) && !uid_valid(keuid)) 677 return -EINVAL; 678 679 if ((suid != (uid_t) -1) && !uid_valid(ksuid)) 680 return -EINVAL; 681 682 old = current_cred(); 683 684 /* check for no-op */ 685 if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) && 686 (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) && 687 uid_eq(keuid, old->fsuid))) && 688 (suid == (uid_t) -1 || uid_eq(ksuid, old->suid))) 689 return 0; 690 691 ruid_new = ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) && 692 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid); 693 euid_new = euid != (uid_t) -1 && !uid_eq(keuid, old->uid) && 694 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid); 695 suid_new = suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) && 696 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid); 697 if ((ruid_new || euid_new || suid_new) && 698 !ns_capable_setid(old->user_ns, CAP_SETUID)) 699 return -EPERM; 700 701 new = prepare_creds(); 702 if (!new) 703 return -ENOMEM; 704 705 if (ruid != (uid_t) -1) { 706 new->uid = kruid; 707 if (!uid_eq(kruid, old->uid)) { 708 retval = set_user(new); 709 if (retval < 0) 710 goto error; 711 } 712 } 713 if (euid != (uid_t) -1) 714 new->euid = keuid; 715 if (suid != (uid_t) -1) 716 new->suid = ksuid; 717 new->fsuid = new->euid; 718 719 retval = security_task_fix_setuid(new, old, LSM_SETID_RES); 720 if (retval < 0) 721 goto error; 722 723 retval = set_cred_ucounts(new); 724 if (retval < 0) 725 goto error; 726 727 flag_nproc_exceeded(new); 728 return commit_creds(new); 729 730 error: 731 abort_creds(new); 732 return retval; 733 } 734 735 SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) 736 { 737 return __sys_setresuid(ruid, euid, suid); 738 } 739 740 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp) 741 { 742 const struct cred *cred = current_cred(); 743 int retval; 744 uid_t ruid, euid, suid; 745 746 ruid = from_kuid_munged(cred->user_ns, cred->uid); 747 euid = from_kuid_munged(cred->user_ns, cred->euid); 748 suid = from_kuid_munged(cred->user_ns, cred->suid); 749 750 retval = put_user(ruid, ruidp); 751 if (!retval) { 752 retval = put_user(euid, euidp); 753 if (!retval) 754 return put_user(suid, suidp); 755 } 756 return retval; 757 } 758 759 /* 760 * Same as above, but for rgid, egid, sgid. 761 */ 762 long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) 763 { 764 struct user_namespace *ns = current_user_ns(); 765 const struct cred *old; 766 struct cred *new; 767 int retval; 768 kgid_t krgid, kegid, ksgid; 769 bool rgid_new, egid_new, sgid_new; 770 771 krgid = make_kgid(ns, rgid); 772 kegid = make_kgid(ns, egid); 773 ksgid = make_kgid(ns, sgid); 774 775 if ((rgid != (gid_t) -1) && !gid_valid(krgid)) 776 return -EINVAL; 777 if ((egid != (gid_t) -1) && !gid_valid(kegid)) 778 return -EINVAL; 779 if ((sgid != (gid_t) -1) && !gid_valid(ksgid)) 780 return -EINVAL; 781 782 old = current_cred(); 783 784 /* check for no-op */ 785 if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) && 786 (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) && 787 gid_eq(kegid, old->fsgid))) && 788 (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid))) 789 return 0; 790 791 rgid_new = rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) && 792 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid); 793 egid_new = egid != (gid_t) -1 && !gid_eq(kegid, old->gid) && 794 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid); 795 sgid_new = sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) && 796 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid); 797 if ((rgid_new || egid_new || sgid_new) && 798 !ns_capable_setid(old->user_ns, CAP_SETGID)) 799 return -EPERM; 800 801 new = prepare_creds(); 802 if (!new) 803 return -ENOMEM; 804 805 if (rgid != (gid_t) -1) 806 new->gid = krgid; 807 if (egid != (gid_t) -1) 808 new->egid = kegid; 809 if (sgid != (gid_t) -1) 810 new->sgid = ksgid; 811 new->fsgid = new->egid; 812 813 retval = security_task_fix_setgid(new, old, LSM_SETID_RES); 814 if (retval < 0) 815 goto error; 816 817 return commit_creds(new); 818 819 error: 820 abort_creds(new); 821 return retval; 822 } 823 824 SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) 825 { 826 return __sys_setresgid(rgid, egid, sgid); 827 } 828 829 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp) 830 { 831 const struct cred *cred = current_cred(); 832 int retval; 833 gid_t rgid, egid, sgid; 834 835 rgid = from_kgid_munged(cred->user_ns, cred->gid); 836 egid = from_kgid_munged(cred->user_ns, cred->egid); 837 sgid = from_kgid_munged(cred->user_ns, cred->sgid); 838 839 retval = put_user(rgid, rgidp); 840 if (!retval) { 841 retval = put_user(egid, egidp); 842 if (!retval) 843 retval = put_user(sgid, sgidp); 844 } 845 846 return retval; 847 } 848 849 850 /* 851 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This 852 * is used for "access()" and for the NFS daemon (letting nfsd stay at 853 * whatever uid it wants to). It normally shadows "euid", except when 854 * explicitly set by setfsuid() or for access.. 855 */ 856 long __sys_setfsuid(uid_t uid) 857 { 858 const struct cred *old; 859 struct cred *new; 860 uid_t old_fsuid; 861 kuid_t kuid; 862 863 old = current_cred(); 864 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid); 865 866 kuid = make_kuid(old->user_ns, uid); 867 if (!uid_valid(kuid)) 868 return old_fsuid; 869 870 new = prepare_creds(); 871 if (!new) 872 return old_fsuid; 873 874 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) || 875 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) || 876 ns_capable_setid(old->user_ns, CAP_SETUID)) { 877 if (!uid_eq(kuid, old->fsuid)) { 878 new->fsuid = kuid; 879 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) 880 goto change_okay; 881 } 882 } 883 884 abort_creds(new); 885 return old_fsuid; 886 887 change_okay: 888 commit_creds(new); 889 return old_fsuid; 890 } 891 892 SYSCALL_DEFINE1(setfsuid, uid_t, uid) 893 { 894 return __sys_setfsuid(uid); 895 } 896 897 /* 898 * Samma på svenska.. 899 */ 900 long __sys_setfsgid(gid_t gid) 901 { 902 const struct cred *old; 903 struct cred *new; 904 gid_t old_fsgid; 905 kgid_t kgid; 906 907 old = current_cred(); 908 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid); 909 910 kgid = make_kgid(old->user_ns, gid); 911 if (!gid_valid(kgid)) 912 return old_fsgid; 913 914 new = prepare_creds(); 915 if (!new) 916 return old_fsgid; 917 918 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) || 919 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) || 920 ns_capable_setid(old->user_ns, CAP_SETGID)) { 921 if (!gid_eq(kgid, old->fsgid)) { 922 new->fsgid = kgid; 923 if (security_task_fix_setgid(new,old,LSM_SETID_FS) == 0) 924 goto change_okay; 925 } 926 } 927 928 abort_creds(new); 929 return old_fsgid; 930 931 change_okay: 932 commit_creds(new); 933 return old_fsgid; 934 } 935 936 SYSCALL_DEFINE1(setfsgid, gid_t, gid) 937 { 938 return __sys_setfsgid(gid); 939 } 940 #endif /* CONFIG_MULTIUSER */ 941 942 /** 943 * sys_getpid - return the thread group id of the current process 944 * 945 * Note, despite the name, this returns the tgid not the pid. The tgid and 946 * the pid are identical unless CLONE_THREAD was specified on clone() in 947 * which case the tgid is the same in all threads of the same group. 948 * 949 * This is SMP safe as current->tgid does not change. 950 */ 951 SYSCALL_DEFINE0(getpid) 952 { 953 return task_tgid_vnr(current); 954 } 955 956 /* Thread ID - the internal kernel "pid" */ 957 SYSCALL_DEFINE0(gettid) 958 { 959 return task_pid_vnr(current); 960 } 961 962 /* 963 * Accessing ->real_parent is not SMP-safe, it could 964 * change from under us. However, we can use a stale 965 * value of ->real_parent under rcu_read_lock(), see 966 * release_task()->call_rcu(delayed_put_task_struct). 967 */ 968 SYSCALL_DEFINE0(getppid) 969 { 970 int pid; 971 972 rcu_read_lock(); 973 pid = task_tgid_vnr(rcu_dereference(current->real_parent)); 974 rcu_read_unlock(); 975 976 return pid; 977 } 978 979 SYSCALL_DEFINE0(getuid) 980 { 981 /* Only we change this so SMP safe */ 982 return from_kuid_munged(current_user_ns(), current_uid()); 983 } 984 985 SYSCALL_DEFINE0(geteuid) 986 { 987 /* Only we change this so SMP safe */ 988 return from_kuid_munged(current_user_ns(), current_euid()); 989 } 990 991 SYSCALL_DEFINE0(getgid) 992 { 993 /* Only we change this so SMP safe */ 994 return from_kgid_munged(current_user_ns(), current_gid()); 995 } 996 997 SYSCALL_DEFINE0(getegid) 998 { 999 /* Only we change this so SMP safe */ 1000 return from_kgid_munged(current_user_ns(), current_egid()); 1001 } 1002 1003 static void do_sys_times(struct tms *tms) 1004 { 1005 u64 tgutime, tgstime, cutime, cstime; 1006 1007 thread_group_cputime_adjusted(current, &tgutime, &tgstime); 1008 cutime = current->signal->cutime; 1009 cstime = current->signal->cstime; 1010 tms->tms_utime = nsec_to_clock_t(tgutime); 1011 tms->tms_stime = nsec_to_clock_t(tgstime); 1012 tms->tms_cutime = nsec_to_clock_t(cutime); 1013 tms->tms_cstime = nsec_to_clock_t(cstime); 1014 } 1015 1016 SYSCALL_DEFINE1(times, struct tms __user *, tbuf) 1017 { 1018 if (tbuf) { 1019 struct tms tmp; 1020 1021 do_sys_times(&tmp); 1022 if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) 1023 return -EFAULT; 1024 } 1025 force_successful_syscall_return(); 1026 return (long) jiffies_64_to_clock_t(get_jiffies_64()); 1027 } 1028 1029 #ifdef CONFIG_COMPAT 1030 static compat_clock_t clock_t_to_compat_clock_t(clock_t x) 1031 { 1032 return compat_jiffies_to_clock_t(clock_t_to_jiffies(x)); 1033 } 1034 1035 COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf) 1036 { 1037 if (tbuf) { 1038 struct tms tms; 1039 struct compat_tms tmp; 1040 1041 do_sys_times(&tms); 1042 /* Convert our struct tms to the compat version. */ 1043 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime); 1044 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime); 1045 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime); 1046 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime); 1047 if (copy_to_user(tbuf, &tmp, sizeof(tmp))) 1048 return -EFAULT; 1049 } 1050 force_successful_syscall_return(); 1051 return compat_jiffies_to_clock_t(jiffies); 1052 } 1053 #endif 1054 1055 /* 1056 * This needs some heavy checking ... 1057 * I just haven't the stomach for it. I also don't fully 1058 * understand sessions/pgrp etc. Let somebody who does explain it. 1059 * 1060 * OK, I think I have the protection semantics right.... this is really 1061 * only important on a multi-user system anyway, to make sure one user 1062 * can't send a signal to a process owned by another. -TYT, 12/12/91 1063 * 1064 * !PF_FORKNOEXEC check to conform completely to POSIX. 1065 */ 1066 SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) 1067 { 1068 struct task_struct *p; 1069 struct task_struct *group_leader = current->group_leader; 1070 struct pid *pgrp; 1071 int err; 1072 1073 if (!pid) 1074 pid = task_pid_vnr(group_leader); 1075 if (!pgid) 1076 pgid = pid; 1077 if (pgid < 0) 1078 return -EINVAL; 1079 rcu_read_lock(); 1080 1081 /* From this point forward we keep holding onto the tasklist lock 1082 * so that our parent does not change from under us. -DaveM 1083 */ 1084 write_lock_irq(&tasklist_lock); 1085 1086 err = -ESRCH; 1087 p = find_task_by_vpid(pid); 1088 if (!p) 1089 goto out; 1090 1091 err = -EINVAL; 1092 if (!thread_group_leader(p)) 1093 goto out; 1094 1095 if (same_thread_group(p->real_parent, group_leader)) { 1096 err = -EPERM; 1097 if (task_session(p) != task_session(group_leader)) 1098 goto out; 1099 err = -EACCES; 1100 if (!(p->flags & PF_FORKNOEXEC)) 1101 goto out; 1102 } else { 1103 err = -ESRCH; 1104 if (p != group_leader) 1105 goto out; 1106 } 1107 1108 err = -EPERM; 1109 if (p->signal->leader) 1110 goto out; 1111 1112 pgrp = task_pid(p); 1113 if (pgid != pid) { 1114 struct task_struct *g; 1115 1116 pgrp = find_vpid(pgid); 1117 g = pid_task(pgrp, PIDTYPE_PGID); 1118 if (!g || task_session(g) != task_session(group_leader)) 1119 goto out; 1120 } 1121 1122 err = security_task_setpgid(p, pgid); 1123 if (err) 1124 goto out; 1125 1126 if (task_pgrp(p) != pgrp) 1127 change_pid(p, PIDTYPE_PGID, pgrp); 1128 1129 err = 0; 1130 out: 1131 /* All paths lead to here, thus we are safe. -DaveM */ 1132 write_unlock_irq(&tasklist_lock); 1133 rcu_read_unlock(); 1134 return err; 1135 } 1136 1137 static int do_getpgid(pid_t pid) 1138 { 1139 struct task_struct *p; 1140 struct pid *grp; 1141 int retval; 1142 1143 rcu_read_lock(); 1144 if (!pid) 1145 grp = task_pgrp(current); 1146 else { 1147 retval = -ESRCH; 1148 p = find_task_by_vpid(pid); 1149 if (!p) 1150 goto out; 1151 grp = task_pgrp(p); 1152 if (!grp) 1153 goto out; 1154 1155 retval = security_task_getpgid(p); 1156 if (retval) 1157 goto out; 1158 } 1159 retval = pid_vnr(grp); 1160 out: 1161 rcu_read_unlock(); 1162 return retval; 1163 } 1164 1165 SYSCALL_DEFINE1(getpgid, pid_t, pid) 1166 { 1167 return do_getpgid(pid); 1168 } 1169 1170 #ifdef __ARCH_WANT_SYS_GETPGRP 1171 1172 SYSCALL_DEFINE0(getpgrp) 1173 { 1174 return do_getpgid(0); 1175 } 1176 1177 #endif 1178 1179 SYSCALL_DEFINE1(getsid, pid_t, pid) 1180 { 1181 struct task_struct *p; 1182 struct pid *sid; 1183 int retval; 1184 1185 rcu_read_lock(); 1186 if (!pid) 1187 sid = task_session(current); 1188 else { 1189 retval = -ESRCH; 1190 p = find_task_by_vpid(pid); 1191 if (!p) 1192 goto out; 1193 sid = task_session(p); 1194 if (!sid) 1195 goto out; 1196 1197 retval = security_task_getsid(p); 1198 if (retval) 1199 goto out; 1200 } 1201 retval = pid_vnr(sid); 1202 out: 1203 rcu_read_unlock(); 1204 return retval; 1205 } 1206 1207 static void set_special_pids(struct pid *pid) 1208 { 1209 struct task_struct *curr = current->group_leader; 1210 1211 if (task_session(curr) != pid) 1212 change_pid(curr, PIDTYPE_SID, pid); 1213 1214 if (task_pgrp(curr) != pid) 1215 change_pid(curr, PIDTYPE_PGID, pid); 1216 } 1217 1218 int ksys_setsid(void) 1219 { 1220 struct task_struct *group_leader = current->group_leader; 1221 struct pid *sid = task_pid(group_leader); 1222 pid_t session = pid_vnr(sid); 1223 int err = -EPERM; 1224 1225 write_lock_irq(&tasklist_lock); 1226 /* Fail if I am already a session leader */ 1227 if (group_leader->signal->leader) 1228 goto out; 1229 1230 /* Fail if a process group id already exists that equals the 1231 * proposed session id. 1232 */ 1233 if (pid_task(sid, PIDTYPE_PGID)) 1234 goto out; 1235 1236 group_leader->signal->leader = 1; 1237 set_special_pids(sid); 1238 1239 proc_clear_tty(group_leader); 1240 1241 err = session; 1242 out: 1243 write_unlock_irq(&tasklist_lock); 1244 if (err > 0) { 1245 proc_sid_connector(group_leader); 1246 sched_autogroup_create_attach(group_leader); 1247 } 1248 return err; 1249 } 1250 1251 SYSCALL_DEFINE0(setsid) 1252 { 1253 return ksys_setsid(); 1254 } 1255 1256 DECLARE_RWSEM(uts_sem); 1257 1258 #ifdef COMPAT_UTS_MACHINE 1259 #define override_architecture(name) \ 1260 (personality(current->personality) == PER_LINUX32 && \ 1261 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ 1262 sizeof(COMPAT_UTS_MACHINE))) 1263 #else 1264 #define override_architecture(name) 0 1265 #endif 1266 1267 /* 1268 * Work around broken programs that cannot handle "Linux 3.0". 1269 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 1270 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be 1271 * 2.6.60. 1272 */ 1273 static int override_release(char __user *release, size_t len) 1274 { 1275 int ret = 0; 1276 1277 if (current->personality & UNAME26) { 1278 const char *rest = UTS_RELEASE; 1279 char buf[65] = { 0 }; 1280 int ndots = 0; 1281 unsigned v; 1282 size_t copy; 1283 1284 while (*rest) { 1285 if (*rest == '.' && ++ndots >= 3) 1286 break; 1287 if (!isdigit(*rest) && *rest != '.') 1288 break; 1289 rest++; 1290 } 1291 v = LINUX_VERSION_PATCHLEVEL + 60; 1292 copy = clamp_t(size_t, len, 1, sizeof(buf)); 1293 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest); 1294 ret = copy_to_user(release, buf, copy + 1); 1295 } 1296 return ret; 1297 } 1298 1299 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) 1300 { 1301 struct new_utsname tmp; 1302 1303 down_read(&uts_sem); 1304 memcpy(&tmp, utsname(), sizeof(tmp)); 1305 up_read(&uts_sem); 1306 if (copy_to_user(name, &tmp, sizeof(tmp))) 1307 return -EFAULT; 1308 1309 if (override_release(name->release, sizeof(name->release))) 1310 return -EFAULT; 1311 if (override_architecture(name)) 1312 return -EFAULT; 1313 return 0; 1314 } 1315 1316 #ifdef __ARCH_WANT_SYS_OLD_UNAME 1317 /* 1318 * Old cruft 1319 */ 1320 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) 1321 { 1322 struct old_utsname tmp; 1323 1324 if (!name) 1325 return -EFAULT; 1326 1327 down_read(&uts_sem); 1328 memcpy(&tmp, utsname(), sizeof(tmp)); 1329 up_read(&uts_sem); 1330 if (copy_to_user(name, &tmp, sizeof(tmp))) 1331 return -EFAULT; 1332 1333 if (override_release(name->release, sizeof(name->release))) 1334 return -EFAULT; 1335 if (override_architecture(name)) 1336 return -EFAULT; 1337 return 0; 1338 } 1339 1340 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) 1341 { 1342 struct oldold_utsname tmp; 1343 1344 if (!name) 1345 return -EFAULT; 1346 1347 memset(&tmp, 0, sizeof(tmp)); 1348 1349 down_read(&uts_sem); 1350 memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); 1351 memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); 1352 memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN); 1353 memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN); 1354 memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN); 1355 up_read(&uts_sem); 1356 if (copy_to_user(name, &tmp, sizeof(tmp))) 1357 return -EFAULT; 1358 1359 if (override_architecture(name)) 1360 return -EFAULT; 1361 if (override_release(name->release, sizeof(name->release))) 1362 return -EFAULT; 1363 return 0; 1364 } 1365 #endif 1366 1367 SYSCALL_DEFINE2(sethostname, char __user *, name, int, len) 1368 { 1369 int errno; 1370 char tmp[__NEW_UTS_LEN]; 1371 1372 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) 1373 return -EPERM; 1374 1375 if (len < 0 || len > __NEW_UTS_LEN) 1376 return -EINVAL; 1377 errno = -EFAULT; 1378 if (!copy_from_user(tmp, name, len)) { 1379 struct new_utsname *u; 1380 1381 add_device_randomness(tmp, len); 1382 down_write(&uts_sem); 1383 u = utsname(); 1384 memcpy(u->nodename, tmp, len); 1385 memset(u->nodename + len, 0, sizeof(u->nodename) - len); 1386 errno = 0; 1387 uts_proc_notify(UTS_PROC_HOSTNAME); 1388 up_write(&uts_sem); 1389 } 1390 return errno; 1391 } 1392 1393 #ifdef __ARCH_WANT_SYS_GETHOSTNAME 1394 1395 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) 1396 { 1397 int i; 1398 struct new_utsname *u; 1399 char tmp[__NEW_UTS_LEN + 1]; 1400 1401 if (len < 0) 1402 return -EINVAL; 1403 down_read(&uts_sem); 1404 u = utsname(); 1405 i = 1 + strlen(u->nodename); 1406 if (i > len) 1407 i = len; 1408 memcpy(tmp, u->nodename, i); 1409 up_read(&uts_sem); 1410 if (copy_to_user(name, tmp, i)) 1411 return -EFAULT; 1412 return 0; 1413 } 1414 1415 #endif 1416 1417 /* 1418 * Only setdomainname; getdomainname can be implemented by calling 1419 * uname() 1420 */ 1421 SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) 1422 { 1423 int errno; 1424 char tmp[__NEW_UTS_LEN]; 1425 1426 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN)) 1427 return -EPERM; 1428 if (len < 0 || len > __NEW_UTS_LEN) 1429 return -EINVAL; 1430 1431 errno = -EFAULT; 1432 if (!copy_from_user(tmp, name, len)) { 1433 struct new_utsname *u; 1434 1435 add_device_randomness(tmp, len); 1436 down_write(&uts_sem); 1437 u = utsname(); 1438 memcpy(u->domainname, tmp, len); 1439 memset(u->domainname + len, 0, sizeof(u->domainname) - len); 1440 errno = 0; 1441 uts_proc_notify(UTS_PROC_DOMAINNAME); 1442 up_write(&uts_sem); 1443 } 1444 return errno; 1445 } 1446 1447 /* make sure you are allowed to change @tsk limits before calling this */ 1448 static int do_prlimit(struct task_struct *tsk, unsigned int resource, 1449 struct rlimit *new_rlim, struct rlimit *old_rlim) 1450 { 1451 struct rlimit *rlim; 1452 int retval = 0; 1453 1454 if (resource >= RLIM_NLIMITS) 1455 return -EINVAL; 1456 resource = array_index_nospec(resource, RLIM_NLIMITS); 1457 1458 if (new_rlim) { 1459 if (new_rlim->rlim_cur > new_rlim->rlim_max) 1460 return -EINVAL; 1461 if (resource == RLIMIT_NOFILE && 1462 new_rlim->rlim_max > sysctl_nr_open) 1463 return -EPERM; 1464 } 1465 1466 /* Holding a refcount on tsk protects tsk->signal from disappearing. */ 1467 rlim = tsk->signal->rlim + resource; 1468 task_lock(tsk->group_leader); 1469 if (new_rlim) { 1470 /* 1471 * Keep the capable check against init_user_ns until cgroups can 1472 * contain all limits. 1473 */ 1474 if (new_rlim->rlim_max > rlim->rlim_max && 1475 !capable(CAP_SYS_RESOURCE)) 1476 retval = -EPERM; 1477 if (!retval) 1478 retval = security_task_setrlimit(tsk, resource, new_rlim); 1479 } 1480 if (!retval) { 1481 if (old_rlim) 1482 *old_rlim = *rlim; 1483 if (new_rlim) 1484 *rlim = *new_rlim; 1485 } 1486 task_unlock(tsk->group_leader); 1487 1488 /* 1489 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not 1490 * infinite. In case of RLIM_INFINITY the posix CPU timer code 1491 * ignores the rlimit. 1492 */ 1493 if (!retval && new_rlim && resource == RLIMIT_CPU && 1494 new_rlim->rlim_cur != RLIM_INFINITY && 1495 IS_ENABLED(CONFIG_POSIX_TIMERS)) { 1496 /* 1497 * update_rlimit_cpu can fail if the task is exiting, but there 1498 * may be other tasks in the thread group that are not exiting, 1499 * and they need their cpu timers adjusted. 1500 * 1501 * The group_leader is the last task to be released, so if we 1502 * cannot update_rlimit_cpu on it, then the entire process is 1503 * exiting and we do not need to update at all. 1504 */ 1505 update_rlimit_cpu(tsk->group_leader, new_rlim->rlim_cur); 1506 } 1507 1508 return retval; 1509 } 1510 1511 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim) 1512 { 1513 struct rlimit value; 1514 int ret; 1515 1516 ret = do_prlimit(current, resource, NULL, &value); 1517 if (!ret) 1518 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; 1519 1520 return ret; 1521 } 1522 1523 #ifdef CONFIG_COMPAT 1524 1525 COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource, 1526 struct compat_rlimit __user *, rlim) 1527 { 1528 struct rlimit r; 1529 struct compat_rlimit r32; 1530 1531 if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit))) 1532 return -EFAULT; 1533 1534 if (r32.rlim_cur == COMPAT_RLIM_INFINITY) 1535 r.rlim_cur = RLIM_INFINITY; 1536 else 1537 r.rlim_cur = r32.rlim_cur; 1538 if (r32.rlim_max == COMPAT_RLIM_INFINITY) 1539 r.rlim_max = RLIM_INFINITY; 1540 else 1541 r.rlim_max = r32.rlim_max; 1542 return do_prlimit(current, resource, &r, NULL); 1543 } 1544 1545 COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource, 1546 struct compat_rlimit __user *, rlim) 1547 { 1548 struct rlimit r; 1549 int ret; 1550 1551 ret = do_prlimit(current, resource, NULL, &r); 1552 if (!ret) { 1553 struct compat_rlimit r32; 1554 if (r.rlim_cur > COMPAT_RLIM_INFINITY) 1555 r32.rlim_cur = COMPAT_RLIM_INFINITY; 1556 else 1557 r32.rlim_cur = r.rlim_cur; 1558 if (r.rlim_max > COMPAT_RLIM_INFINITY) 1559 r32.rlim_max = COMPAT_RLIM_INFINITY; 1560 else 1561 r32.rlim_max = r.rlim_max; 1562 1563 if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit))) 1564 return -EFAULT; 1565 } 1566 return ret; 1567 } 1568 1569 #endif 1570 1571 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT 1572 1573 /* 1574 * Back compatibility for getrlimit. Needed for some apps. 1575 */ 1576 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, 1577 struct rlimit __user *, rlim) 1578 { 1579 struct rlimit x; 1580 if (resource >= RLIM_NLIMITS) 1581 return -EINVAL; 1582 1583 resource = array_index_nospec(resource, RLIM_NLIMITS); 1584 task_lock(current->group_leader); 1585 x = current->signal->rlim[resource]; 1586 task_unlock(current->group_leader); 1587 if (x.rlim_cur > 0x7FFFFFFF) 1588 x.rlim_cur = 0x7FFFFFFF; 1589 if (x.rlim_max > 0x7FFFFFFF) 1590 x.rlim_max = 0x7FFFFFFF; 1591 return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0; 1592 } 1593 1594 #ifdef CONFIG_COMPAT 1595 COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource, 1596 struct compat_rlimit __user *, rlim) 1597 { 1598 struct rlimit r; 1599 1600 if (resource >= RLIM_NLIMITS) 1601 return -EINVAL; 1602 1603 resource = array_index_nospec(resource, RLIM_NLIMITS); 1604 task_lock(current->group_leader); 1605 r = current->signal->rlim[resource]; 1606 task_unlock(current->group_leader); 1607 if (r.rlim_cur > 0x7FFFFFFF) 1608 r.rlim_cur = 0x7FFFFFFF; 1609 if (r.rlim_max > 0x7FFFFFFF) 1610 r.rlim_max = 0x7FFFFFFF; 1611 1612 if (put_user(r.rlim_cur, &rlim->rlim_cur) || 1613 put_user(r.rlim_max, &rlim->rlim_max)) 1614 return -EFAULT; 1615 return 0; 1616 } 1617 #endif 1618 1619 #endif 1620 1621 static inline bool rlim64_is_infinity(__u64 rlim64) 1622 { 1623 #if BITS_PER_LONG < 64 1624 return rlim64 >= ULONG_MAX; 1625 #else 1626 return rlim64 == RLIM64_INFINITY; 1627 #endif 1628 } 1629 1630 static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64) 1631 { 1632 if (rlim->rlim_cur == RLIM_INFINITY) 1633 rlim64->rlim_cur = RLIM64_INFINITY; 1634 else 1635 rlim64->rlim_cur = rlim->rlim_cur; 1636 if (rlim->rlim_max == RLIM_INFINITY) 1637 rlim64->rlim_max = RLIM64_INFINITY; 1638 else 1639 rlim64->rlim_max = rlim->rlim_max; 1640 } 1641 1642 static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim) 1643 { 1644 if (rlim64_is_infinity(rlim64->rlim_cur)) 1645 rlim->rlim_cur = RLIM_INFINITY; 1646 else 1647 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur; 1648 if (rlim64_is_infinity(rlim64->rlim_max)) 1649 rlim->rlim_max = RLIM_INFINITY; 1650 else 1651 rlim->rlim_max = (unsigned long)rlim64->rlim_max; 1652 } 1653 1654 /* rcu lock must be held */ 1655 static int check_prlimit_permission(struct task_struct *task, 1656 unsigned int flags) 1657 { 1658 const struct cred *cred = current_cred(), *tcred; 1659 bool id_match; 1660 1661 if (current == task) 1662 return 0; 1663 1664 tcred = __task_cred(task); 1665 id_match = (uid_eq(cred->uid, tcred->euid) && 1666 uid_eq(cred->uid, tcred->suid) && 1667 uid_eq(cred->uid, tcred->uid) && 1668 gid_eq(cred->gid, tcred->egid) && 1669 gid_eq(cred->gid, tcred->sgid) && 1670 gid_eq(cred->gid, tcred->gid)); 1671 if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE)) 1672 return -EPERM; 1673 1674 return security_task_prlimit(cred, tcred, flags); 1675 } 1676 1677 SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource, 1678 const struct rlimit64 __user *, new_rlim, 1679 struct rlimit64 __user *, old_rlim) 1680 { 1681 struct rlimit64 old64, new64; 1682 struct rlimit old, new; 1683 struct task_struct *tsk; 1684 unsigned int checkflags = 0; 1685 int ret; 1686 1687 if (old_rlim) 1688 checkflags |= LSM_PRLIMIT_READ; 1689 1690 if (new_rlim) { 1691 if (copy_from_user(&new64, new_rlim, sizeof(new64))) 1692 return -EFAULT; 1693 rlim64_to_rlim(&new64, &new); 1694 checkflags |= LSM_PRLIMIT_WRITE; 1695 } 1696 1697 rcu_read_lock(); 1698 tsk = pid ? find_task_by_vpid(pid) : current; 1699 if (!tsk) { 1700 rcu_read_unlock(); 1701 return -ESRCH; 1702 } 1703 ret = check_prlimit_permission(tsk, checkflags); 1704 if (ret) { 1705 rcu_read_unlock(); 1706 return ret; 1707 } 1708 get_task_struct(tsk); 1709 rcu_read_unlock(); 1710 1711 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL, 1712 old_rlim ? &old : NULL); 1713 1714 if (!ret && old_rlim) { 1715 rlim_to_rlim64(&old, &old64); 1716 if (copy_to_user(old_rlim, &old64, sizeof(old64))) 1717 ret = -EFAULT; 1718 } 1719 1720 put_task_struct(tsk); 1721 return ret; 1722 } 1723 1724 SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim) 1725 { 1726 struct rlimit new_rlim; 1727 1728 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) 1729 return -EFAULT; 1730 return do_prlimit(current, resource, &new_rlim, NULL); 1731 } 1732 1733 /* 1734 * It would make sense to put struct rusage in the task_struct, 1735 * except that would make the task_struct be *really big*. After 1736 * task_struct gets moved into malloc'ed memory, it would 1737 * make sense to do this. It will make moving the rest of the information 1738 * a lot simpler! (Which we're not doing right now because we're not 1739 * measuring them yet). 1740 * 1741 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have 1742 * races with threads incrementing their own counters. But since word 1743 * reads are atomic, we either get new values or old values and we don't 1744 * care which for the sums. We always take the siglock to protect reading 1745 * the c* fields from p->signal from races with exit.c updating those 1746 * fields when reaping, so a sample either gets all the additions of a 1747 * given child after it's reaped, or none so this sample is before reaping. 1748 * 1749 * Locking: 1750 * We need to take the siglock for CHILDEREN, SELF and BOTH 1751 * for the cases current multithreaded, non-current single threaded 1752 * non-current multithreaded. Thread traversal is now safe with 1753 * the siglock held. 1754 * Strictly speaking, we donot need to take the siglock if we are current and 1755 * single threaded, as no one else can take our signal_struct away, no one 1756 * else can reap the children to update signal->c* counters, and no one else 1757 * can race with the signal-> fields. If we do not take any lock, the 1758 * signal-> fields could be read out of order while another thread was just 1759 * exiting. So we should place a read memory barrier when we avoid the lock. 1760 * On the writer side, write memory barrier is implied in __exit_signal 1761 * as __exit_signal releases the siglock spinlock after updating the signal-> 1762 * fields. But we don't do this yet to keep things simple. 1763 * 1764 */ 1765 1766 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r) 1767 { 1768 r->ru_nvcsw += t->nvcsw; 1769 r->ru_nivcsw += t->nivcsw; 1770 r->ru_minflt += t->min_flt; 1771 r->ru_majflt += t->maj_flt; 1772 r->ru_inblock += task_io_get_inblock(t); 1773 r->ru_oublock += task_io_get_oublock(t); 1774 } 1775 1776 void getrusage(struct task_struct *p, int who, struct rusage *r) 1777 { 1778 struct task_struct *t; 1779 unsigned long flags; 1780 u64 tgutime, tgstime, utime, stime; 1781 unsigned long maxrss = 0; 1782 1783 memset((char *)r, 0, sizeof (*r)); 1784 utime = stime = 0; 1785 1786 if (who == RUSAGE_THREAD) { 1787 task_cputime_adjusted(current, &utime, &stime); 1788 accumulate_thread_rusage(p, r); 1789 maxrss = p->signal->maxrss; 1790 goto out; 1791 } 1792 1793 if (!lock_task_sighand(p, &flags)) 1794 return; 1795 1796 switch (who) { 1797 case RUSAGE_BOTH: 1798 case RUSAGE_CHILDREN: 1799 utime = p->signal->cutime; 1800 stime = p->signal->cstime; 1801 r->ru_nvcsw = p->signal->cnvcsw; 1802 r->ru_nivcsw = p->signal->cnivcsw; 1803 r->ru_minflt = p->signal->cmin_flt; 1804 r->ru_majflt = p->signal->cmaj_flt; 1805 r->ru_inblock = p->signal->cinblock; 1806 r->ru_oublock = p->signal->coublock; 1807 maxrss = p->signal->cmaxrss; 1808 1809 if (who == RUSAGE_CHILDREN) 1810 break; 1811 fallthrough; 1812 1813 case RUSAGE_SELF: 1814 thread_group_cputime_adjusted(p, &tgutime, &tgstime); 1815 utime += tgutime; 1816 stime += tgstime; 1817 r->ru_nvcsw += p->signal->nvcsw; 1818 r->ru_nivcsw += p->signal->nivcsw; 1819 r->ru_minflt += p->signal->min_flt; 1820 r->ru_majflt += p->signal->maj_flt; 1821 r->ru_inblock += p->signal->inblock; 1822 r->ru_oublock += p->signal->oublock; 1823 if (maxrss < p->signal->maxrss) 1824 maxrss = p->signal->maxrss; 1825 t = p; 1826 do { 1827 accumulate_thread_rusage(t, r); 1828 } while_each_thread(p, t); 1829 break; 1830 1831 default: 1832 BUG(); 1833 } 1834 unlock_task_sighand(p, &flags); 1835 1836 out: 1837 r->ru_utime = ns_to_kernel_old_timeval(utime); 1838 r->ru_stime = ns_to_kernel_old_timeval(stime); 1839 1840 if (who != RUSAGE_CHILDREN) { 1841 struct mm_struct *mm = get_task_mm(p); 1842 1843 if (mm) { 1844 setmax_mm_hiwater_rss(&maxrss, mm); 1845 mmput(mm); 1846 } 1847 } 1848 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */ 1849 } 1850 1851 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru) 1852 { 1853 struct rusage r; 1854 1855 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && 1856 who != RUSAGE_THREAD) 1857 return -EINVAL; 1858 1859 getrusage(current, who, &r); 1860 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; 1861 } 1862 1863 #ifdef CONFIG_COMPAT 1864 COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru) 1865 { 1866 struct rusage r; 1867 1868 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && 1869 who != RUSAGE_THREAD) 1870 return -EINVAL; 1871 1872 getrusage(current, who, &r); 1873 return put_compat_rusage(&r, ru); 1874 } 1875 #endif 1876 1877 SYSCALL_DEFINE1(umask, int, mask) 1878 { 1879 mask = xchg(¤t->fs->umask, mask & S_IRWXUGO); 1880 return mask; 1881 } 1882 1883 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) 1884 { 1885 struct fd exe; 1886 struct inode *inode; 1887 int err; 1888 1889 exe = fdget(fd); 1890 if (!exe.file) 1891 return -EBADF; 1892 1893 inode = file_inode(exe.file); 1894 1895 /* 1896 * Because the original mm->exe_file points to executable file, make 1897 * sure that this one is executable as well, to avoid breaking an 1898 * overall picture. 1899 */ 1900 err = -EACCES; 1901 if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path)) 1902 goto exit; 1903 1904 err = file_permission(exe.file, MAY_EXEC); 1905 if (err) 1906 goto exit; 1907 1908 err = replace_mm_exe_file(mm, exe.file); 1909 exit: 1910 fdput(exe); 1911 return err; 1912 } 1913 1914 /* 1915 * Check arithmetic relations of passed addresses. 1916 * 1917 * WARNING: we don't require any capability here so be very careful 1918 * in what is allowed for modification from userspace. 1919 */ 1920 static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map) 1921 { 1922 unsigned long mmap_max_addr = TASK_SIZE; 1923 int error = -EINVAL, i; 1924 1925 static const unsigned char offsets[] = { 1926 offsetof(struct prctl_mm_map, start_code), 1927 offsetof(struct prctl_mm_map, end_code), 1928 offsetof(struct prctl_mm_map, start_data), 1929 offsetof(struct prctl_mm_map, end_data), 1930 offsetof(struct prctl_mm_map, start_brk), 1931 offsetof(struct prctl_mm_map, brk), 1932 offsetof(struct prctl_mm_map, start_stack), 1933 offsetof(struct prctl_mm_map, arg_start), 1934 offsetof(struct prctl_mm_map, arg_end), 1935 offsetof(struct prctl_mm_map, env_start), 1936 offsetof(struct prctl_mm_map, env_end), 1937 }; 1938 1939 /* 1940 * Make sure the members are not somewhere outside 1941 * of allowed address space. 1942 */ 1943 for (i = 0; i < ARRAY_SIZE(offsets); i++) { 1944 u64 val = *(u64 *)((char *)prctl_map + offsets[i]); 1945 1946 if ((unsigned long)val >= mmap_max_addr || 1947 (unsigned long)val < mmap_min_addr) 1948 goto out; 1949 } 1950 1951 /* 1952 * Make sure the pairs are ordered. 1953 */ 1954 #define __prctl_check_order(__m1, __op, __m2) \ 1955 ((unsigned long)prctl_map->__m1 __op \ 1956 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL 1957 error = __prctl_check_order(start_code, <, end_code); 1958 error |= __prctl_check_order(start_data,<=, end_data); 1959 error |= __prctl_check_order(start_brk, <=, brk); 1960 error |= __prctl_check_order(arg_start, <=, arg_end); 1961 error |= __prctl_check_order(env_start, <=, env_end); 1962 if (error) 1963 goto out; 1964 #undef __prctl_check_order 1965 1966 error = -EINVAL; 1967 1968 /* 1969 * Neither we should allow to override limits if they set. 1970 */ 1971 if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk, 1972 prctl_map->start_brk, prctl_map->end_data, 1973 prctl_map->start_data)) 1974 goto out; 1975 1976 error = 0; 1977 out: 1978 return error; 1979 } 1980 1981 #ifdef CONFIG_CHECKPOINT_RESTORE 1982 static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size) 1983 { 1984 struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, }; 1985 unsigned long user_auxv[AT_VECTOR_SIZE]; 1986 struct mm_struct *mm = current->mm; 1987 int error; 1988 1989 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv)); 1990 BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256); 1991 1992 if (opt == PR_SET_MM_MAP_SIZE) 1993 return put_user((unsigned int)sizeof(prctl_map), 1994 (unsigned int __user *)addr); 1995 1996 if (data_size != sizeof(prctl_map)) 1997 return -EINVAL; 1998 1999 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map))) 2000 return -EFAULT; 2001 2002 error = validate_prctl_map_addr(&prctl_map); 2003 if (error) 2004 return error; 2005 2006 if (prctl_map.auxv_size) { 2007 /* 2008 * Someone is trying to cheat the auxv vector. 2009 */ 2010 if (!prctl_map.auxv || 2011 prctl_map.auxv_size > sizeof(mm->saved_auxv)) 2012 return -EINVAL; 2013 2014 memset(user_auxv, 0, sizeof(user_auxv)); 2015 if (copy_from_user(user_auxv, 2016 (const void __user *)prctl_map.auxv, 2017 prctl_map.auxv_size)) 2018 return -EFAULT; 2019 2020 /* Last entry must be AT_NULL as specification requires */ 2021 user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL; 2022 user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; 2023 } 2024 2025 if (prctl_map.exe_fd != (u32)-1) { 2026 /* 2027 * Check if the current user is checkpoint/restore capable. 2028 * At the time of this writing, it checks for CAP_SYS_ADMIN 2029 * or CAP_CHECKPOINT_RESTORE. 2030 * Note that a user with access to ptrace can masquerade an 2031 * arbitrary program as any executable, even setuid ones. 2032 * This may have implications in the tomoyo subsystem. 2033 */ 2034 if (!checkpoint_restore_ns_capable(current_user_ns())) 2035 return -EPERM; 2036 2037 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd); 2038 if (error) 2039 return error; 2040 } 2041 2042 /* 2043 * arg_lock protects concurrent updates but we still need mmap_lock for 2044 * read to exclude races with sys_brk. 2045 */ 2046 mmap_read_lock(mm); 2047 2048 /* 2049 * We don't validate if these members are pointing to 2050 * real present VMAs because application may have correspond 2051 * VMAs already unmapped and kernel uses these members for statistics 2052 * output in procfs mostly, except 2053 * 2054 * - @start_brk/@brk which are used in do_brk_flags but kernel lookups 2055 * for VMAs when updating these members so anything wrong written 2056 * here cause kernel to swear at userspace program but won't lead 2057 * to any problem in kernel itself 2058 */ 2059 2060 spin_lock(&mm->arg_lock); 2061 mm->start_code = prctl_map.start_code; 2062 mm->end_code = prctl_map.end_code; 2063 mm->start_data = prctl_map.start_data; 2064 mm->end_data = prctl_map.end_data; 2065 mm->start_brk = prctl_map.start_brk; 2066 mm->brk = prctl_map.brk; 2067 mm->start_stack = prctl_map.start_stack; 2068 mm->arg_start = prctl_map.arg_start; 2069 mm->arg_end = prctl_map.arg_end; 2070 mm->env_start = prctl_map.env_start; 2071 mm->env_end = prctl_map.env_end; 2072 spin_unlock(&mm->arg_lock); 2073 2074 /* 2075 * Note this update of @saved_auxv is lockless thus 2076 * if someone reads this member in procfs while we're 2077 * updating -- it may get partly updated results. It's 2078 * known and acceptable trade off: we leave it as is to 2079 * not introduce additional locks here making the kernel 2080 * more complex. 2081 */ 2082 if (prctl_map.auxv_size) 2083 memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv)); 2084 2085 mmap_read_unlock(mm); 2086 return 0; 2087 } 2088 #endif /* CONFIG_CHECKPOINT_RESTORE */ 2089 2090 static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr, 2091 unsigned long len) 2092 { 2093 /* 2094 * This doesn't move the auxiliary vector itself since it's pinned to 2095 * mm_struct, but it permits filling the vector with new values. It's 2096 * up to the caller to provide sane values here, otherwise userspace 2097 * tools which use this vector might be unhappy. 2098 */ 2099 unsigned long user_auxv[AT_VECTOR_SIZE] = {}; 2100 2101 if (len > sizeof(user_auxv)) 2102 return -EINVAL; 2103 2104 if (copy_from_user(user_auxv, (const void __user *)addr, len)) 2105 return -EFAULT; 2106 2107 /* Make sure the last entry is always AT_NULL */ 2108 user_auxv[AT_VECTOR_SIZE - 2] = 0; 2109 user_auxv[AT_VECTOR_SIZE - 1] = 0; 2110 2111 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv)); 2112 2113 task_lock(current); 2114 memcpy(mm->saved_auxv, user_auxv, len); 2115 task_unlock(current); 2116 2117 return 0; 2118 } 2119 2120 static int prctl_set_mm(int opt, unsigned long addr, 2121 unsigned long arg4, unsigned long arg5) 2122 { 2123 struct mm_struct *mm = current->mm; 2124 struct prctl_mm_map prctl_map = { 2125 .auxv = NULL, 2126 .auxv_size = 0, 2127 .exe_fd = -1, 2128 }; 2129 struct vm_area_struct *vma; 2130 int error; 2131 2132 if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV && 2133 opt != PR_SET_MM_MAP && 2134 opt != PR_SET_MM_MAP_SIZE))) 2135 return -EINVAL; 2136 2137 #ifdef CONFIG_CHECKPOINT_RESTORE 2138 if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE) 2139 return prctl_set_mm_map(opt, (const void __user *)addr, arg4); 2140 #endif 2141 2142 if (!capable(CAP_SYS_RESOURCE)) 2143 return -EPERM; 2144 2145 if (opt == PR_SET_MM_EXE_FILE) 2146 return prctl_set_mm_exe_file(mm, (unsigned int)addr); 2147 2148 if (opt == PR_SET_MM_AUXV) 2149 return prctl_set_auxv(mm, addr, arg4); 2150 2151 if (addr >= TASK_SIZE || addr < mmap_min_addr) 2152 return -EINVAL; 2153 2154 error = -EINVAL; 2155 2156 /* 2157 * arg_lock protects concurrent updates of arg boundaries, we need 2158 * mmap_lock for a) concurrent sys_brk, b) finding VMA for addr 2159 * validation. 2160 */ 2161 mmap_read_lock(mm); 2162 vma = find_vma(mm, addr); 2163 2164 spin_lock(&mm->arg_lock); 2165 prctl_map.start_code = mm->start_code; 2166 prctl_map.end_code = mm->end_code; 2167 prctl_map.start_data = mm->start_data; 2168 prctl_map.end_data = mm->end_data; 2169 prctl_map.start_brk = mm->start_brk; 2170 prctl_map.brk = mm->brk; 2171 prctl_map.start_stack = mm->start_stack; 2172 prctl_map.arg_start = mm->arg_start; 2173 prctl_map.arg_end = mm->arg_end; 2174 prctl_map.env_start = mm->env_start; 2175 prctl_map.env_end = mm->env_end; 2176 2177 switch (opt) { 2178 case PR_SET_MM_START_CODE: 2179 prctl_map.start_code = addr; 2180 break; 2181 case PR_SET_MM_END_CODE: 2182 prctl_map.end_code = addr; 2183 break; 2184 case PR_SET_MM_START_DATA: 2185 prctl_map.start_data = addr; 2186 break; 2187 case PR_SET_MM_END_DATA: 2188 prctl_map.end_data = addr; 2189 break; 2190 case PR_SET_MM_START_STACK: 2191 prctl_map.start_stack = addr; 2192 break; 2193 case PR_SET_MM_START_BRK: 2194 prctl_map.start_brk = addr; 2195 break; 2196 case PR_SET_MM_BRK: 2197 prctl_map.brk = addr; 2198 break; 2199 case PR_SET_MM_ARG_START: 2200 prctl_map.arg_start = addr; 2201 break; 2202 case PR_SET_MM_ARG_END: 2203 prctl_map.arg_end = addr; 2204 break; 2205 case PR_SET_MM_ENV_START: 2206 prctl_map.env_start = addr; 2207 break; 2208 case PR_SET_MM_ENV_END: 2209 prctl_map.env_end = addr; 2210 break; 2211 default: 2212 goto out; 2213 } 2214 2215 error = validate_prctl_map_addr(&prctl_map); 2216 if (error) 2217 goto out; 2218 2219 switch (opt) { 2220 /* 2221 * If command line arguments and environment 2222 * are placed somewhere else on stack, we can 2223 * set them up here, ARG_START/END to setup 2224 * command line arguments and ENV_START/END 2225 * for environment. 2226 */ 2227 case PR_SET_MM_START_STACK: 2228 case PR_SET_MM_ARG_START: 2229 case PR_SET_MM_ARG_END: 2230 case PR_SET_MM_ENV_START: 2231 case PR_SET_MM_ENV_END: 2232 if (!vma) { 2233 error = -EFAULT; 2234 goto out; 2235 } 2236 } 2237 2238 mm->start_code = prctl_map.start_code; 2239 mm->end_code = prctl_map.end_code; 2240 mm->start_data = prctl_map.start_data; 2241 mm->end_data = prctl_map.end_data; 2242 mm->start_brk = prctl_map.start_brk; 2243 mm->brk = prctl_map.brk; 2244 mm->start_stack = prctl_map.start_stack; 2245 mm->arg_start = prctl_map.arg_start; 2246 mm->arg_end = prctl_map.arg_end; 2247 mm->env_start = prctl_map.env_start; 2248 mm->env_end = prctl_map.env_end; 2249 2250 error = 0; 2251 out: 2252 spin_unlock(&mm->arg_lock); 2253 mmap_read_unlock(mm); 2254 return error; 2255 } 2256 2257 #ifdef CONFIG_CHECKPOINT_RESTORE 2258 static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr) 2259 { 2260 return put_user(me->clear_child_tid, tid_addr); 2261 } 2262 #else 2263 static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr) 2264 { 2265 return -EINVAL; 2266 } 2267 #endif 2268 2269 static int propagate_has_child_subreaper(struct task_struct *p, void *data) 2270 { 2271 /* 2272 * If task has has_child_subreaper - all its descendants 2273 * already have these flag too and new descendants will 2274 * inherit it on fork, skip them. 2275 * 2276 * If we've found child_reaper - skip descendants in 2277 * it's subtree as they will never get out pidns. 2278 */ 2279 if (p->signal->has_child_subreaper || 2280 is_child_reaper(task_pid(p))) 2281 return 0; 2282 2283 p->signal->has_child_subreaper = 1; 2284 return 1; 2285 } 2286 2287 int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which) 2288 { 2289 return -EINVAL; 2290 } 2291 2292 int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which, 2293 unsigned long ctrl) 2294 { 2295 return -EINVAL; 2296 } 2297 2298 #define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE) 2299 2300 #ifdef CONFIG_ANON_VMA_NAME 2301 2302 #define ANON_VMA_NAME_MAX_LEN 80 2303 #define ANON_VMA_NAME_INVALID_CHARS "\\`$[]" 2304 2305 static inline bool is_valid_name_char(char ch) 2306 { 2307 /* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */ 2308 return ch > 0x1f && ch < 0x7f && 2309 !strchr(ANON_VMA_NAME_INVALID_CHARS, ch); 2310 } 2311 2312 static int prctl_set_vma(unsigned long opt, unsigned long addr, 2313 unsigned long size, unsigned long arg) 2314 { 2315 struct mm_struct *mm = current->mm; 2316 const char __user *uname; 2317 struct anon_vma_name *anon_name = NULL; 2318 int error; 2319 2320 switch (opt) { 2321 case PR_SET_VMA_ANON_NAME: 2322 uname = (const char __user *)arg; 2323 if (uname) { 2324 char *name, *pch; 2325 2326 name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN); 2327 if (IS_ERR(name)) 2328 return PTR_ERR(name); 2329 2330 for (pch = name; *pch != '\0'; pch++) { 2331 if (!is_valid_name_char(*pch)) { 2332 kfree(name); 2333 return -EINVAL; 2334 } 2335 } 2336 /* anon_vma has its own copy */ 2337 anon_name = anon_vma_name_alloc(name); 2338 kfree(name); 2339 if (!anon_name) 2340 return -ENOMEM; 2341 2342 } 2343 2344 mmap_write_lock(mm); 2345 error = madvise_set_anon_name(mm, addr, size, anon_name); 2346 mmap_write_unlock(mm); 2347 anon_vma_name_put(anon_name); 2348 break; 2349 default: 2350 error = -EINVAL; 2351 } 2352 2353 return error; 2354 } 2355 2356 #else /* CONFIG_ANON_VMA_NAME */ 2357 static int prctl_set_vma(unsigned long opt, unsigned long start, 2358 unsigned long size, unsigned long arg) 2359 { 2360 return -EINVAL; 2361 } 2362 #endif /* CONFIG_ANON_VMA_NAME */ 2363 2364 static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3, 2365 unsigned long arg4, unsigned long arg5) 2366 { 2367 if (arg3 || arg4 || arg5) 2368 return -EINVAL; 2369 2370 if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN)) 2371 return -EINVAL; 2372 2373 if (bits & PR_MDWE_REFUSE_EXEC_GAIN) 2374 set_bit(MMF_HAS_MDWE, ¤t->mm->flags); 2375 else if (test_bit(MMF_HAS_MDWE, ¤t->mm->flags)) 2376 return -EPERM; /* Cannot unset the flag */ 2377 2378 return 0; 2379 } 2380 2381 static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3, 2382 unsigned long arg4, unsigned long arg5) 2383 { 2384 if (arg2 || arg3 || arg4 || arg5) 2385 return -EINVAL; 2386 2387 return test_bit(MMF_HAS_MDWE, ¤t->mm->flags) ? 2388 PR_MDWE_REFUSE_EXEC_GAIN : 0; 2389 } 2390 2391 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, 2392 unsigned long, arg4, unsigned long, arg5) 2393 { 2394 struct task_struct *me = current; 2395 unsigned char comm[sizeof(me->comm)]; 2396 long error; 2397 2398 error = security_task_prctl(option, arg2, arg3, arg4, arg5); 2399 if (error != -ENOSYS) 2400 return error; 2401 2402 error = 0; 2403 switch (option) { 2404 case PR_SET_PDEATHSIG: 2405 if (!valid_signal(arg2)) { 2406 error = -EINVAL; 2407 break; 2408 } 2409 me->pdeath_signal = arg2; 2410 break; 2411 case PR_GET_PDEATHSIG: 2412 error = put_user(me->pdeath_signal, (int __user *)arg2); 2413 break; 2414 case PR_GET_DUMPABLE: 2415 error = get_dumpable(me->mm); 2416 break; 2417 case PR_SET_DUMPABLE: 2418 if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) { 2419 error = -EINVAL; 2420 break; 2421 } 2422 set_dumpable(me->mm, arg2); 2423 break; 2424 2425 case PR_SET_UNALIGN: 2426 error = SET_UNALIGN_CTL(me, arg2); 2427 break; 2428 case PR_GET_UNALIGN: 2429 error = GET_UNALIGN_CTL(me, arg2); 2430 break; 2431 case PR_SET_FPEMU: 2432 error = SET_FPEMU_CTL(me, arg2); 2433 break; 2434 case PR_GET_FPEMU: 2435 error = GET_FPEMU_CTL(me, arg2); 2436 break; 2437 case PR_SET_FPEXC: 2438 error = SET_FPEXC_CTL(me, arg2); 2439 break; 2440 case PR_GET_FPEXC: 2441 error = GET_FPEXC_CTL(me, arg2); 2442 break; 2443 case PR_GET_TIMING: 2444 error = PR_TIMING_STATISTICAL; 2445 break; 2446 case PR_SET_TIMING: 2447 if (arg2 != PR_TIMING_STATISTICAL) 2448 error = -EINVAL; 2449 break; 2450 case PR_SET_NAME: 2451 comm[sizeof(me->comm) - 1] = 0; 2452 if (strncpy_from_user(comm, (char __user *)arg2, 2453 sizeof(me->comm) - 1) < 0) 2454 return -EFAULT; 2455 set_task_comm(me, comm); 2456 proc_comm_connector(me); 2457 break; 2458 case PR_GET_NAME: 2459 get_task_comm(comm, me); 2460 if (copy_to_user((char __user *)arg2, comm, sizeof(comm))) 2461 return -EFAULT; 2462 break; 2463 case PR_GET_ENDIAN: 2464 error = GET_ENDIAN(me, arg2); 2465 break; 2466 case PR_SET_ENDIAN: 2467 error = SET_ENDIAN(me, arg2); 2468 break; 2469 case PR_GET_SECCOMP: 2470 error = prctl_get_seccomp(); 2471 break; 2472 case PR_SET_SECCOMP: 2473 error = prctl_set_seccomp(arg2, (char __user *)arg3); 2474 break; 2475 case PR_GET_TSC: 2476 error = GET_TSC_CTL(arg2); 2477 break; 2478 case PR_SET_TSC: 2479 error = SET_TSC_CTL(arg2); 2480 break; 2481 case PR_TASK_PERF_EVENTS_DISABLE: 2482 error = perf_event_task_disable(); 2483 break; 2484 case PR_TASK_PERF_EVENTS_ENABLE: 2485 error = perf_event_task_enable(); 2486 break; 2487 case PR_GET_TIMERSLACK: 2488 if (current->timer_slack_ns > ULONG_MAX) 2489 error = ULONG_MAX; 2490 else 2491 error = current->timer_slack_ns; 2492 break; 2493 case PR_SET_TIMERSLACK: 2494 if (arg2 <= 0) 2495 current->timer_slack_ns = 2496 current->default_timer_slack_ns; 2497 else 2498 current->timer_slack_ns = arg2; 2499 break; 2500 case PR_MCE_KILL: 2501 if (arg4 | arg5) 2502 return -EINVAL; 2503 switch (arg2) { 2504 case PR_MCE_KILL_CLEAR: 2505 if (arg3 != 0) 2506 return -EINVAL; 2507 current->flags &= ~PF_MCE_PROCESS; 2508 break; 2509 case PR_MCE_KILL_SET: 2510 current->flags |= PF_MCE_PROCESS; 2511 if (arg3 == PR_MCE_KILL_EARLY) 2512 current->flags |= PF_MCE_EARLY; 2513 else if (arg3 == PR_MCE_KILL_LATE) 2514 current->flags &= ~PF_MCE_EARLY; 2515 else if (arg3 == PR_MCE_KILL_DEFAULT) 2516 current->flags &= 2517 ~(PF_MCE_EARLY|PF_MCE_PROCESS); 2518 else 2519 return -EINVAL; 2520 break; 2521 default: 2522 return -EINVAL; 2523 } 2524 break; 2525 case PR_MCE_KILL_GET: 2526 if (arg2 | arg3 | arg4 | arg5) 2527 return -EINVAL; 2528 if (current->flags & PF_MCE_PROCESS) 2529 error = (current->flags & PF_MCE_EARLY) ? 2530 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE; 2531 else 2532 error = PR_MCE_KILL_DEFAULT; 2533 break; 2534 case PR_SET_MM: 2535 error = prctl_set_mm(arg2, arg3, arg4, arg5); 2536 break; 2537 case PR_GET_TID_ADDRESS: 2538 error = prctl_get_tid_address(me, (int __user * __user *)arg2); 2539 break; 2540 case PR_SET_CHILD_SUBREAPER: 2541 me->signal->is_child_subreaper = !!arg2; 2542 if (!arg2) 2543 break; 2544 2545 walk_process_tree(me, propagate_has_child_subreaper, NULL); 2546 break; 2547 case PR_GET_CHILD_SUBREAPER: 2548 error = put_user(me->signal->is_child_subreaper, 2549 (int __user *)arg2); 2550 break; 2551 case PR_SET_NO_NEW_PRIVS: 2552 if (arg2 != 1 || arg3 || arg4 || arg5) 2553 return -EINVAL; 2554 2555 task_set_no_new_privs(current); 2556 break; 2557 case PR_GET_NO_NEW_PRIVS: 2558 if (arg2 || arg3 || arg4 || arg5) 2559 return -EINVAL; 2560 return task_no_new_privs(current) ? 1 : 0; 2561 case PR_GET_THP_DISABLE: 2562 if (arg2 || arg3 || arg4 || arg5) 2563 return -EINVAL; 2564 error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags); 2565 break; 2566 case PR_SET_THP_DISABLE: 2567 if (arg3 || arg4 || arg5) 2568 return -EINVAL; 2569 if (mmap_write_lock_killable(me->mm)) 2570 return -EINTR; 2571 if (arg2) 2572 set_bit(MMF_DISABLE_THP, &me->mm->flags); 2573 else 2574 clear_bit(MMF_DISABLE_THP, &me->mm->flags); 2575 mmap_write_unlock(me->mm); 2576 break; 2577 case PR_MPX_ENABLE_MANAGEMENT: 2578 case PR_MPX_DISABLE_MANAGEMENT: 2579 /* No longer implemented: */ 2580 return -EINVAL; 2581 case PR_SET_FP_MODE: 2582 error = SET_FP_MODE(me, arg2); 2583 break; 2584 case PR_GET_FP_MODE: 2585 error = GET_FP_MODE(me); 2586 break; 2587 case PR_SVE_SET_VL: 2588 error = SVE_SET_VL(arg2); 2589 break; 2590 case PR_SVE_GET_VL: 2591 error = SVE_GET_VL(); 2592 break; 2593 case PR_SME_SET_VL: 2594 error = SME_SET_VL(arg2); 2595 break; 2596 case PR_SME_GET_VL: 2597 error = SME_GET_VL(); 2598 break; 2599 case PR_GET_SPECULATION_CTRL: 2600 if (arg3 || arg4 || arg5) 2601 return -EINVAL; 2602 error = arch_prctl_spec_ctrl_get(me, arg2); 2603 break; 2604 case PR_SET_SPECULATION_CTRL: 2605 if (arg4 || arg5) 2606 return -EINVAL; 2607 error = arch_prctl_spec_ctrl_set(me, arg2, arg3); 2608 break; 2609 case PR_PAC_RESET_KEYS: 2610 if (arg3 || arg4 || arg5) 2611 return -EINVAL; 2612 error = PAC_RESET_KEYS(me, arg2); 2613 break; 2614 case PR_PAC_SET_ENABLED_KEYS: 2615 if (arg4 || arg5) 2616 return -EINVAL; 2617 error = PAC_SET_ENABLED_KEYS(me, arg2, arg3); 2618 break; 2619 case PR_PAC_GET_ENABLED_KEYS: 2620 if (arg2 || arg3 || arg4 || arg5) 2621 return -EINVAL; 2622 error = PAC_GET_ENABLED_KEYS(me); 2623 break; 2624 case PR_SET_TAGGED_ADDR_CTRL: 2625 if (arg3 || arg4 || arg5) 2626 return -EINVAL; 2627 error = SET_TAGGED_ADDR_CTRL(arg2); 2628 break; 2629 case PR_GET_TAGGED_ADDR_CTRL: 2630 if (arg2 || arg3 || arg4 || arg5) 2631 return -EINVAL; 2632 error = GET_TAGGED_ADDR_CTRL(); 2633 break; 2634 case PR_SET_IO_FLUSHER: 2635 if (!capable(CAP_SYS_RESOURCE)) 2636 return -EPERM; 2637 2638 if (arg3 || arg4 || arg5) 2639 return -EINVAL; 2640 2641 if (arg2 == 1) 2642 current->flags |= PR_IO_FLUSHER; 2643 else if (!arg2) 2644 current->flags &= ~PR_IO_FLUSHER; 2645 else 2646 return -EINVAL; 2647 break; 2648 case PR_GET_IO_FLUSHER: 2649 if (!capable(CAP_SYS_RESOURCE)) 2650 return -EPERM; 2651 2652 if (arg2 || arg3 || arg4 || arg5) 2653 return -EINVAL; 2654 2655 error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER; 2656 break; 2657 case PR_SET_SYSCALL_USER_DISPATCH: 2658 error = set_syscall_user_dispatch(arg2, arg3, arg4, 2659 (char __user *) arg5); 2660 break; 2661 #ifdef CONFIG_SCHED_CORE 2662 case PR_SCHED_CORE: 2663 error = sched_core_share_pid(arg2, arg3, arg4, arg5); 2664 break; 2665 #endif 2666 case PR_SET_MDWE: 2667 error = prctl_set_mdwe(arg2, arg3, arg4, arg5); 2668 break; 2669 case PR_GET_MDWE: 2670 error = prctl_get_mdwe(arg2, arg3, arg4, arg5); 2671 break; 2672 case PR_SET_VMA: 2673 error = prctl_set_vma(arg2, arg3, arg4, arg5); 2674 break; 2675 default: 2676 error = -EINVAL; 2677 break; 2678 } 2679 return error; 2680 } 2681 2682 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, 2683 struct getcpu_cache __user *, unused) 2684 { 2685 int err = 0; 2686 int cpu = raw_smp_processor_id(); 2687 2688 if (cpup) 2689 err |= put_user(cpu, cpup); 2690 if (nodep) 2691 err |= put_user(cpu_to_node(cpu), nodep); 2692 return err ? -EFAULT : 0; 2693 } 2694 2695 /** 2696 * do_sysinfo - fill in sysinfo struct 2697 * @info: pointer to buffer to fill 2698 */ 2699 static int do_sysinfo(struct sysinfo *info) 2700 { 2701 unsigned long mem_total, sav_total; 2702 unsigned int mem_unit, bitcount; 2703 struct timespec64 tp; 2704 2705 memset(info, 0, sizeof(struct sysinfo)); 2706 2707 ktime_get_boottime_ts64(&tp); 2708 timens_add_boottime(&tp); 2709 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); 2710 2711 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT); 2712 2713 info->procs = nr_threads; 2714 2715 si_meminfo(info); 2716 si_swapinfo(info); 2717 2718 /* 2719 * If the sum of all the available memory (i.e. ram + swap) 2720 * is less than can be stored in a 32 bit unsigned long then 2721 * we can be binary compatible with 2.2.x kernels. If not, 2722 * well, in that case 2.2.x was broken anyways... 2723 * 2724 * -Erik Andersen <andersee@debian.org> 2725 */ 2726 2727 mem_total = info->totalram + info->totalswap; 2728 if (mem_total < info->totalram || mem_total < info->totalswap) 2729 goto out; 2730 bitcount = 0; 2731 mem_unit = info->mem_unit; 2732 while (mem_unit > 1) { 2733 bitcount++; 2734 mem_unit >>= 1; 2735 sav_total = mem_total; 2736 mem_total <<= 1; 2737 if (mem_total < sav_total) 2738 goto out; 2739 } 2740 2741 /* 2742 * If mem_total did not overflow, multiply all memory values by 2743 * info->mem_unit and set it to 1. This leaves things compatible 2744 * with 2.2.x, and also retains compatibility with earlier 2.4.x 2745 * kernels... 2746 */ 2747 2748 info->mem_unit = 1; 2749 info->totalram <<= bitcount; 2750 info->freeram <<= bitcount; 2751 info->sharedram <<= bitcount; 2752 info->bufferram <<= bitcount; 2753 info->totalswap <<= bitcount; 2754 info->freeswap <<= bitcount; 2755 info->totalhigh <<= bitcount; 2756 info->freehigh <<= bitcount; 2757 2758 out: 2759 return 0; 2760 } 2761 2762 SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info) 2763 { 2764 struct sysinfo val; 2765 2766 do_sysinfo(&val); 2767 2768 if (copy_to_user(info, &val, sizeof(struct sysinfo))) 2769 return -EFAULT; 2770 2771 return 0; 2772 } 2773 2774 #ifdef CONFIG_COMPAT 2775 struct compat_sysinfo { 2776 s32 uptime; 2777 u32 loads[3]; 2778 u32 totalram; 2779 u32 freeram; 2780 u32 sharedram; 2781 u32 bufferram; 2782 u32 totalswap; 2783 u32 freeswap; 2784 u16 procs; 2785 u16 pad; 2786 u32 totalhigh; 2787 u32 freehigh; 2788 u32 mem_unit; 2789 char _f[20-2*sizeof(u32)-sizeof(int)]; 2790 }; 2791 2792 COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info) 2793 { 2794 struct sysinfo s; 2795 struct compat_sysinfo s_32; 2796 2797 do_sysinfo(&s); 2798 2799 /* Check to see if any memory value is too large for 32-bit and scale 2800 * down if needed 2801 */ 2802 if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) { 2803 int bitcount = 0; 2804 2805 while (s.mem_unit < PAGE_SIZE) { 2806 s.mem_unit <<= 1; 2807 bitcount++; 2808 } 2809 2810 s.totalram >>= bitcount; 2811 s.freeram >>= bitcount; 2812 s.sharedram >>= bitcount; 2813 s.bufferram >>= bitcount; 2814 s.totalswap >>= bitcount; 2815 s.freeswap >>= bitcount; 2816 s.totalhigh >>= bitcount; 2817 s.freehigh >>= bitcount; 2818 } 2819 2820 memset(&s_32, 0, sizeof(s_32)); 2821 s_32.uptime = s.uptime; 2822 s_32.loads[0] = s.loads[0]; 2823 s_32.loads[1] = s.loads[1]; 2824 s_32.loads[2] = s.loads[2]; 2825 s_32.totalram = s.totalram; 2826 s_32.freeram = s.freeram; 2827 s_32.sharedram = s.sharedram; 2828 s_32.bufferram = s.bufferram; 2829 s_32.totalswap = s.totalswap; 2830 s_32.freeswap = s.freeswap; 2831 s_32.procs = s.procs; 2832 s_32.totalhigh = s.totalhigh; 2833 s_32.freehigh = s.freehigh; 2834 s_32.mem_unit = s.mem_unit; 2835 if (copy_to_user(info, &s_32, sizeof(s_32))) 2836 return -EFAULT; 2837 return 0; 2838 } 2839 #endif /* CONFIG_COMPAT */ 2840