// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_inline.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/syscall_user_dispatch.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/time_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/nospec.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "uid16.h"

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a,b)	(-EINVAL)
#endif
#ifndef SVE_SET_VL
# define SVE_SET_VL(a)		(-EINVAL)
#endif
#ifndef SVE_GET_VL
# define SVE_GET_VL()		(-EINVAL)
#endif
#ifndef SME_SET_VL
# define SME_SET_VL(a)		(-EINVAL)
#endif
#ifndef SME_GET_VL
# define SME_GET_VL()		(-EINVAL)
#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b)	(-EINVAL)
#endif
#ifndef PAC_SET_ENABLED_KEYS
# define PAC_SET_ENABLED_KEYS(a, b, c)	(-EINVAL)
#endif
#ifndef PAC_GET_ENABLED_KEYS
# define PAC_GET_ENABLED_KEYS(a)	(-EINVAL)
#endif
#ifndef SET_TAGGED_ADDR_CTRL
# define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
#endif
#ifndef GET_TAGGED_ADDR_CTRL
# define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * Returns true if current's euid is the same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid, cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		read_lock(&tasklist_lock);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		read_unlock(&tasklist_lock);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		for_each_process_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		}
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	rcu_read_unlock();

	return retval;
}
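
/*
 * Illustrative userspace sketch (not kernel code): because of the offset
 * described above, the raw syscall result must be un-biased, and since -1
 * is a legal result, errno has to be cleared first to detect errors:
 *
 *	errno = 0;
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	if (ret == -1 && errno)
 *		perror("getpriority");
 *	else
 *		printf("nice = %ld\n", 20 - ret);  // ret is 40..1 -> nice -20..19
 *
 * (glibc's getpriority() wrapper performs the 20 - ret conversion itself.)
 */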

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
long __sys_setregid(gid_t rgid, gid_t egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable_setid(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	return __sys_setregid(rgid, egid);
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
long __sys_setgid(gid_t gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	return __sys_setgid(gid);
}

/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

static void flag_nproc_exceeded(struct cred *new)
{
	if (new->ucounts == current_ucounts())
		return;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (is_rlimit_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
			new->user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;
}
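
/*
 * Illustrative userspace sketch (not kernel code): a setgid program can
 * permanently drop its elevated group as described above, because setting
 * the real gid also resets the saved gid:
 *
 *	gid_t rgid = getgid();			// the invoking user's gid
 *	if (setregid(rgid, rgid) != 0)		// real + effective (and saved) -> rgid
 *		abort();			// never ignore set*id() failures
 *
 * After this, the elevated egid can no longer be regained via setgid()/setegid().
 */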

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
long __sys_setreuid(uid_t ruid, uid_t euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	return __sys_setreuid(ruid, euid);
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
long __sys_setuid(uid_t uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	return __sys_setuid(uid);
}


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	retval = set_cred_ucounts(new);
	if (retval < 0)
		goto error;

	flag_nproc_exceeded(new);
	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	return __sys_setresuid(ruid, euid, suid);
}
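
/*
 * Illustrative userspace sketch (not kernel code): as the comment above
 * says, setresuid() is general enough to express the 4.4BSD seteuid();
 * passing -1 leaves a field unchanged:
 *
 *	uid_t saved_euid = geteuid();		// hypothetical: captured at startup
 *	if (setresuid(-1, getuid(), -1) != 0)	// drop privilege, keep it in suid
 *		abort();
 *	// ... unprivileged work ...
 *	if (setresuid(-1, saved_euid, -1) != 0)	// regain it from the saved uid
 *		abort();
 */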

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	return __sys_setresgid(rgid, egid, sgid);
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
long __sys_setfsuid(uid_t uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	return __sys_setfsuid(uid);
}

/*
 * Same as above, but for the fsgid. ("Samma på svenska" - same in Swedish.)
 */
long __sys_setfsgid(gid_t gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	return __sys_setfsgid(gid);
}
#endif /* CONFIG_MULTIUSER */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}
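
/*
 * Illustrative userspace sketch (not kernel code): setfsuid() returns the
 * *previous* fsuid even when the change is refused, so the only way to
 * detect failure is to probe again and compare:
 *
 *	uid_t prev = setfsuid(uid);		// attempt the change
 *	if ((uid_t)setfsuid(-1) != uid)		// -1 is never valid: reads back fsuid
 *		fprintf(stderr, "setfsuid(%u) had no effect (fsuid still %u)\n",
 *			uid, prev);
 */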

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}

#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif
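
/*
 * Illustrative userspace sketch (not kernel code): times() reports CPU
 * usage in clock ticks, converted with sysconf(_SC_CLK_TCK). The return
 * value is itself a tick count since an arbitrary point in the past
 * (jiffies since boot, here), which can look like a negative errno; the
 * force_successful_syscall_return() above exists for exactly that reason.
 *
 *	struct tms t;
 *	clock_t now = times(&t);
 *	long hz = sysconf(_SC_CLK_TCK);
 *	printf("user %.2fs sys %.2fs\n",
 *	       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
 */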

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}

static int do_getpgid(pid_t pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	return do_getpgid(pid);
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return do_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

int ksys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

SYSCALL_DEFINE0(setsid)
{
	return ksys_setsid();
}
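
/*
 * Illustrative userspace sketch (not kernel code): the classic
 * daemonization step relies on the rule enforced above - a process group
 * leader cannot setsid() - so the child of a fork(), which is never a
 * group leader, performs it:
 *
 *	if (fork() > 0)
 *		_exit(0);	// parent leaves
 *	if (setsid() < 0)	// child becomes session + group leader,
 *		abort();	// detached from the controlling tty
 */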

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
 * 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = LINUX_VERSION_PATCHLEVEL + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}

SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	struct new_utsname tmp;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	struct old_utsname tmp;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	struct oldold_utsname tmp;

	if (!name)
		return -EFAULT;

	memset(&tmp, 0, sizeof(tmp));

	down_read(&uts_sem);
	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_architecture(name))
		return -EFAULT;
	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	return 0;
}
#endif
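
/*
 * Illustrative note (not kernel code): the UNAME26 personality can be
 * entered from userspace, e.g. with a sufficiently recent util-linux
 * setarch, after which uname(2) reports the remapped release computed
 * above:
 *
 *	$ setarch --uname-2.6 uname -r	# e.g. "2.6.61" on a 6.1 kernel
 */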

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		add_device_randomness(tmp, len);
		down_write(&uts_sem);
		u = utsname();
		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
		up_write(&uts_sem);
	}
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i;
	struct new_utsname *u;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	memcpy(tmp, u->nodename, i);
	up_read(&uts_sem);
	if (copy_to_user(name, tmp, i))
		return -EFAULT;
	return 0;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		add_device_randomness(tmp, len);
		down_write(&uts_sem);
		u = utsname();
		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
		up_write(&uts_sem);
	}
	return errno;
}

/* make sure you are allowed to change @tsk limits before calling this */
static int do_prlimit(struct task_struct *tsk, unsigned int resource,
		      struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	resource = array_index_nospec(resource, RLIM_NLIMITS);

	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
		    new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* Holding a refcount on tsk protects tsk->signal from disappearing. */
	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/*
		 * Keep the capable check against init_user_ns until cgroups can
		 * contain all limits.
		 */
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
	 * ignores the rlimit.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS)) {
		/*
		 * update_rlimit_cpu can fail if the task is exiting, but there
		 * may be other tasks in the thread group that are not exiting,
		 * and they need their cpu timers adjusted.
		 *
		 * The group_leader is the last task to be released, so if we
		 * cannot update_rlimit_cpu on it, then the entire process is
		 * exiting and we do not need to update at all.
		 */
		update_rlimit_cpu(tsk->group_leader, new_rlim->rlim_cur);
	}

	return retval;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;

		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid)  &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			 old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
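
/*
 * Illustrative userspace sketch (not kernel code): prlimit64 reads and/or
 * writes another task's limits in one call; glibc exposes it as prlimit().
 * Passing NULL for either pointer skips that half of the operation:
 *
 *	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 8192 }, old;
 *	if (prlimit(pid, RLIMIT_NOFILE, &new, &old) == 0)   // pid 0 == self
 *		printf("NOFILE was %llu/%llu\n",
 *		       (unsigned long long)old.rlim_cur,
 *		       (unsigned long long)old.rlim_max);
 */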

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *)r, 0, sizeof(*r));
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;
		fallthrough;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	r->ru_utime = ns_to_kernel_old_timeval(utime);
	r->ru_stime = ns_to_kernel_old_timeval(stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024);	/* convert pages to KBs */
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
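
/*
 * Illustrative userspace sketch (not kernel code): note that ru_maxrss is
 * reported in kilobytes, per the page-to-KB conversion above.
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("peak RSS %ld kB, %ld minor / %ld major faults\n",
 *		       ru.ru_maxrss, ru.ru_minflt, ru.ru_majflt);
 */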

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to executable file, make
	 * sure that this one is executable as well, to avoid breaking an
	 * overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = file_permission(exe.file, MAY_EXEC);
	if (err)
		goto exit;

	err = replace_mm_exe_file(mm, exe.file);
exit:
	fdput(exe);
	return err;
}

/*
 * Check arithmetic relations of passed addresses.
 *
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data,<=, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * Neither should we allow overriding the limits if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	error = 0;
out:
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map_addr(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		/*
		 * Someone is trying to cheat the auxv vector.
		 */
		if (!prctl_map.auxv ||
		    prctl_map.auxv_size > sizeof(mm->saved_auxv))
			return -EINVAL;

		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		/*
		 * Check if the current user is checkpoint/restore capable.
		 * At the time of this writing, it checks for CAP_SYS_ADMIN
		 * or CAP_CHECKPOINT_RESTORE.
		 * Note that a user with access to ptrace can masquerade an
		 * arbitrary program as any executable, even setuid ones.
		 * This may have implications in the tomoyo subsystem.
		 */
		if (!checkpoint_restore_ns_capable(current_user_ns()))
			return -EPERM;

		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	/*
	 * arg_lock protects concurrent updates but we still need mmap_lock for
	 * read to exclude races with sys_brk.
	 */
	mmap_read_lock(mm);

	/*
	 * We don't validate that these members point to real, present VMAs,
	 * because the application may already have unmapped the corresponding
	 * VMAs, and the kernel uses these members mostly for statistics output
	 * in procfs, except
	 *
	 * - @start_brk/@brk, which are used in do_brk_flags; but the kernel
	 *   looks up the VMAs when updating these members, so anything wrong
	 *   written here causes the kernel to swear at the userspace program
	 *   but won't lead to any problem in the kernel itself
	 */

	spin_lock(&mm->arg_lock);
	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;
	spin_unlock(&mm->arg_lock);

	/*
	 * Note this update of @saved_auxv is lockless, thus if someone reads
	 * this member in procfs while we're updating it, they may get partly
	 * updated results. It's a known and acceptable trade-off: we leave it
	 * as is so as not to introduce additional locks and make the kernel
	 * more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	mmap_read_unlock(mm);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */
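
/*
 * Illustrative userspace sketch (not kernel code, CRIU-style restore):
 * the calling convention implied by the size/option handling above:
 *
 *	struct prctl_mm_map map = { ... };	// filled in by the restorer
 *	unsigned int sz;
 *	prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, (long)&sz, 0, 0);	// query size
 *	prctl(PR_SET_MM, PR_SET_MM_MAP, (long)&map, sz, 0);	// apply map
 */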

static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values.  It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE] = {};

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
{
	return -EINVAL;
}
#endif

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task already has has_child_subreaper set, all its
	 * descendants have the flag too and new descendants will inherit
	 * it on fork, so skip them.
	 *
	 * If we've found the child_reaper, skip the descendants in its
	 * subtree as they will never get out of that pidns.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
{
	return -EINVAL;
}

int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
				    unsigned long ctrl)
{
	return -EINVAL;
}

#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)

#ifdef CONFIG_ANON_VMA_NAME

#define ANON_VMA_NAME_MAX_LEN		80
#define ANON_VMA_NAME_INVALID_CHARS	"\\`$[]"

static inline bool is_valid_name_char(char ch)
{
	/* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */
	return ch > 0x1f && ch < 0x7f &&
		!strchr(ANON_VMA_NAME_INVALID_CHARS, ch);
}

static int prctl_set_vma(unsigned long opt, unsigned long addr,
			 unsigned long size, unsigned long arg)
{
	struct mm_struct *mm = current->mm;
	const char __user *uname;
	struct anon_vma_name *anon_name = NULL;
	int error;

	switch (opt) {
	case PR_SET_VMA_ANON_NAME:
		uname = (const char __user *)arg;
		if (uname) {
			char *name, *pch;

			name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
			if (IS_ERR(name))
				return PTR_ERR(name);

			for (pch = name; *pch != '\0'; pch++) {
				if (!is_valid_name_char(*pch)) {
					kfree(name);
					return -EINVAL;
				}
			}
			/* anon_vma has its own copy */
			anon_name = anon_vma_name_alloc(name);
			kfree(name);
			if (!anon_name)
				return -ENOMEM;

		}

		mmap_write_lock(mm);
		error = madvise_set_anon_name(mm, addr, size, anon_name);
		mmap_write_unlock(mm);
		anon_vma_name_put(anon_name);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

#else /* CONFIG_ANON_VMA_NAME */
static int prctl_set_vma(unsigned long opt, unsigned long start,
			 unsigned long size, unsigned long arg)
{
	return -EINVAL;
}
#endif /* CONFIG_ANON_VMA_NAME */
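
/*
 * Illustrative userspace sketch (not part of this file): naming an anonymous
 * mapping so it shows up as "[anon:thread stack]" in /proc/<pid>/maps. "p"
 * and "len" are placeholders; the name may only use characters accepted by
 * is_valid_name_char() above.
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)p, len,
 *	      (unsigned long)"thread stack");
 */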

static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
				 unsigned long arg4, unsigned long arg5)
{
	if (arg3 || arg4 || arg5)
		return -EINVAL;

	if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN))
		return -EINVAL;

	if (bits & PR_MDWE_REFUSE_EXEC_GAIN)
		set_bit(MMF_HAS_MDWE, &current->mm->flags);
	else if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
		return -EPERM; /* Cannot unset the flag */

	return 0;
}

static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3,
				 unsigned long arg4, unsigned long arg5)
{
	if (arg2 || arg3 || arg4 || arg5)
		return -EINVAL;

	return test_bit(MMF_HAS_MDWE, &current->mm->flags) ?
		PR_MDWE_REFUSE_EXEC_GAIN : 0;
}
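
/*
 * Illustrative userspace sketch (not part of this file): a hardened service
 * opts in to memory-deny-write-execute once, early in main(). The setting is
 * one way -- trying to clear it later fails with EPERM by design.
 *
 *	if (prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0))
 *		err(1, "PR_SET_MDWE");
 */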

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
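	/*
	 * Illustrative userspace sketch (not part of this file): a
	 * power-sensitive thread can coarsen its wakeup timing with the
	 * option above; 50000 ns (50 us) is a placeholder value, and
	 * passing 0 restores the task's default slack.
	 *
	 *	prctl(PR_SET_TIMERSLACK, 50000, 0, 0, 0);
	 */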
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (mmap_write_lock_killable(me->mm))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		mmap_write_unlock(me->mm);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
	case PR_MPX_DISABLE_MANAGEMENT:
		/* No longer implemented: */
		return -EINVAL;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SVE_SET_VL:
		error = SVE_SET_VL(arg2);
		break;
	case PR_SVE_GET_VL:
		error = SVE_GET_VL();
		break;
	case PR_SME_SET_VL:
		error = SME_SET_VL(arg2);
		break;
	case PR_SME_GET_VL:
		error = SME_GET_VL();
		break;
	case PR_GET_SPECULATION_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_get(me, arg2);
		break;
	case PR_SET_SPECULATION_CTRL:
		if (arg4 || arg5)
			return -EINVAL;
		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
		break;
	case PR_PAC_RESET_KEYS:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_RESET_KEYS(me, arg2);
		break;
	case PR_PAC_SET_ENABLED_KEYS:
		if (arg4 || arg5)
			return -EINVAL;
		error = PAC_SET_ENABLED_KEYS(me, arg2, arg3);
		break;
	case PR_PAC_GET_ENABLED_KEYS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = PAC_GET_ENABLED_KEYS(me);
		break;
	case PR_SET_TAGGED_ADDR_CTRL:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		error = SET_TAGGED_ADDR_CTRL(arg2);
		break;
	case PR_GET_TAGGED_ADDR_CTRL:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = GET_TAGGED_ADDR_CTRL();
		break;
	case PR_SET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg3 || arg4 || arg5)
			return -EINVAL;

		if (arg2 == 1)
			current->flags |= PR_IO_FLUSHER;
		else if (!arg2)
			current->flags &= ~PR_IO_FLUSHER;
		else
			return -EINVAL;
		break;
	case PR_GET_IO_FLUSHER:
		if (!capable(CAP_SYS_RESOURCE))
			return -EPERM;

		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;

		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
		break;
	case PR_SET_SYSCALL_USER_DISPATCH:
		error = set_syscall_user_dispatch(arg2, arg3, arg4,
						  (char __user *) arg5);
		break;
#ifdef CONFIG_SCHED_CORE
	case PR_SCHED_CORE:
		error = sched_core_share_pid(arg2, arg3, arg4, arg5);
		break;
#endif
	case PR_SET_MDWE:
		error = prctl_set_mdwe(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_MDWE:
		error = prctl_get_mdwe(arg2, arg3, arg4, arg5);
		break;
	case PR_SET_VMA:
		error = prctl_set_vma(arg2, arg3, arg4, arg5);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
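
/*
 * Illustrative userspace sketch (not part of this file): most programs reach
 * sys_getcpu() through the glibc sched_getcpu() wrapper, often via the vDSO
 * rather than this slow path. The third "cache" argument is unused and should
 * be NULL when issuing the raw syscall.
 *
 *	unsigned int cpu, node;
 *
 *	syscall(SYS_getcpu, &cpu, &node, NULL);
 */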

SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec64 tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_boottime_ts64(&tp);
	timens_add_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}
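
	/*
	 * Worked example of the loop above (illustrative numbers): with
	 * mem_unit == PAGE_SIZE == 4096 and mem_total == ram + swap in
	 * pages, the loop shifts mem_total left 12 times, i.e. computes
	 * mem_total * 4096 in byte units, bailing out to "out" if any
	 * shift wraps past the top of an unsigned long.
	 */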

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;
	struct compat_sysinfo s_32;

	do_sysinfo(&s);

	/*
	 * Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	memset(&s_32, 0, sizeof(s_32));
	s_32.uptime = s.uptime;
	s_32.loads[0] = s.loads[0];
	s_32.loads[1] = s.loads[1];
	s_32.loads[2] = s.loads[2];
	s_32.totalram = s.totalram;
	s_32.freeram = s.freeram;
	s_32.sharedram = s.sharedram;
	s_32.bufferram = s.bufferram;
	s_32.totalswap = s.totalswap;
	s_32.freeswap = s.freeswap;
	s_32.procs = s.procs;
	s_32.totalhigh = s.totalhigh;
	s_32.freehigh = s.freehigh;
	s_32.mem_unit = s.mem_unit;
	if (copy_to_user(info, &s_32, sizeof(s_32)))
		return -EFAULT;
	return 0;
}
#endif /* CONFIG_COMPAT */