/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>

#include <linux/compat.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)   (-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)   (-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)     (-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)     (-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)     (-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)     (-EINVAL)
#endif

/*
 * This is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past.
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * The same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures.
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * This indicates whether you can reboot with ctrl-alt-del: the default is yes.
 */

int C_A_D = 1;
int cad_pid = 1;

/*
 * Notifier list for kernel code which wants to be called
 * at shutdown. This is used to stop any idling DMA operations
 * and the like.
 */

static struct notifier_block *reboot_notifier_list;
static DEFINE_RWLOCK(notifier_lock);

/**
 * notifier_chain_register - Add notifier to a notifier chain
 * @list: Pointer to root list pointer
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a notifier chain. Entries are kept sorted by
 * descending @n->priority, so higher-priority notifiers run first.
 *
 * Currently always returns zero.
 */

int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
{
        write_lock(&notifier_lock);
        while (*list) {
                if (n->priority > (*list)->priority)
                        break;
                list = &((*list)->next);
        }
        n->next = *list;
        *list = n;
        write_unlock(&notifier_lock);
        return 0;
}

EXPORT_SYMBOL(notifier_chain_register);
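/*
 * Illustrative usage sketch (an addition, not in the original source):
 * registering an entry on a notifier chain. The chain head, callback
 * name and priority value here are hypothetical.
 *
 *      static int my_event(struct notifier_block *self,
 *                          unsigned long event, void *data)
 *      {
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block *my_chain;
 *      static struct notifier_block my_nb = {
 *              .notifier_call  = my_event,
 *              .priority       = 10,   (runs before priority-0 entries)
 *      };
 *
 *      notifier_chain_register(&my_chain, &my_nb);
 */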
/**
 * notifier_chain_unregister - Remove notifier from a notifier chain
 * @nl: Pointer to root list pointer
 * @n: Entry to remove from the notifier chain
 *
 * Removes a notifier from a notifier chain.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
{
        write_lock(&notifier_lock);
        while ((*nl) != NULL) {
                if ((*nl) == n) {
                        *nl = n->next;
                        write_unlock(&notifier_lock);
                        return 0;
                }
                nl = &((*nl)->next);
        }
        write_unlock(&notifier_lock);
        return -ENOENT;
}

EXPORT_SYMBOL(notifier_chain_unregister);

/**
 * notifier_call_chain - Call functions in a notifier chain
 * @n: Pointer to root pointer of notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.
 *
 * If a notifier's return value has %NOTIFY_STOP_MASK set,
 * notifier_call_chain() returns immediately, with the return
 * value of the notifier function which halted execution.
 * Otherwise, the return value is the return value
 * of the last notifier function called.
 */

int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
{
        int ret = NOTIFY_DONE;
        struct notifier_block *nb = *n;

        while (nb) {
                ret = nb->notifier_call(nb, val, v);
                if (ret & NOTIFY_STOP_MASK)
                        return ret;
                nb = nb->next;
        }
        return ret;
}

EXPORT_SYMBOL(notifier_call_chain);

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as notifier_chain_register()
 * always returns zero.
 */

int register_reboot_notifier(struct notifier_block * nb)
{
        return notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);
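/*
 * Illustrative sketch (an addition, not in the original source): a driver
 * pairing register_reboot_notifier() with unregister_reboot_notifier().
 * The mydrv_* names are hypothetical; the event codes SYS_RESTART,
 * SYS_HALT and SYS_POWER_OFF are the values passed by sys_reboot() below.
 *
 *      static int mydrv_reboot(struct notifier_block *nb,
 *                              unsigned long event, void *unused)
 *      {
 *              if (event == SYS_POWER_OFF)
 *                      mydrv_quiesce_hardware();       (hypothetical)
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block mydrv_reboot_nb = {
 *              .notifier_call = mydrv_reboot,
 *      };
 *
 *      register_reboot_notifier(&mydrv_reboot_nb);     (module init)
 *      unregister_reboot_notifier(&mydrv_reboot_nb);   (module exit)
 */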
/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block * nb)
{
        return notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);

static int set_one_prio(struct task_struct *p, int niceval, int error)
{
        int no_nice;

        if (p->uid != current->euid &&
            p->euid != current->euid && !capable(CAP_SYS_NICE)) {
                error = -EPERM;
                goto out;
        }
        if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) {
                error = -EACCES;
                goto out;
        }
        no_nice = security_task_setnice(p, niceval);
        if (no_nice) {
                error = no_nice;
                goto out;
        }
        if (error == -ESRCH)
                error = 0;
        set_user_nice(p, niceval);
out:
        return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        int error = -EINVAL;

        if (which > 2 || which < 0)
                goto out;

        /* normalize: avoid signed division (rounding problems) */
        error = -ESRCH;
        if (niceval < -20)
                niceval = -20;
        if (niceval > 19)
                niceval = 19;

        read_lock(&tasklist_lock);
        switch (which) {
        case PRIO_PROCESS:
                if (!who)
                        who = current->pid;
                p = find_task_by_pid(who);
                if (p)
                        error = set_one_prio(p, niceval, error);
                break;
        case PRIO_PGRP:
                if (!who)
                        who = process_group(current);
                do_each_task_pid(who, PIDTYPE_PGID, p) {
                        error = set_one_prio(p, niceval, error);
                } while_each_task_pid(who, PIDTYPE_PGID, p);
                break;
        case PRIO_USER:
                user = current->user;
                if (!who)
                        who = current->uid;
                else
                        if ((who != current->uid) && !(user = find_user(who)))
                                goto out_unlock;        /* No processes for this user */

                do_each_thread(g, p)
                        if (p->uid == who)
                                error = set_one_prio(p, niceval, error);
                while_each_thread(g, p);
                if (who != current->uid)
                        free_uid(user);         /* For find_user() */
                break;
        }
out_unlock:
        read_unlock(&tasklist_lock);
out:
        return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
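/*
 * Illustrative sketch (an addition, not in the original source): how a
 * caller recovers the conventional nice value from the offset return
 * described above, roughly what the C library getpriority() wrapper
 * does. Variable names are hypothetical.
 *
 *      long ret = sys_getpriority(PRIO_PROCESS, 0);    (returns 40..1)
 *      if (ret >= 0)
 *              nice = 20 - ret;                        (back to -20..19)
 */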
asmlinkage long sys_getpriority(int which, int who)
{
        struct task_struct *g, *p;
        struct user_struct *user;
        long niceval, retval = -ESRCH;

        if (which > 2 || which < 0)
                return -EINVAL;

        read_lock(&tasklist_lock);
        switch (which) {
        case PRIO_PROCESS:
                if (!who)
                        who = current->pid;
                p = find_task_by_pid(who);
                if (p) {
                        niceval = 20 - task_nice(p);
                        if (niceval > retval)
                                retval = niceval;
                }
                break;
        case PRIO_PGRP:
                if (!who)
                        who = process_group(current);
                do_each_task_pid(who, PIDTYPE_PGID, p) {
                        niceval = 20 - task_nice(p);
                        if (niceval > retval)
                                retval = niceval;
                } while_each_task_pid(who, PIDTYPE_PGID, p);
                break;
        case PRIO_USER:
                user = current->user;
                if (!who)
                        who = current->uid;
                else
                        if ((who != current->uid) && !(user = find_user(who)))
                                goto out_unlock;        /* No processes for this user */

                do_each_thread(g, p)
                        if (p->uid == who) {
                                niceval = 20 - task_nice(p);
                                if (niceval > retval)
                                        retval = niceval;
                        }
                while_each_thread(g, p);
                if (who != current->uid)
                        free_uid(user);         /* for find_user() */
                break;
        }
out_unlock:
        read_unlock(&tasklist_lock);

        return retval;
}


/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
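/*
 * Illustrative sketch (an addition, not in the original source): the
 * magic numbers a userspace caller must supply. Error handling omitted;
 * a real caller syncs first, as the comment above says.
 *
 *      #include <unistd.h>
 *      #include <linux/reboot.h>
 *
 *      sync();
 *      syscall(__NR_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *              LINUX_REBOOT_CMD_RESTART, NULL);
 */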
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
        char buffer[256];

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT))
                return -EPERM;

        /* For safety, we require "magic" arguments. */
        if (magic1 != LINUX_REBOOT_MAGIC1 ||
            (magic2 != LINUX_REBOOT_MAGIC2 &&
             magic2 != LINUX_REBOOT_MAGIC2A &&
             magic2 != LINUX_REBOOT_MAGIC2B &&
             magic2 != LINUX_REBOOT_MAGIC2C))
                return -EINVAL;

        lock_kernel();
        switch (cmd) {
        case LINUX_REBOOT_CMD_RESTART:
                notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
                system_state = SYSTEM_RESTART;
                device_shutdown();
                printk(KERN_EMERG "Restarting system.\n");
                machine_restart(NULL);
                break;

        case LINUX_REBOOT_CMD_CAD_ON:
                C_A_D = 1;
                break;

        case LINUX_REBOOT_CMD_CAD_OFF:
                C_A_D = 0;
                break;

        case LINUX_REBOOT_CMD_HALT:
                notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
                system_state = SYSTEM_HALT;
                device_shutdown();
                printk(KERN_EMERG "System halted.\n");
                machine_halt();
                unlock_kernel();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_POWER_OFF:
                notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
                system_state = SYSTEM_POWER_OFF;
                device_shutdown();
                printk(KERN_EMERG "Power down.\n");
                machine_power_off();
                unlock_kernel();
                do_exit(0);
                break;

        case LINUX_REBOOT_CMD_RESTART2:
                if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
                        unlock_kernel();
                        return -EFAULT;
                }
                buffer[sizeof(buffer) - 1] = '\0';

                notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer);
                system_state = SYSTEM_RESTART;
                device_shutdown();
                printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer);
                machine_restart(buffer);
                break;

#ifdef CONFIG_SOFTWARE_SUSPEND
        case LINUX_REBOOT_CMD_SW_SUSPEND:
                {
                        int ret = software_suspend();
                        unlock_kernel();
                        return ret;
                }
#endif

        default:
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();
        return 0;
}

static void deferred_cad(void *dummy)
{
        notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL);
        machine_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
        static DECLARE_WORK(cad_work, deferred_cad, NULL);

        if (C_A_D)
                schedule_work(&cad_work);
        else
                kill_proc(cad_pid, SIGINT, 1);
}


/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
        int old_rgid = current->gid;
        int old_egid = current->egid;
        int new_rgid = old_rgid;
        int new_egid = old_egid;
        int retval;

        retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
        if (retval)
                return retval;

        if (rgid != (gid_t) -1) {
                if ((old_rgid == rgid) ||
                    (current->egid == rgid) ||
                    capable(CAP_SETGID))
                        new_rgid = rgid;
                else
                        return -EPERM;
        }
        if (egid != (gid_t) -1) {
                if ((old_rgid == egid) ||
                    (current->egid == egid) ||
                    (current->sgid == egid) ||
                    capable(CAP_SETGID))
                        new_egid = egid;
                else
                        return -EPERM;
        }
        if (new_egid != old_egid) {
                current->mm->dumpable = 0;
                wmb();
        }
        if (rgid != (gid_t) -1 ||
            (egid != (gid_t) -1 && egid != old_rgid))
                current->sgid = new_egid;
        current->fsgid = new_egid;
        current->egid = new_egid;
        current->gid = new_rgid;
        key_fsgid_changed(current);
        return 0;
}
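/*
 * Illustrative sketch (an addition, not in the original source): the
 * permanent privilege drop the comment above describes. Because the
 * real gid is being set, the saved gid follows the new effective gid,
 * so a setgid program cannot regain the group afterwards.
 *
 *      gid_t real = getgid();
 *      if (setregid(real, real) < 0)
 *              abort();
 */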
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
        int old_egid = current->egid;
        int retval;

        retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
        if (retval)
                return retval;

        if (capable(CAP_SETGID)) {
                if (old_egid != gid) {
                        current->mm->dumpable = 0;
                        wmb();
                }
                current->gid = current->egid = current->sgid = current->fsgid = gid;
        } else if ((gid == current->gid) || (gid == current->sgid)) {
                if (old_egid != gid) {
                        current->mm->dumpable = 0;
                        wmb();
                }
                current->egid = current->fsgid = gid;
        } else
                return -EPERM;

        key_fsgid_changed(current);
        return 0;
}

static int set_user(uid_t new_ruid, int dumpclear)
{
        struct user_struct *new_user;

        new_user = alloc_uid(new_ruid);
        if (!new_user)
                return -EAGAIN;

        if (atomic_read(&new_user->processes) >=
            current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
            new_user != &root_user) {
                free_uid(new_user);
                return -EAGAIN;
        }

        switch_uid(new_user);

        if (dumpclear) {
                current->mm->dumpable = 0;
                wmb();
        }
        current->uid = new_ruid;
        return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
        int old_ruid, old_euid, old_suid, new_ruid, new_euid;
        int retval;

        retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
        if (retval)
                return retval;

        new_ruid = old_ruid = current->uid;
        new_euid = old_euid = current->euid;
        old_suid = current->suid;

        if (ruid != (uid_t) -1) {
                new_ruid = ruid;
                if ((old_ruid != ruid) &&
                    (current->euid != ruid) &&
                    !capable(CAP_SETUID))
                        return -EPERM;
        }

        if (euid != (uid_t) -1) {
                new_euid = euid;
                if ((old_ruid != euid) &&
                    (current->euid != euid) &&
                    (current->suid != euid) &&
                    !capable(CAP_SETUID))
                        return -EPERM;
        }

        if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
                return -EAGAIN;

        if (new_euid != old_euid) {
                current->mm->dumpable = 0;
                wmb();
        }
        current->fsuid = current->euid = new_euid;
        if (ruid != (uid_t) -1 ||
            (euid != (uid_t) -1 && euid != old_ruid))
                current->suid = current->euid;
        current->fsuid = current->euid;

        key_fsuid_changed(current);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}
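/*
 * Illustrative sketch (an addition, not in the original source): the
 * BSD-style swap that lets a setuid-root program drop privileges
 * temporarily and regain them, in contrast to setuid() below, which is
 * permanent for root because it also overwrites the saved uid.
 *
 *      setreuid(geteuid(), getuid());  (swap: now the real user is effective)
 *      ...do unprivileged work...
 *      setreuid(geteuid(), getuid());  (swap back: euid is root again)
 */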
/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
        int old_euid = current->euid;
        int old_ruid, old_suid, new_ruid, new_suid;
        int retval;

        retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
        if (retval)
                return retval;

        old_ruid = new_ruid = current->uid;
        old_suid = current->suid;
        new_suid = old_suid;

        if (capable(CAP_SETUID)) {
                if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
                        return -EAGAIN;
                new_suid = uid;
        } else if ((uid != current->uid) && (uid != new_suid))
                return -EPERM;

        if (old_euid != uid) {
                current->mm->dumpable = 0;
                wmb();
        }
        current->fsuid = current->euid = uid;
        current->suid = new_suid;

        key_fsuid_changed(current);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
        int old_ruid = current->uid;
        int old_euid = current->euid;
        int old_suid = current->suid;
        int retval;

        retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
        if (retval)
                return retval;

        if (!capable(CAP_SETUID)) {
                if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
                    (ruid != current->euid) && (ruid != current->suid))
                        return -EPERM;
                if ((euid != (uid_t) -1) && (euid != current->uid) &&
                    (euid != current->euid) && (euid != current->suid))
                        return -EPERM;
                if ((suid != (uid_t) -1) && (suid != current->uid) &&
                    (suid != current->euid) && (suid != current->suid))
                        return -EPERM;
        }
        if (ruid != (uid_t) -1) {
                if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
                        return -EAGAIN;
        }
        if (euid != (uid_t) -1) {
                if (euid != current->euid) {
                        current->mm->dumpable = 0;
                        wmb();
                }
                current->euid = euid;
        }
        current->fsuid = current->euid;
        if (suid != (uid_t) -1)
                current->suid = suid;

        key_fsuid_changed(current);

        return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
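/*
 * Illustrative sketch (an addition, not in the original source): the
 * 4.4BSD-compatible seteuid() mentioned above, expressed in terms of
 * setresuid() by leaving the real and saved uids untouched.
 *
 *      int seteuid(uid_t euid)
 *      {
 *              return setresuid((uid_t) -1, euid, (uid_t) -1);
 *      }
 */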
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
        int retval;

        if (!(retval = put_user(current->uid, ruid)) &&
            !(retval = put_user(current->euid, euid)))
                retval = put_user(current->suid, suid);

        return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
        int retval;

        retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
        if (retval)
                return retval;

        if (!capable(CAP_SETGID)) {
                if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
                    (rgid != current->egid) && (rgid != current->sgid))
                        return -EPERM;
                if ((egid != (gid_t) -1) && (egid != current->gid) &&
                    (egid != current->egid) && (egid != current->sgid))
                        return -EPERM;
                if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
                    (sgid != current->egid) && (sgid != current->sgid))
                        return -EPERM;
        }
        if (egid != (gid_t) -1) {
                if (egid != current->egid) {
                        current->mm->dumpable = 0;
                        wmb();
                }
                current->egid = egid;
        }
        current->fsgid = current->egid;
        if (rgid != (gid_t) -1)
                current->gid = rgid;
        if (sgid != (gid_t) -1)
                current->sgid = sgid;

        key_fsgid_changed(current);
        return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
        int retval;

        if (!(retval = put_user(current->gid, rgid)) &&
            !(retval = put_user(current->egid, egid)))
                retval = put_user(current->sgid, sgid);

        return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
        int old_fsuid;

        old_fsuid = current->fsuid;
        if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
                return old_fsuid;

        if (uid == current->uid || uid == current->euid ||
            uid == current->suid || uid == current->fsuid ||
            capable(CAP_SETUID)) {
                if (uid != old_fsuid) {
                        current->mm->dumpable = 0;
                        wmb();
                }
                current->fsuid = uid;
        }

        key_fsuid_changed(current);

        security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

        return old_fsuid;
}
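/*
 * Illustrative sketch (an addition, not in the original source): the
 * nfsd-style use described above - a privileged server impersonates a
 * user for filesystem permission checks only, without giving up its
 * euid. Note that setfsuid() returns the previous fsuid rather than an
 * error code; client_uid is hypothetical.
 *
 *      int old = setfsuid(client_uid); (fs checks now act as client_uid)
 *      fd = open(path, O_RDONLY);      (denied unless client_uid may read)
 *      setfsuid(old);                  (restore)
 */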
/*
 * Samma på svenska.. ("the same in Swedish": setfsgid() is the gid
 * counterpart of setfsuid() above.)
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
        int old_fsgid;

        old_fsgid = current->fsgid;
        if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
                return old_fsgid;

        if (gid == current->gid || gid == current->egid ||
            gid == current->sgid || gid == current->fsgid ||
            capable(CAP_SETGID)) {
                if (gid != old_fsgid) {
                        current->mm->dumpable = 0;
                        wmb();
                }
                current->fsgid = gid;
                key_fsgid_changed(current);
        }
        return old_fsgid;
}

asmlinkage long sys_times(struct tms __user * tbuf)
{
        /*
         * In the SMP world we might just be unlucky and have one of
         * the times increment as we use it. Since the value is an
         * atomically safe type this is just fine. Conceptually it's
         * as if the syscall took an instant longer to occur.
         */
        if (tbuf) {
                struct tms tmp;
                struct task_struct *tsk = current;
                struct task_struct *t;
                cputime_t utime, stime, cutime, cstime;

                read_lock(&tasklist_lock);
                utime = tsk->signal->utime;
                stime = tsk->signal->stime;
                t = tsk;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        t = next_thread(t);
                } while (t != tsk);

                /*
                 * While we have tasklist_lock read-locked, no dying thread
                 * can be updating current->signal->[us]time. Instead,
                 * we got their counts included in the live thread loop.
                 * However, another thread can come in right now and
                 * do a wait call that updates current->signal->c[us]time.
                 * To make sure we always see that pair updated atomically,
                 * we take the siglock around fetching them.
                 */
                spin_lock_irq(&tsk->sighand->siglock);
                cutime = tsk->signal->cutime;
                cstime = tsk->signal->cstime;
                spin_unlock_irq(&tsk->sighand->siglock);
                read_unlock(&tasklist_lock);

                tmp.tms_utime = cputime_to_clock_t(utime);
                tmp.tms_stime = cputime_to_clock_t(stime);
                tmp.tms_cutime = cputime_to_clock_t(cutime);
                tmp.tms_cstime = cputime_to_clock_t(cstime);
                if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
                        return -EFAULT;
        }
        return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
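/*
 * Illustrative sketch (an addition, not in the original source): reading
 * the result from userspace. Both the tms fields and the return value
 * are in clock ticks and scale by sysconf(_SC_CLK_TCK).
 *
 *      struct tms t;
 *      clock_t now = times(&t);
 *      double user_secs = (double) t.tms_utime / sysconf(_SC_CLK_TCK);
 */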
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
        struct task_struct *p;
        int err = -EINVAL;

        if (!pid)
                pid = current->pid;
        if (!pgid)
                pgid = pid;
        if (pgid < 0)
                return -EINVAL;

        /* From this point forward we keep holding onto the tasklist lock
         * so that our parent does not change from under us. -DaveM
         */
        write_lock_irq(&tasklist_lock);

        err = -ESRCH;
        p = find_task_by_pid(pid);
        if (!p)
                goto out;

        err = -EINVAL;
        if (!thread_group_leader(p))
                goto out;

        if (p->parent == current || p->real_parent == current) {
                err = -EPERM;
                if (p->signal->session != current->signal->session)
                        goto out;
                err = -EACCES;
                if (p->did_exec)
                        goto out;
        } else {
                err = -ESRCH;
                if (p != current)
                        goto out;
        }

        err = -EPERM;
        if (p->signal->leader)
                goto out;

        if (pgid != pid) {
                struct task_struct *p;

                do_each_task_pid(pgid, PIDTYPE_PGID, p) {
                        if (p->signal->session == current->signal->session)
                                goto ok_pgid;
                } while_each_task_pid(pgid, PIDTYPE_PGID, p);
                goto out;
        }

ok_pgid:
        err = security_task_setpgid(p, pgid);
        if (err)
                goto out;

        if (process_group(p) != pgid) {
                detach_pid(p, PIDTYPE_PGID);
                p->signal->pgrp = pgid;
                attach_pid(p, PIDTYPE_PGID, pgid);
        }

        err = 0;
out:
        /* All paths lead to here, thus we are safe. -DaveM */
        write_unlock_irq(&tasklist_lock);
        return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
        if (!pid) {
                return process_group(current);
        } else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p) {
                        retval = security_task_getpgid(p);
                        if (!retval)
                                retval = process_group(p);
                }
                read_unlock(&tasklist_lock);
                return retval;
        }
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
        /* SMP - assuming writes are word atomic this is fine */
        return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
        if (!pid) {
                return current->signal->session;
        } else {
                int retval;
                struct task_struct *p;

                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);

                retval = -ESRCH;
                if (p) {
                        retval = security_task_getsid(p);
                        if (!retval)
                                retval = p->signal->session;
                }
                read_unlock(&tasklist_lock);
                return retval;
        }
}

asmlinkage long sys_setsid(void)
{
        struct pid *pid;
        int err = -EPERM;

        if (!thread_group_leader(current))
                return -EINVAL;

        down(&tty_sem);
        write_lock_irq(&tasklist_lock);

        pid = find_pid(PIDTYPE_PGID, current->pid);
        if (pid)
                goto out;

        current->signal->leader = 1;
        __set_special_pids(current->pid, current->pid);
        current->signal->tty = NULL;
        current->signal->tty_old_pgrp = 0;
        err = process_group(current);
out:
        write_unlock_irq(&tasklist_lock);
        up(&tty_sem);
        return err;
}
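/*
 * Illustrative sketch (an addition, not in the original source): why
 * daemons fork before calling setsid(). A process-group leader's pgid
 * equals its pid, so the find_pid() check above makes setsid() fail
 * with -EPERM for it; the freshly forked child is never a leader, so
 * setsid() succeeds and detaches it from the controlling tty.
 *
 *      if (fork() > 0)
 *              exit(0);        (parent exits; child is not a pg leader)
 *      if (setsid() < 0)
 *              abort();        (child now leads a new session, no tty)
 */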
/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
        struct group_info *group_info;
        int nblocks;
        int i;

        nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
        /* Make sure we always allocate at least one indirect block pointer */
        nblocks = nblocks ? : 1;
        group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
        if (!group_info)
                return NULL;
        group_info->ngroups = gidsetsize;
        group_info->nblocks = nblocks;
        atomic_set(&group_info->usage, 1);

        if (gidsetsize <= NGROUPS_SMALL) {
                group_info->blocks[0] = group_info->small_block;
        } else {
                for (i = 0; i < nblocks; i++) {
                        gid_t *b;
                        b = (void *)__get_free_page(GFP_USER);
                        if (!b)
                                goto out_undo_partial_alloc;
                        group_info->blocks[i] = b;
                }
        }
        return group_info;

out_undo_partial_alloc:
        while (--i >= 0) {
                free_page((unsigned long)group_info->blocks[i]);
        }
        kfree(group_info);
        return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
        if (group_info->blocks[0] != group_info->small_block) {
                int i;
                for (i = 0; i < group_info->nblocks; i++)
                        free_page((unsigned long)group_info->blocks[i]);
        }
        kfree(group_info);
}

EXPORT_SYMBOL(groups_free);
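/*
 * Illustrative sketch (an addition, not in the original source): how the
 * block layout built above is indexed. Small sets live in the embedded
 * small_block array; larger ones spill into page-sized blocks, and the
 * GROUP_AT() macro used below hides the two-level lookup, roughly:
 *
 *      GROUP_AT(gi, i) ==
 *              gi->blocks[i / NGROUPS_PER_BLOCK][i % NGROUPS_PER_BLOCK]
 */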
/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
                          struct group_info *group_info)
{
        int i;
        int count = group_info->ngroups;

        for (i = 0; i < group_info->nblocks; i++) {
                int cp_count = min(NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);

                if (copy_to_user(grouplist+off, group_info->blocks[i], len))
                        return -EFAULT;

                count -= cp_count;
        }
        return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
                            gid_t __user *grouplist)
{
        int i;
        int count = group_info->ngroups;

        for (i = 0; i < group_info->nblocks; i++) {
                int cp_count = min(NGROUPS_PER_BLOCK, count);
                int off = i * NGROUPS_PER_BLOCK;
                int len = cp_count * sizeof(*grouplist);

                if (copy_from_user(group_info->blocks[i], grouplist+off, len))
                        return -EFAULT;

                count -= cp_count;
        }
        return 0;
}

/* a simple Shell-Metzner sort */
static void groups_sort(struct group_info *group_info)
{
        int base, max, stride;
        int gidsetsize = group_info->ngroups;

        for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
                ; /* nothing */
        stride /= 3;

        while (stride) {
                max = gidsetsize - stride;
                for (base = 0; base < max; base++) {
                        int left = base;
                        int right = left + stride;
                        gid_t tmp = GROUP_AT(group_info, right);

                        while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
                                GROUP_AT(group_info, right) =
                                        GROUP_AT(group_info, left);
                                right = left;
                                left -= stride;
                        }
                        GROUP_AT(group_info, right) = tmp;
                }
                stride /= 3;
        }
}

/* a simple bsearch */
static int groups_search(struct group_info *group_info, gid_t grp)
{
        int left, right;

        if (!group_info)
                return 0;

        left = 0;
        right = group_info->ngroups;
        while (left < right) {
                int mid = (left + right) / 2;
                int cmp = grp - GROUP_AT(group_info, mid);
                if (cmp > 0)
                        left = mid + 1;
                else if (cmp < 0)
                        right = mid;
                else
                        return 1;
        }
        return 0;
}

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
        int retval;
        struct group_info *old_info;

        retval = security_task_setgroups(group_info);
        if (retval)
                return retval;

        /* groups_search() needs a sorted set, so sort before installing */
        groups_sort(group_info);
        get_group_info(group_info);

        task_lock(current);
        old_info = current->group_info;
        current->group_info = group_info;
        task_unlock(current);

        put_group_info(old_info);

        return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
        int i = 0;

        /*
         * SMP: Nobody else can change our grouplist. Thus we are
         * safe.
         */

        if (gidsetsize < 0)
                return -EINVAL;

        /* no need to grab task_lock here; it cannot change */
        get_group_info(current->group_info);
        i = current->group_info->ngroups;
        if (gidsetsize) {
                if (i > gidsetsize) {
                        i = -EINVAL;
                        goto out;
                }
                if (groups_to_user(grouplist, current->group_info)) {
                        i = -EFAULT;
                        goto out;
                }
        }
out:
        put_group_info(current->group_info);
        return i;
}

/*
 * SMP: Our groups are copy-on-write. We can set them safely
 * without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
        struct group_info *group_info;
        int retval;

        if (!capable(CAP_SETGID))
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;

        group_info = groups_alloc(gidsetsize);
        if (!group_info)
                return -ENOMEM;
        retval = groups_from_user(group_info, grouplist);
        if (retval) {
                put_group_info(group_info);
                return retval;
        }

        retval = set_current_groups(group_info);
        put_group_info(group_info);

        return retval;
}
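/*
 * Illustrative sketch (an addition, not in the original source): the
 * userspace companion to the uid/gid drops earlier in this file - a
 * privileged daemon clears its supplementary groups before switching
 * ids, since setuid()/setgid() leave them untouched.
 *
 *      if (setgroups(0, NULL) < 0)     (empty the supplementary set)
 *              abort();
 */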
/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
        int retval = 1;
        if (grp != current->fsgid) {
                get_group_info(current->group_info);
                retval = groups_search(current->group_info, grp);
                put_group_info(current->group_info);
        }
        return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
        int retval = 1;
        if (grp != current->egid) {
                get_group_info(current->group_info);
                retval = groups_search(current->group_info, grp);
                put_group_info(current->group_info);
        }
        return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
        int errno = 0;

        down_read(&uts_sem);
        if (copy_to_user(name, &system_utsname, sizeof *name))
                errno = -EFAULT;
        up_read(&uts_sem);
        return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;
        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                memcpy(system_utsname.nodename, tmp, len);
                system_utsname.nodename[len] = 0;
                errno = 0;
        }
        up_write(&uts_sem);
        return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
        int i, errno;

        if (len < 0)
                return -EINVAL;
        down_read(&uts_sem);
        i = 1 + strlen(system_utsname.nodename);
        if (i > len)
                i = len;
        errno = 0;
        if (copy_to_user(name, system_utsname.nodename, i))
                errno = -EFAULT;
        up_read(&uts_sem);
        return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
        int errno;
        char tmp[__NEW_UTS_LEN];

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (len < 0 || len > __NEW_UTS_LEN)
                return -EINVAL;

        down_write(&uts_sem);
        errno = -EFAULT;
        if (!copy_from_user(tmp, name, len)) {
                memcpy(system_utsname.domainname, tmp, len);
                system_utsname.domainname[len] = 0;
                errno = 0;
        }
        up_write(&uts_sem);
        return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        else {
                struct rlimit value;
                task_lock(current->group_leader);
                value = current->signal->rlim[resource];
                task_unlock(current->group_leader);
                return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
        }
}
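/*
 * Illustrative sketch (an addition, not in the original source): raising
 * a soft limit from userspace up to the hard limit, the common pattern
 * served by sys_getrlimit()/sys_setrlimit().
 *
 *      struct rlimit rl;
 *      getrlimit(RLIMIT_NOFILE, &rl);
 *      rl.rlim_cur = rl.rlim_max;      (soft may not exceed hard)
 *      setrlimit(RLIMIT_NOFILE, &rl);
 */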
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        struct rlimit x;
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;

        task_lock(current->group_leader);
        x = current->signal->rlim[resource];
        task_unlock(current->group_leader);
        if (x.rlim_cur > 0x7FFFFFFF)
                x.rlim_cur = 0x7FFFFFFF;
        if (x.rlim_max > 0x7FFFFFFF)
                x.rlim_max = 0x7FFFFFFF;
        return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
        struct rlimit new_rlim, *old_rlim;
        int retval;

        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
        if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
                return -EFAULT;
        if (new_rlim.rlim_cur > new_rlim.rlim_max)
                return -EINVAL;
        old_rlim = current->signal->rlim + resource;
        if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
            !capable(CAP_SYS_RESOURCE))
                return -EPERM;
        if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
                return -EPERM;

        retval = security_task_setrlimit(resource, &new_rlim);
        if (retval)
                return retval;

        task_lock(current->group_leader);
        *old_rlim = new_rlim;
        task_unlock(current->group_leader);

        if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
            (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
             new_rlim.rlim_cur <= cputime_to_secs(
                     current->signal->it_prof_expires))) {
                cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur);
                read_lock(&tasklist_lock);
                spin_lock_irq(&current->sighand->siglock);
                set_process_cpu_timer(current, CPUCLOCK_PROF,
                                      &cputime, NULL);
                spin_unlock_irq(&current->sighand->siglock);
                read_unlock(&tasklist_lock);
        }

        return 0;
}
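/*
 * Illustrative sketch (an addition, not in the original source): the
 * effect of the RLIMIT_CPU branch above, seen from userspace. Once the
 * process has consumed rlim_cur seconds of CPU time, the CPU timer armed
 * by set_process_cpu_timer() delivers SIGXCPU.
 *
 *      struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };
 *      setrlimit(RLIMIT_CPU, &rl);
 *      for (;;)
 *              ;       (after ~1s of CPU time, SIGXCPU arrives)
 */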
/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * This expects to be called with tasklist_lock read-locked or better,
 * and the siglock not locked. It may momentarily take the siglock.
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
        struct task_struct *t;
        unsigned long flags;
        cputime_t utime, stime;

        memset((char *) r, 0, sizeof *r);

        if (unlikely(!p->signal))
                return;

        switch (who) {
        case RUSAGE_CHILDREN:
                spin_lock_irqsave(&p->sighand->siglock, flags);
                utime = p->signal->cutime;
                stime = p->signal->cstime;
                r->ru_nvcsw = p->signal->cnvcsw;
                r->ru_nivcsw = p->signal->cnivcsw;
                r->ru_minflt = p->signal->cmin_flt;
                r->ru_majflt = p->signal->cmaj_flt;
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
                cputime_to_timeval(utime, &r->ru_utime);
                cputime_to_timeval(stime, &r->ru_stime);
                break;
        case RUSAGE_SELF:
                spin_lock_irqsave(&p->sighand->siglock, flags);
                utime = stime = cputime_zero;
                goto sum_group;
        case RUSAGE_BOTH:
                spin_lock_irqsave(&p->sighand->siglock, flags);
                utime = p->signal->cutime;
                stime = p->signal->cstime;
                r->ru_nvcsw = p->signal->cnvcsw;
                r->ru_nivcsw = p->signal->cnivcsw;
                r->ru_minflt = p->signal->cmin_flt;
                r->ru_majflt = p->signal->cmaj_flt;
        sum_group:
                utime = cputime_add(utime, p->signal->utime);
                stime = cputime_add(stime, p->signal->stime);
                r->ru_nvcsw += p->signal->nvcsw;
                r->ru_nivcsw += p->signal->nivcsw;
                r->ru_minflt += p->signal->min_flt;
                r->ru_majflt += p->signal->maj_flt;
                t = p;
                do {
                        utime = cputime_add(utime, t->utime);
                        stime = cputime_add(stime, t->stime);
                        r->ru_nvcsw += t->nvcsw;
                        r->ru_nivcsw += t->nivcsw;
                        r->ru_minflt += t->min_flt;
                        r->ru_majflt += t->maj_flt;
                        t = next_thread(t);
                } while (t != p);
                spin_unlock_irqrestore(&p->sighand->siglock, flags);
                cputime_to_timeval(utime, &r->ru_utime);
                cputime_to_timeval(stime, &r->ru_stime);
                break;
        default:
                BUG();
        }
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
        struct rusage r;
        read_lock(&tasklist_lock);
        k_getrusage(p, who, &r);
        read_unlock(&tasklist_lock);
        return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
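/*
 * Illustrative sketch (an addition, not in the original source): a
 * userspace caller of the syscall below. RUSAGE_SELF sums over all
 * threads of the process, as k_getrusage() above shows.
 *
 *      struct rusage ru;
 *      getrusage(RUSAGE_SELF, &ru);
 *      printf("%ld.%06ld user seconds, %ld major faults\n",
 *             ru.ru_utime.tv_sec, ru.ru_utime.tv_usec, ru.ru_majflt);
 */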
asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
        if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
                return -EINVAL;
        return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
        mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
        return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                          unsigned long arg4, unsigned long arg5)
{
        long error;
        int sig;

        error = security_task_prctl(option, arg2, arg3, arg4, arg5);
        if (error)
                return error;

        switch (option) {
        case PR_SET_PDEATHSIG:
                sig = arg2;
                if (sig < 0 || sig > _NSIG) {
                        error = -EINVAL;
                        break;
                }
                current->pdeath_signal = sig;
                break;
        case PR_GET_PDEATHSIG:
                error = put_user(current->pdeath_signal, (int __user *)arg2);
                break;
        case PR_GET_DUMPABLE:
                if (current->mm->dumpable)
                        error = 1;
                break;
        case PR_SET_DUMPABLE:
                if (arg2 != 0 && arg2 != 1) {
                        error = -EINVAL;
                        break;
                }
                current->mm->dumpable = arg2;
                break;

        case PR_SET_UNALIGN:
                error = SET_UNALIGN_CTL(current, arg2);
                break;
        case PR_GET_UNALIGN:
                error = GET_UNALIGN_CTL(current, arg2);
                break;
        case PR_SET_FPEMU:
                error = SET_FPEMU_CTL(current, arg2);
                break;
        case PR_GET_FPEMU:
                error = GET_FPEMU_CTL(current, arg2);
                break;
        case PR_SET_FPEXC:
                error = SET_FPEXC_CTL(current, arg2);
                break;
        case PR_GET_FPEXC:
                error = GET_FPEXC_CTL(current, arg2);
                break;
        case PR_GET_TIMING:
                error = PR_TIMING_STATISTICAL;
                break;
        case PR_SET_TIMING:
                if (arg2 == PR_TIMING_STATISTICAL)
                        error = 0;
                else
                        error = -EINVAL;
                break;

        case PR_GET_KEEPCAPS:
                if (current->keep_capabilities)
                        error = 1;
                break;
        case PR_SET_KEEPCAPS:
                if (arg2 != 0 && arg2 != 1) {
                        error = -EINVAL;
                        break;
                }
                current->keep_capabilities = arg2;
                break;
        case PR_SET_NAME: {
                struct task_struct *me = current;
                unsigned char ncomm[sizeof(me->comm)];

                ncomm[sizeof(me->comm)-1] = 0;
                if (strncpy_from_user(ncomm, (char __user *)arg2,
                                      sizeof(me->comm)-1) < 0)
                        return -EFAULT;
                set_task_comm(me, ncomm);
                return 0;
        }
        case PR_GET_NAME: {
                struct task_struct *me = current;
                unsigned char tcomm[sizeof(me->comm)];

                get_task_comm(tcomm, me);
                if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
                        return -EFAULT;
                return 0;
        }
        default:
                error = -EINVAL;
                break;
        }
        return error;
}
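/*
 * Illustrative sketch (an addition, not in the original source): two
 * common prctl() uses handled above - naming a task (at most
 * sizeof(current->comm) - 1 bytes survive, per the strncpy_from_user()
 * in PR_SET_NAME) and requesting a signal when the parent dies.
 *
 *      prctl(PR_SET_NAME, (unsigned long) "worker", 0, 0, 0);
 *      prctl(PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0);
 */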