/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>

#include <linux/compat.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
int cad_pid = 1;

/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations
 *	and the like.
 */

static struct notifier_block *reboot_notifier_list;
static DEFINE_RWLOCK(notifier_lock);

/**
 *	notifier_chain_register	- Add notifier to a notifier chain
 *	@list: Pointer to root list pointer
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a notifier chain.
 *
 *	Currently always returns zero.
 */

int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
{
	write_lock(&notifier_lock);
	while (*list) {
		if (n->priority > (*list)->priority)
			break;
		list = &((*list)->next);
	}
	n->next = *list;
	*list = n;
	write_unlock(&notifier_lock);
	return 0;
}

EXPORT_SYMBOL(notifier_chain_register);

/**
 *	notifier_chain_unregister - Remove notifier from a notifier chain
 *	@nl: Pointer to root list pointer
 *	@n: Entry to remove from the notifier chain
 *
 *	Removes a notifier from a notifier chain.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */

int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
{
	write_lock(&notifier_lock);
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			*nl = n->next;
			write_unlock(&notifier_lock);
			return 0;
		}
		nl = &((*nl)->next);
	}
	write_unlock(&notifier_lock);
	return -ENOENT;
}

EXPORT_SYMBOL(notifier_chain_unregister);

/**
 *	notifier_call_chain - Call functions in a notifier chain
 *	@n: Pointer to root pointer of notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *
 *	Calls each function in a notifier chain in turn.
 *
 *	If the return value of the notifier can be and'd
 *	with %NOTIFY_STOP_MASK, then notifier_call_chain
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise, the return value is the return value
 *	of the last notifier function called.
 */

int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb = *n;

	while (nb) {
		ret = nb->notifier_call(nb, val, v);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
		nb = nb->next;
	}
	return ret;
}

EXPORT_SYMBOL(notifier_call_chain);

/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as notifier_chain_register
 *	always returns zero.
 */

int register_reboot_notifier(struct notifier_block * nb)
{
	return notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block * nb)
{
	return notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
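
/*
 * Example (illustrative sketch, not part of the original file): how a
 * driver might hook the reboot notifier chain exported above.  The
 * handler name and my_dev_quiesce() are hypothetical; the
 * notifier_block fields and the SYS_* codes are the real interface.
 */
#if 0
static int my_reboot_handler(struct notifier_block *nb,
			     unsigned long code, void *cmd)
{
	/* code is SYS_RESTART, SYS_HALT or SYS_POWER_OFF */
	my_dev_quiesce();		/* hypothetical: stop pending DMA */
	return NOTIFY_DONE;
}

static struct notifier_block my_reboot_nb = {
	.notifier_call	= my_reboot_handler,
	.priority	= 0,		/* default position in the chain */
};

/* in the driver's init/exit paths: */
register_reboot_notifier(&my_reboot_nb);
unregister_reboot_notifier(&my_reboot_nb);
#endif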

static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;

	if (which > 2 || which < 0)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (!who)
				who = process_group(current);
			do_each_task_pid(who, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_task_pid(who, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;

	if (which > 2 || which < 0)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (!who)
				who = current->pid;
			p = find_task_by_pid(who);
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (!who)
				who = process_group(current);
			do_each_task_pid(who, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_task_pid(who, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
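
/*
 * Example (illustrative sketch): decoding the raw return value of the
 * syscall above.  Note that the glibc getpriority() wrapper already
 * converts back to the -20..19 range; only a direct syscall(2) caller
 * sees the 40..1 encoding described in the comment.
 */
#if 0
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);

	if (ret >= 1 && ret <= 40)
		printf("nice value: %ld\n", 20 - ret);	/* undo the offset */
	return 0;
}
#endif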

/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

/**
 *	kernel_restart - reboot the system
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart_prepare(char *cmd)
{
	notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd) {
		printk(KERN_EMERG "Restarting system.\n");
	} else {
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	}
	printk(".\n");
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 *	kernel_kexec - reboot the system
 *
 *	Move into place and start executing a preloaded standalone
 *	executable.  If nothing was preloaded return an error.
 */
void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}
EXPORT_SYMBOL_GPL(kernel_kexec);

/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt_prepare(void)
{
	notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL);
	system_state = SYSTEM_HALT;
	device_shutdown();
}
void kernel_halt(void)
{
	kernel_halt_prepare();
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}
EXPORT_SYMBOL_GPL(kernel_halt);

/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off_prepare(void)
{
	notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL);
	system_state = SYSTEM_POWER_OFF;
	device_shutdown();
}
void kernel_power_off(void)
{
	kernel_power_off_prepare();
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = software_suspend();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
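
/*
 * Example (illustrative sketch): invoking this syscall directly, magic
 * numbers and all.  The glibc reboot() wrapper supplies the magics
 * itself; this shows the raw calling convention, which also needs
 * CAP_SYS_BOOT.  Remember to sync() first - as the comment above says,
 * the syscall will not do it for you.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
	sync();			/* the kernel does not sync on reboot */
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
		       LINUX_REBOOT_MAGIC2, LINUX_REBOOT_CMD_POWER_OFF,
		       (void *)0);
}
#endif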

static void deferred_cad(void *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad, NULL);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_proc(cad_pid, SIGINT, 1);
}


/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	return 0;
}
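
/*
 * Example (illustrative sketch): a setgid program permanently dropping
 * its privileges, as the comment above describes.  Setting the real
 * gid forces the saved gid to the new effective gid, so the original
 * group cannot be regained afterwards.
 */
#if 0
#include <stdlib.h>
#include <unistd.h>

static void drop_group_privs(void)
{
	gid_t rgid = getgid();		/* the invoking user's real gid */

	if (setregid(rgid, rgid) < 0)	/* rgid set => sgid := new egid */
		abort();
	/* from here on, egid == sgid == rgid: the setgid bit is gone */
}
#endif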

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	} else
		return -EPERM;

	key_fsgid_changed(current);
	return 0;
}

static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}



/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_ruid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = new_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}
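
/*
 * Example (illustrative sketch): the BSD-style swap the comment above
 * alludes to.  A (non-root) setuid program can park its privileged uid
 * in the real uid, run untrusted work, then swap back - something
 * plain setuid() cannot do because it would overwrite the saved uid.
 */
#if 0
#include <stdlib.h>
#include <unistd.h>

static void with_privs_dropped(void (*work)(void))
{
	uid_t privileged = geteuid();
	uid_t invoker = getuid();

	if (setreuid(privileged, invoker) < 0)	/* drop: euid = invoker */
		abort();
	work();			/* runs with the invoker's effective uid */
	if (setreuid(invoker, privileged) < 0)	/* swap back */
		abort();
}
#endif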


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}

/*
 * Samma på svenska.. ("The same in Swedish": setfsgid() is the fsgid counterpart of setfsuid() above.)
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
	}
	return old_fsgid;
}

asmlinkage long sys_times(struct tms __user * tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually it's
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		cputime_t utime, stime, cutime, cstime;

#ifdef CONFIG_SMP
		if (thread_group_empty(current)) {
			/*
			 * Single thread case without the use of any locks.
			 *
			 * We may race with release_task if two threads are
			 * executing. However, release task first adds up the
			 * counters (__exit_signal) before removing the task
			 * from the process tasklist (__unhash_process).
			 * __exit_signal also acquires and releases the
			 * siglock which results in the proper memory ordering
			 * so that the list modifications are always visible
			 * after the counters have been updated.
			 *
			 * If the counters have been updated by the second thread
			 * but the thread has not yet been removed from the list
			 * then the other branch will be executing which will
			 * block on tasklist_lock until the exit handling of the
			 * other task is finished.
			 *
			 * This also implies that the sighand->siglock cannot
			 * be held by another processor. So we can also
			 * skip acquiring that lock.
			 */
			utime = cputime_add(current->signal->utime, current->utime);
			stime = cputime_add(current->signal->stime, current->stime);
			cutime = current->signal->cutime;
			cstime = current->signal->cstime;
		} else
#endif
		{

			/* Process with multiple threads */
			struct task_struct *tsk = current;
			struct task_struct *t;

			read_lock(&tasklist_lock);
			utime = tsk->signal->utime;
			stime = tsk->signal->stime;
			t = tsk;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				t = next_thread(t);
			} while (t != tsk);

			/*
			 * While we have tasklist_lock read-locked, no dying thread
			 * can be updating current->signal->[us]time.  Instead,
			 * we got their counts included in the live thread loop.
			 * However, another thread can come in right now and
			 * do a wait call that updates current->signal->c[us]time.
			 * To make sure we always see that pair updated atomically,
			 * we take the siglock around fetching them.
			 */
			spin_lock_irq(&tsk->sighand->siglock);
			cutime = tsk->signal->cutime;
			cstime = tsk->signal->cstime;
			spin_unlock_irq(&tsk->sighand->siglock);
			read_unlock(&tasklist_lock);
		}
		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
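
/*
 * Example (illustrative sketch): converting the clock_t values this
 * call reports into seconds with sysconf(_SC_CLK_TCK), which is how
 * userspace recovers the tick rate used above.
 */
#if 0
#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
	struct tms t;
	long hz = sysconf(_SC_CLK_TCK);
	clock_t elapsed = times(&t);	/* also returns a timestamp */

	printf("user %.2fs system %.2fs (uptime stamp %.2fs)\n",
	       (double)t.tms_utime / hz, (double)t.tms_stime / hz,
	       (double)elapsed / hz);
	return 0;
}
#endif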

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	int err = -EINVAL;

	if (!pid)
		pid = current->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->parent == current || p->real_parent == current) {
		err = -EPERM;
		if (p->signal->session != current->signal->session)
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != current)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *g;	/* don't shadow p; ok_pgid below uses it */

		do_each_task_pid(pgid, PIDTYPE_PGID, g) {
			if (g->signal->session == current->signal->session)
				goto ok_pgid;
		} while_each_task_pid(pgid, PIDTYPE_PGID, g);
		goto out;
	}

ok_pgid:
	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, pgid);
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid) {
		return process_group(current);
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid) {
		return current->signal->session;
	} else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = p->signal->session;
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

asmlinkage long sys_setsid(void)
{
	struct pid *pid;
	int err = -EPERM;

	if (!thread_group_leader(current))
		return -EINVAL;

	down(&tty_sem);
	write_lock_irq(&tasklist_lock);

	pid = find_pid(PIDTYPE_PGID, current->pid);
	if (pid)
		goto out;

	current->signal->leader = 1;
	__set_special_pids(current->pid, current->pid);
	current->signal->tty = NULL;
	current->signal->tty_old_pgrp = 0;
	err = process_group(current);
out:
	write_unlock_irq(&tasklist_lock);
	up(&tty_sem);
	return err;
}
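
/*
 * Example (illustrative sketch): the classic daemonizing sequence that
 * relies on the rules enforced above - fork first so the caller is not
 * a process group leader, then setsid() to take a fresh session with
 * no controlling tty.
 */
#if 0
#include <stdlib.h>
#include <unistd.h>

static void become_daemon(void)
{
	pid_t pid = fork();

	if (pid < 0)
		exit(1);
	if (pid > 0)
		exit(0);	/* parent leaves; child is not a pgrp leader */
	if (setsid() < 0)	/* cannot fail with EPERM now */
		exit(1);
}
#endif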

/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL) {
		group_info->blocks[0] = group_info->small_block;
	} else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}

/* a simple bsearch */
int groups_search(struct group_info *group_info, gid_t grp)
{
	int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		int mid = (left+right)/2;
		int cmp = grp - GROUP_AT(group_info, mid);
		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}

/* validate and set current->group_info */
int set_current_groups(struct group_info *group_info)
{
	int retval;
	struct group_info *old_info;

	retval = security_task_setgroups(group_info);
	if (retval)
		return retval;

	groups_sort(group_info);
	get_group_info(group_info);

	task_lock(current);
	old_info = current->group_info;
	current->group_info = group_info;
	task_unlock(current);

	put_group_info(old_info);

	return 0;
}

EXPORT_SYMBOL(set_current_groups);

asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
{
	int i = 0;

	/*
	 *	SMP: Nobody else can change our grouplist. Thus we are
	 *	safe.
	 */

	if (gidsetsize < 0)
		return -EINVAL;

	/* no need to grab task_lock here; it cannot change */
	get_group_info(current->group_info);
	i = current->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups_to_user(grouplist, current->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	put_group_info(current->group_info);
	return i;
}

/*
 *	SMP: Our groups are copy-on-write. We can set them safely
 *	without another task interfering.
 */

asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
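
/*
 * Example (illustrative sketch): the usual userspace pattern for these
 * two calls - probe the size with a zero-length getgroups(), then
 * fetch the list.  setgroups() itself needs CAP_SETGID, so only the
 * read side is shown.
 */
#if 0
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int n = getgroups(0, (gid_t *)0);	/* size query */
	gid_t *list = malloc(n * sizeof(gid_t));

	if (!list || getgroups(n, list) < 0)
		return 1;
	/* ... inspect list[0..n-1] ... */
	free(list);
	return 0;
}
#endif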

/*
 * Check whether we're fsgid/egid or in the supplemental group..
 */
int in_group_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->fsgid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);
	}
	return retval;
}

EXPORT_SYMBOL(in_group_p);

int in_egroup_p(gid_t grp)
{
	int retval = 1;
	if (grp != current->egid) {
		get_group_info(current->group_info);
		retval = groups_search(current->group_info, grp);
		put_group_info(current->group_info);
	}
	return retval;
}

EXPORT_SYMBOL(in_egroup_p);

DECLARE_RWSEM(uts_sem);

EXPORT_SYMBOL(uts_sem);

asmlinkage long sys_newuname(struct new_utsname __user * name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, &system_utsname, sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

asmlinkage long sys_sethostname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.nodename, tmp, len);
		system_utsname.nodename[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

asmlinkage long sys_gethostname(char __user *name, int len)
{
	int i, errno;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	i = 1 + strlen(system_utsname.nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, system_utsname.nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
asmlinkage long sys_setdomainname(char __user *name, int len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		memcpy(system_utsname.domainname, tmp, len);
		system_utsname.domainname[len] = 0;
		errno = 0;
	}
	up_write(&uts_sem);
	return errno;
}

asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	else {
		struct rlimit value;
		task_lock(current->group_leader);
		value = current->signal->rlim[resource];
		task_unlock(current->group_leader);
		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
	}
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */

asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit x;
	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
{
	struct rlimit new_rlim, *old_rlim;
	int retval;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	if (new_rlim.rlim_cur > new_rlim.rlim_max)
		return -EINVAL;
	old_rlim = current->signal->rlim + resource;
	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
	    !capable(CAP_SYS_RESOURCE))
		return -EPERM;
	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
		return -EPERM;

	retval = security_task_setrlimit(resource, &new_rlim);
	if (retval)
		return retval;

	task_lock(current->group_leader);
	*old_rlim = new_rlim;
	task_unlock(current->group_leader);

	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY &&
	    (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
	     new_rlim.rlim_cur <= cputime_to_secs(
		     current->signal->it_prof_expires))) {
		cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur);
		read_lock(&tasklist_lock);
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF,
				      &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	return 0;
}
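
/*
 * Example (illustrative sketch): raising the soft limit for open files
 * up to the hard limit - the one direction an unprivileged process is
 * always allowed, since rlim_max can only be raised with
 * CAP_SYS_RESOURCE per the check above.
 */
#if 0
#include <sys/resource.h>

static int raise_nofile_soft_limit(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) < 0)
		return -1;
	rl.rlim_cur = rl.rlim_max;	/* soft up to hard: no privilege needed */
	return setrlimit(RLIMIT_NOFILE, &rl);
}
#endif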

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * This expects to be called with tasklist_lock read-locked or better,
 * and the siglock not locked.  It may momentarily take the siglock.
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 */

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t utime, stime;

	memset((char *) r, 0, sizeof *r);

	if (unlikely(!p->signal))
		return;

	switch (who) {
		case RUSAGE_CHILDREN:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
			cputime_to_timeval(utime, &r->ru_utime);
			cputime_to_timeval(stime, &r->ru_stime);
			break;
		case RUSAGE_SELF:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = stime = cputime_zero;
			goto sum_group;
		case RUSAGE_BOTH:
			spin_lock_irqsave(&p->sighand->siglock, flags);
			utime = p->signal->cutime;
			stime = p->signal->cstime;
			r->ru_nvcsw = p->signal->cnvcsw;
			r->ru_nivcsw = p->signal->cnivcsw;
			r->ru_minflt = p->signal->cmin_flt;
			r->ru_majflt = p->signal->cmaj_flt;
		sum_group:
			utime = cputime_add(utime, p->signal->utime);
			stime = cputime_add(stime, p->signal->stime);
			r->ru_nvcsw += p->signal->nvcsw;
			r->ru_nivcsw += p->signal->nivcsw;
			r->ru_minflt += p->signal->min_flt;
			r->ru_majflt += p->signal->maj_flt;
			t = p;
			do {
				utime = cputime_add(utime, t->utime);
				stime = cputime_add(stime, t->stime);
				r->ru_nvcsw += t->nvcsw;
				r->ru_nivcsw += t->nivcsw;
				r->ru_minflt += t->min_flt;
				r->ru_majflt += t->maj_flt;
				t = next_thread(t);
			} while (t != p);
			spin_unlock_irqrestore(&p->sighand->siglock, flags);
			cputime_to_timeval(utime, &r->ru_utime);
			cputime_to_timeval(stime, &r->ru_stime);
			break;
		default:
			BUG();
	}
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;
	read_lock(&tasklist_lock);
	k_getrusage(p, who, &r);
	read_unlock(&tasklist_lock);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
		return -EINVAL;
	return getrusage(current, who, ru);
}

asmlinkage long sys_umask(int mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
			  unsigned long arg4, unsigned long arg5)
{
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error)
		return error;

	switch (option) {
		case PR_SET_PDEATHSIG:
			if (!valid_signal(arg2)) {
				error = -EINVAL;
				break;
			}
			current->pdeath_signal = arg2;
			break;
		case PR_GET_PDEATHSIG:
			error = put_user(current->pdeath_signal, (int __user *)arg2);
			break;
		case PR_GET_DUMPABLE:
			error = current->mm->dumpable;
			break;
		case PR_SET_DUMPABLE:
			if (arg2 > 2) {	/* arg2 is unsigned, so no lower-bound check */
				error = -EINVAL;
				break;
			}
			current->mm->dumpable = arg2;
			break;

		case PR_SET_UNALIGN:
			error = SET_UNALIGN_CTL(current, arg2);
			break;
		case PR_GET_UNALIGN:
			error = GET_UNALIGN_CTL(current, arg2);
			break;
		case PR_SET_FPEMU:
			error = SET_FPEMU_CTL(current, arg2);
			break;
		case PR_GET_FPEMU:
			error = GET_FPEMU_CTL(current, arg2);
			break;
		case PR_SET_FPEXC:
			error = SET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_FPEXC:
			error = GET_FPEXC_CTL(current, arg2);
			break;
		case PR_GET_TIMING:
			error = PR_TIMING_STATISTICAL;
			break;
		case PR_SET_TIMING:
			if (arg2 == PR_TIMING_STATISTICAL)
				error = 0;
			else
				error = -EINVAL;
			break;

		case PR_GET_KEEPCAPS:
			if (current->keep_capabilities)
				error = 1;
			break;
		case PR_SET_KEEPCAPS:
			if (arg2 != 0 && arg2 != 1) {
				error = -EINVAL;
				break;
			}
			current->keep_capabilities = arg2;
			break;
		case PR_SET_NAME: {
			struct task_struct *me = current;
			unsigned char ncomm[sizeof(me->comm)];

			ncomm[sizeof(me->comm)-1] = 0;
			if (strncpy_from_user(ncomm, (char __user *)arg2,
						sizeof(me->comm)-1) < 0)
				return -EFAULT;
			set_task_comm(me, ncomm);
			return 0;
		}
		case PR_GET_NAME: {
			struct task_struct *me = current;
			unsigned char tcomm[sizeof(me->comm)];

			get_task_comm(tcomm, me);
			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
				return -EFAULT;
			return 0;
		}
		default:
			error = -EINVAL;
			break;
	}
	return error;
}
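
/*
 * Example (illustrative sketch): the PR_SET_NAME/PR_GET_NAME pair
 * handled above, as seen from userspace.  The name is silently
 * truncated to fit the 16 bytes of task_struct::comm.
 */
#if 0
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	char name[16];

	prctl(PR_SET_NAME, "worker-thread");	/* truncated to 15 chars + NUL */
	prctl(PR_GET_NAME, name);
	printf("comm: %s\n", name);
	return 0;
}
#endif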
1846