xref: /openbmc/linux/kernel/sys.c (revision 64c70b1c)
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif
/*
 * The same as above, but for filesystems which can only store a 16-bit
 * UID and GID.  As such, this is needed on all architectures.
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations
 *	and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);

/*
 *	Notifier chain core routines.  The exported routines below
 *	are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}

/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 *	@nl:		Pointer to head of the blocking notifier chain
 *	@val:		Value passed unmodified to notifier function
 *	@v:		Pointer passed unmodified to notifier function
 *	@nr_to_call:	Number of notifier functions to be called.  Pass -1
 *			to call all of them ("don't care").
 *	@nr_calls:	Records the number of notifications sent.  Pass NULL
 *			if the caller doesn't care.
 *	@returns:	notifier_call_chain returns the value returned by the
 *			last notifier function called.
 */

static int __kprobes notifier_call_chain(struct notifier_block **nl,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference(*nl);

	while (nb && nr_to_call) {
		next_nb = rcu_dereference(nb->next);
		ret = nb->notifier_call(nb, val, v);

		if (nr_calls)
			(*nr_calls)++;

		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
		nr_to_call--;
	}
	return ret;
}
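
/*
 * Illustrative sketch (not part of the original file): what a notifier
 * callback plugged into one of these chains typically looks like.  The
 * names example_notifier_event and example_nb are hypothetical.  Entries
 * are kept sorted by notifier_chain_register() so that higher .priority
 * values run first, and a callback can stop the walk by returning
 * NOTIFY_STOP.
 */
#if 0	/* example only */
static int example_notifier_event(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	/* React to @action; the meaning of @data is chain-specific. */
	return NOTIFY_DONE;	/* or NOTIFY_OK / NOTIFY_STOP / NOTIFY_BAD */
}

static struct notifier_block example_nb = {
	.notifier_call	= example_notifier_event,
	.priority	= 10,	/* runs before priority-0 entries */
};
#endif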

/*
 *	Atomic notifier chain routines.  Registration and unregistration
 *	use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 *	atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an atomic notifier chain.
 *
 *	Currently always returns zero.
 */

int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 *	atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an atomic notifier chain.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}

EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);

/**
 *	__atomic_notifier_call_chain - Call functions in an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See the comment for notifier_call_chain.
 *	@nr_calls: See the comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an atomic context, so they must not block.
 *	This routine uses RCU to synchronize with changes to the chain.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	rcu_read_unlock();
	return ret;
}

EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);

int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
		unsigned long val, void *v)
{
	return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
}

EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
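
/*
 * Illustrative sketch (not part of the original file): defining and using
 * an atomic notifier chain.  The chain and function names are hypothetical,
 * and example_nb reuses the block from the sketch further up; irqreturn_t
 * would come from <linux/interrupt.h>.  Because the callbacks run under
 * rcu_read_lock(), possibly from interrupt context, they must not sleep.
 */
#if 0	/* example only */
static ATOMIC_NOTIFIER_HEAD(example_atomic_chain);

static int __init example_init(void)
{
	/* Subscribe; entries may be added or removed at any time. */
	atomic_notifier_chain_register(&example_atomic_chain, &example_nb);
	return 0;
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	/* Publishing an event is legal even in hard-irq context. */
	atomic_notifier_call_chain(&example_atomic_chain, 1UL, NULL);
	return IRQ_HANDLED;
}
#endif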
/*
 *	Blocking notifier chain routines.  All access to the chain is
 *	synchronized by an rwsem.
 */

/**
 *	blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a blocking notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 *	blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a blocking notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}

EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

/**
 *	__blocking_notifier_call_chain - Call functions in a blocking notifier chain
 *	@nh: Pointer to head of the blocking notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain.
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
				   unsigned long val, void *v,
				   int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_dereference(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
					nr_calls);
		up_read(&nh->rwsem);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);

int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
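
/*
 * Illustrative sketch (not part of the original file): the blocking
 * variant is the usual choice when callbacks may sleep.  All names
 * prefixed example_ are hypothetical.
 */
#if 0	/* example only */
static BLOCKING_NOTIFIER_HEAD(example_blocking_chain);

static int example_event(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	/* May sleep: take mutexes, allocate with GFP_KERNEL, etc. */
	return NOTIFY_OK;
}

static struct notifier_block example_blocking_nb = {
	.notifier_call = example_event,
};

/* From process context only:
 *	blocking_notifier_call_chain(&example_blocking_chain, 0, NULL);
 */
#endif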

/*
 *	Raw notifier chain routines.  There is no protection;
 *	the caller must provide it.  Use at your own risk!
 */

/**
 *	raw_notifier_chain_register - Add notifier to a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Currently always returns zero.
 */

int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 *	raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from a raw notifier chain.
 *	All locking must be provided by the caller.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}

EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

/**
 *	__raw_notifier_call_chain - Call functions in a raw notifier chain
 *	@nh: Pointer to head of the raw notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in an undefined context.
 *	All locking must be provided by the caller.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __raw_notifier_call_chain(struct raw_notifier_head *nh,
			      unsigned long val, void *v,
			      int nr_to_call, int *nr_calls)
{
	return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
}

EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);

int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return __raw_notifier_call_chain(nh, val, v, -1, NULL);
}

EXPORT_SYMBOL_GPL(raw_notifier_call_chain);

/*
 *	SRCU notifier chain routines.  Registration and unregistration
 *	use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 *	srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an SRCU notifier chain.
 *	Must be called in process context.
 *
 *	Currently always returns zero.
 */

int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_register(&nh->head, n);
	mutex_unlock(&nh->mutex);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);

/**
 *	srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@n: Entry to remove from notifier chain
 *
 *	Removes a notifier from an SRCU notifier chain.
 *	Must be called from process context.
 *
 *	Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);
	synchronize_srcu(&nh->srcu);
	return ret;
}

EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);

/**
 *	__srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 *	@nh: Pointer to head of the SRCU notifier chain
 *	@val: Value passed unmodified to notifier function
 *	@v: Pointer passed unmodified to notifier function
 *	@nr_to_call: See comment for notifier_call_chain.
 *	@nr_calls: See comment for notifier_call_chain
 *
 *	Calls each function in a notifier chain in turn.  The functions
 *	run in a process context, so they are allowed to block.
 *
 *	If the return value of the notifier can be and'ed
 *	with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
 *	will return immediately, with the return value of
 *	the notifier function which halted execution.
 *	Otherwise the return value is the return value
 *	of the last notifier function called.
 */

int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
			       unsigned long val, void *v,
			       int nr_to_call, int *nr_calls)
{
	int ret;
	int idx;

	idx = srcu_read_lock(&nh->srcu);
	ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
	srcu_read_unlock(&nh->srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);

int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v)
{
	return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);

/**
 *	srcu_init_notifier_head - Initialize an SRCU notifier head
 *	@nh: Pointer to head of the srcu notifier chain
 *
 *	Unlike other sorts of notifier heads, SRCU notifier heads require
 *	dynamic initialization.  Be sure to call this routine before
 *	calling any of the other SRCU notifier routines for this head.
 *
 *	If an SRCU notifier head is deallocated, it must first be cleaned
 *	up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 *	per-cpu data (used by the SRCU mechanism) will leak.
 */

void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
	mutex_init(&nh->mutex);
	if (init_srcu_struct(&nh->srcu) < 0)
		BUG();
	nh->head = NULL;
}

EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
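
/*
 * Illustrative sketch (not part of the original file): unlike the other
 * chain types, an SRCU head has a lifecycle.  srcu_cleanup_notifier_head()
 * is the counterpart named in the comment above; the struct and function
 * names around it are hypothetical.
 */
#if 0	/* example only */
struct example_dev {
	struct srcu_notifier_head nh;
};

static int example_probe(struct example_dev *dev)
{
	srcu_init_notifier_head(&dev->nh);	/* before any other use */
	return 0;
}

static void example_remove(struct example_dev *dev)
{
	srcu_cleanup_notifier_head(&dev->nh);	/* frees per-cpu SRCU state */
}
#endif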

/**
 *	register_reboot_notifier - Register function to be called at reboot time
 *	@nb: Info about notifier function to be called
 *
 *	Registers a function with the list of functions
 *	to be called at reboot time.
 *
 *	Currently always returns zero, as blocking_notifier_chain_register()
 *	always returns zero.
 */

int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(register_reboot_notifier);

/**
 *	unregister_reboot_notifier - Unregister previously registered reboot notifier
 *	@nb: Hook to be unregistered
 *
 *	Unregisters a previously registered reboot
 *	notifier function.
 *
 *	Returns zero on success, or %-ENOENT on failure.
 */

int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}

EXPORT_SYMBOL(unregister_reboot_notifier);
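
/*
 * Illustrative sketch (not part of the original file): a driver-style
 * reboot hook.  The callback receives SYS_RESTART, SYS_HALT or
 * SYS_POWER_OFF in @action (see the kernel_*() helpers below); all
 * names prefixed example_ are hypothetical.
 */
#if 0	/* example only */
static int example_reboot_event(struct notifier_block *nb,
				unsigned long action, void *data)
{
	/* Quiesce hypothetical hardware before the machine goes down. */
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_event,
};

static int __init example_init(void)
{
	return register_reboot_notifier(&example_reboot_nb);
}

static void __exit example_exit(void)
{
	unregister_reboot_notifier(&example_reboot_nb);
}
#endif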

static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (p->uid != current->euid &&
		p->euid != current->euid && !capable(CAP_SYS_NICE)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

asmlinkage long sys_setpriority(int which, int who, int niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	int error = -EINVAL;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_pid(who);
			else
				p = current;
			if (p)
				error = set_one_prio(p, niceval, error);
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_pid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				error = set_one_prio(p, niceval, error);
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who)
					error = set_one_prio(p, niceval, error);
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* For find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
asmlinkage long sys_getpriority(int which, int who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	long niceval, retval = -ESRCH;
	struct pid *pgrp;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	read_lock(&tasklist_lock);
	switch (which) {
		case PRIO_PROCESS:
			if (who)
				p = find_task_by_pid(who);
			else
				p = current;
			if (p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
			break;
		case PRIO_PGRP:
			if (who)
				pgrp = find_pid(who);
			else
				pgrp = task_pgrp(current);
			do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
			break;
		case PRIO_USER:
			user = current->user;
			if (!who)
				who = current->uid;
			else
				if ((who != current->uid) && !(user = find_user(who)))
					goto out_unlock;	/* No processes for this user */

			do_each_thread(g, p)
				if (p->uid == who) {
					niceval = 20 - task_nice(p);
					if (niceval > retval)
						retval = niceval;
				}
			while_each_thread(g, p);
			if (who != current->uid)
				free_uid(user);		/* for find_user() */
			break;
	}
out_unlock:
	read_unlock(&tasklist_lock);

	return retval;
}
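
/*
 * Worked example (not in the original source) of the offset described
 * above: a process at nice -20 yields 20 - (-20) = 40, nice 0 yields 20,
 * and nice 19 yields 1, so the syscall's range is 40..1 and never
 * collides with negative errno values.  Library wrappers such as glibc's
 * getpriority() subtract the offset again before returning to the caller.
 */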

/**
 *	emergency_restart - reboot the system
 *
 *	Without shutting down any hardware or taking any locks
 *	reboot the system.  This is called when we know we are in
 *	trouble so this is our best effort to reboot.  This is
 *	safe to call in interrupt context.
 */
void emergency_restart(void)
{
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

static void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	device_shutdown();
}

/**
 *	kernel_restart - reboot the system
 *	@cmd: pointer to buffer containing command to execute for restart
 *		or %NULL
 *
 *	Shutdown everything and perform a clean reboot.
 *	This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

/**
 *	kernel_kexec - reboot the system
 *
 *	Move into place and start executing a preloaded standalone
 *	executable.  If nothing was preloaded return an error.
 */
static void kernel_kexec(void)
{
#ifdef CONFIG_KEXEC
	struct kimage *image;
	image = xchg(&kexec_image, NULL);
	if (!image)
		return;
	kernel_restart_prepare(NULL);
	printk(KERN_EMERG "Starting new kernel\n");
	machine_shutdown();
	machine_kexec(image);
#endif
}

void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	device_shutdown();
}

/**
 *	kernel_halt - halt the system
 *
 *	Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	printk(KERN_EMERG "System halted.\n");
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 *	kernel_power_off - power_off the system
 *
 *	Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	printk(KERN_EMERG "Power down.\n");
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user *arg)
{
	char buffer[256];

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	lock_kernel();
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		unlock_kernel();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			unlock_kernel();
			return -EFAULT;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

	case LINUX_REBOOT_CMD_KEXEC:
		kernel_kexec();
		unlock_kernel();
		return -EINVAL;

#ifdef CONFIG_SOFTWARE_SUSPEND
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		{
			int ret = hibernate();
			unlock_kernel();
			return ret;
		}
#endif

	default:
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();
	return 0;
}
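
/*
 * Illustrative userspace sketch (not part of the original file): the
 * magic-number handshake described above, as a caller would perform it
 * via the raw syscall.  Built as an ordinary program; it needs
 * CAP_SYS_BOOT to succeed.
 */
#if 0	/* example only */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
	sync();		/* sys_reboot() does not sync for you */
	return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		       LINUX_REBOOT_CMD_RESTART, NULL);
}
#endif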

static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
{
	int old_rgid = current->gid;
	int old_egid = current->egid;
	int new_rgid = old_rgid;
	int new_egid = old_egid;
	int retval;

	retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	if (rgid != (gid_t) -1) {
		if ((old_rgid == rgid) ||
		    (current->egid == rgid) ||
		    capable(CAP_SETGID))
			new_rgid = rgid;
		else
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if ((old_rgid == egid) ||
		    (current->egid == egid) ||
		    (current->sgid == egid) ||
		    capable(CAP_SETGID))
			new_egid = egid;
		else
			return -EPERM;
	}
	if (new_egid != old_egid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && egid != old_rgid))
		current->sgid = new_egid;
	current->fsgid = new_egid;
	current->egid = new_egid;
	current->gid = new_rgid;
	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
asmlinkage long sys_setgid(gid_t gid)
{
	int old_egid = current->egid;
	int retval;

	retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	if (capable(CAP_SETGID)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->gid = current->egid = current->sgid = current->fsgid = gid;
	} else if ((gid == current->gid) || (gid == current->sgid)) {
		if (old_egid != gid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = current->fsgid = gid;
	}
	else
		return -EPERM;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

static int set_user(uid_t new_ruid, int dumpclear)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new_ruid);
	if (!new_user)
		return -EAGAIN;

	if (atomic_read(&new_user->processes) >=
				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
			new_user != &root_user) {
		free_uid(new_user);
		return -EAGAIN;
	}

	switch_uid(new_user);

	if (dumpclear) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->uid = new_ruid;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
{
	int old_ruid, old_euid, old_suid, new_ruid, new_euid;
	int retval;

	retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
	if (retval)
		return retval;

	new_ruid = old_ruid = current->uid;
	new_euid = old_euid = current->euid;
	old_suid = current->suid;

	if (ruid != (uid_t) -1) {
		new_ruid = ruid;
		if ((old_ruid != ruid) &&
		    (current->euid != ruid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (euid != (uid_t) -1) {
		new_euid = euid;
		if ((old_ruid != euid) &&
		    (current->euid != euid) &&
		    (current->suid != euid) &&
		    !capable(CAP_SETUID))
			return -EPERM;
	}

	if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
		return -EAGAIN;

	if (new_euid != old_euid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = new_euid;
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && euid != old_ruid))
		current->suid = current->euid;
	current->fsuid = current->euid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
asmlinkage long sys_setuid(uid_t uid)
{
	int old_euid = current->euid;
	int old_ruid, old_suid, new_suid;
	int retval;

	retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
	if (retval)
		return retval;

	old_ruid = current->uid;
	old_suid = current->suid;
	new_suid = old_suid;

	if (capable(CAP_SETUID)) {
		if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
			return -EAGAIN;
		new_suid = uid;
	} else if ((uid != current->uid) && (uid != new_suid))
		return -EPERM;

	if (old_euid != uid) {
		current->mm->dumpable = suid_dumpable;
		smp_wmb();
	}
	current->fsuid = current->euid = uid;
	current->suid = new_suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
}

/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	int old_ruid = current->uid;
	int old_euid = current->euid;
	int old_suid = current->suid;
	int retval;

	retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETUID)) {
		if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
		    (ruid != current->euid) && (ruid != current->suid))
			return -EPERM;
		if ((euid != (uid_t) -1) && (euid != current->uid) &&
		    (euid != current->euid) && (euid != current->suid))
			return -EPERM;
		if ((suid != (uid_t) -1) && (suid != current->uid) &&
		    (suid != current->euid) && (suid != current->suid))
			return -EPERM;
	}
	if (ruid != (uid_t) -1) {
		if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
			return -EAGAIN;
	}
	if (euid != (uid_t) -1) {
		if (euid != current->euid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->euid = euid;
	}
	current->fsuid = current->euid;
	if (suid != (uid_t) -1)
		current->suid = suid;

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
}
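
/*
 * Illustrative userspace sketch (not part of the original file): the
 * canonical way a setuid-root program drops privileges permanently via
 * the syscall above - setting all three uids at once leaves no saved
 * uid to switch back to.  drop_privileges() is a hypothetical helper.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <unistd.h>
#include <grp.h>
#include <stdlib.h>

static void drop_privileges(uid_t uid, gid_t gid)
{
	/* Order matters: shed supplementary groups and gids while still root. */
	if (setgroups(0, NULL) < 0 || setresgid(gid, gid, gid) < 0 ||
	    setresuid(uid, uid, uid) < 0)
		exit(1);
}
#endif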

asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
{
	int retval;

	if (!(retval = put_user(current->uid, ruid)) &&
	    !(retval = put_user(current->euid, euid)))
		retval = put_user(current->suid, suid);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	int retval;

	retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
	if (retval)
		return retval;

	if (!capable(CAP_SETGID)) {
		if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
		    (rgid != current->egid) && (rgid != current->sgid))
			return -EPERM;
		if ((egid != (gid_t) -1) && (egid != current->gid) &&
		    (egid != current->egid) && (egid != current->sgid))
			return -EPERM;
		if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
		    (sgid != current->egid) && (sgid != current->sgid))
			return -EPERM;
	}
	if (egid != (gid_t) -1) {
		if (egid != current->egid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->egid = egid;
	}
	current->fsgid = current->egid;
	if (rgid != (gid_t) -1)
		current->gid = rgid;
	if (sgid != (gid_t) -1)
		current->sgid = sgid;

	key_fsgid_changed(current);
	proc_id_connector(current, PROC_EVENT_GID);
	return 0;
}

asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
{
	int retval;

	if (!(retval = put_user(current->gid, rgid)) &&
	    !(retval = put_user(current->egid, egid)))
		retval = put_user(current->sgid, sgid);

	return retval;
}

/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
asmlinkage long sys_setfsuid(uid_t uid)
{
	int old_fsuid;

	old_fsuid = current->fsuid;
	if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
		return old_fsuid;

	if (uid == current->uid || uid == current->euid ||
	    uid == current->suid || uid == current->fsuid ||
	    capable(CAP_SETUID)) {
		if (uid != old_fsuid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsuid = uid;
	}

	key_fsuid_changed(current);
	proc_id_connector(current, PROC_EVENT_UID);

	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);

	return old_fsuid;
}
/*
 * Same as above, but for the filesystem gid.  ("Samma på svenska" -
 * "the same in Swedish".)
 */
asmlinkage long sys_setfsgid(gid_t gid)
{
	int old_fsgid;

	old_fsgid = current->fsgid;
	if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
		return old_fsgid;

	if (gid == current->gid || gid == current->egid ||
	    gid == current->sgid || gid == current->fsgid ||
	    capable(CAP_SETGID)) {
		if (gid != old_fsgid) {
			current->mm->dumpable = suid_dumpable;
			smp_wmb();
		}
		current->fsgid = gid;
		key_fsgid_changed(current);
		proc_id_connector(current, PROC_EVENT_GID);
	}
	return old_fsgid;
}

asmlinkage long sys_times(struct tms __user *tbuf)
{
	/*
	 *	In the SMP world we might just be unlucky and have one of
	 *	the times increment as we use it. Since the value is an
	 *	atomically safe type this is just fine. Conceptually it's
	 *	as if the syscall took an instant longer to occur.
	 */
	if (tbuf) {
		struct tms tmp;
		struct task_struct *tsk = current;
		struct task_struct *t;
		cputime_t utime, stime, cutime, cstime;

		spin_lock_irq(&tsk->sighand->siglock);
		utime = tsk->signal->utime;
		stime = tsk->signal->stime;
		t = tsk;
		do {
			utime = cputime_add(utime, t->utime);
			stime = cputime_add(stime, t->stime);
			t = next_thread(t);
		} while (t != tsk);

		cutime = tsk->signal->cutime;
		cstime = tsk->signal->cstime;
		spin_unlock_irq(&tsk->sighand->siglock);

		tmp.tms_utime = cputime_to_clock_t(utime);
		tmp.tms_stime = cputime_to_clock_t(stime);
		tmp.tms_cutime = cputime_to_clock_t(cutime);
		tmp.tms_cstime = cputime_to_clock_t(cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */

asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	int err = -EINVAL;

	if (!pid)
		pid = group_leader->pid;
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_pid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (p->real_parent == group_leader) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	if (pgid != pid) {
		struct task_struct *g =
			find_task_by_pid_type(PIDTYPE_PGID, pgid);

		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (process_group(p) != pgid) {
		detach_pid(p, PIDTYPE_PGID);
		p->signal->pgrp = pgid;
		attach_pid(p, PIDTYPE_PGID, find_pid(pgid));
	}

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	return err;
}

asmlinkage long sys_getpgid(pid_t pid)
{
	if (!pid)
		return process_group(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getpgid(p);
			if (!retval)
				retval = process_group(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

#ifdef __ARCH_WANT_SYS_GETPGRP

asmlinkage long sys_getpgrp(void)
{
	/* SMP - assuming writes are word atomic this is fine */
	return process_group(current);
}

#endif

asmlinkage long sys_getsid(pid_t pid)
{
	if (!pid)
		return process_session(current);
	else {
		int retval;
		struct task_struct *p;

		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);

		retval = -ESRCH;
		if (p) {
			retval = security_task_getsid(p);
			if (!retval)
				retval = process_session(p);
		}
		read_unlock(&tasklist_lock);
		return retval;
	}
}

asmlinkage long sys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	pid_t session;
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);

	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	session = group_leader->pid;
	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 *
	 * Don't check if session id == 1 because kernel threads use this
	 * session id and so the check will always fail and make it so
	 * init cannot successfully call setsid.
	 */
	if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(session, session);

	spin_lock(&group_leader->sighand->siglock);
	group_leader->signal->tty = NULL;
	spin_unlock(&group_leader->sighand->siglock);

	err = process_group(group_leader);
out:
	write_unlock_irq(&tasklist_lock);
	return err;
}

/*
 * Supplementary group IDs
 */

/* init to 2 - one for init_task, one to ensure it is never freed */
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };

struct group_info *groups_alloc(int gidsetsize)
{
	struct group_info *group_info;
	int nblocks;
	int i;

	nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
	/* Make sure we always allocate at least one indirect block pointer */
	nblocks = nblocks ? : 1;
	group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
	if (!group_info)
		return NULL;
	group_info->ngroups = gidsetsize;
	group_info->nblocks = nblocks;
	atomic_set(&group_info->usage, 1);

	if (gidsetsize <= NGROUPS_SMALL)
		group_info->blocks[0] = group_info->small_block;
	else {
		for (i = 0; i < nblocks; i++) {
			gid_t *b;
			b = (void *)__get_free_page(GFP_USER);
			if (!b)
				goto out_undo_partial_alloc;
			group_info->blocks[i] = b;
		}
	}
	return group_info;

out_undo_partial_alloc:
	while (--i >= 0) {
		free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
	return NULL;
}

EXPORT_SYMBOL(groups_alloc);

void groups_free(struct group_info *group_info)
{
	if (group_info->blocks[0] != group_info->small_block) {
		int i;
		for (i = 0; i < group_info->nblocks; i++)
			free_page((unsigned long)group_info->blocks[i]);
	}
	kfree(group_info);
}

EXPORT_SYMBOL(groups_free);

/* export the group_info to a user-space array */
static int groups_to_user(gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_to_user(grouplist+off, group_info->blocks[i], len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* fill a group_info from a user-space array - it must be allocated already */
static int groups_from_user(struct group_info *group_info,
    gid_t __user *grouplist)
{
	int i;
	int count = group_info->ngroups;

	for (i = 0; i < group_info->nblocks; i++) {
		int cp_count = min(NGROUPS_PER_BLOCK, count);
		int off = i * NGROUPS_PER_BLOCK;
		int len = cp_count * sizeof(*grouplist);

		if (copy_from_user(group_info->blocks[i], grouplist+off, len))
			return -EFAULT;

		count -= cp_count;
	}
	return 0;
}

/* a simple Shell sort */
static void groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = GROUP_AT(group_info, right);

			while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
				GROUP_AT(group_info, right) =
				    GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			GROUP_AT(group_info, right) = tmp;
		}
		stride /= 3;
	}
}
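
/*
 * Illustrative note (not in the original source): groups_sort() uses the
 * Knuth gap sequence h = 3*h + 1, i.e. 1, 4, 13, 40, 121, ...  For a
 * 100-entry set the for-loop above grows the stride to 121 (the first
 * value >= the size), the stride /= 3 brings it back to 40, and each
 * outer pass then shrinks it 40 -> 13 -> 4 -> 1; the final stride-1 pass
 * is a plain insertion sort over an already almost-sorted array.
 */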
1717 
1718 /* a simple bsearch */
1719 int groups_search(struct group_info *group_info, gid_t grp)
1720 {
1721 	unsigned int left, right;
1722 
1723 	if (!group_info)
1724 		return 0;
1725 
1726 	left = 0;
1727 	right = group_info->ngroups;
1728 	while (left < right) {
1729 		unsigned int mid = (left+right)/2;
1730 		int cmp = grp - GROUP_AT(group_info, mid);
1731 		if (cmp > 0)
1732 			left = mid + 1;
1733 		else if (cmp < 0)
1734 			right = mid;
1735 		else
1736 			return 1;
1737 	}
1738 	return 0;
1739 }
1740 
1741 /* validate and set current->group_info */
1742 int set_current_groups(struct group_info *group_info)
1743 {
1744 	int retval;
1745 	struct group_info *old_info;
1746 
1747 	retval = security_task_setgroups(group_info);
1748 	if (retval)
1749 		return retval;
1750 
1751 	groups_sort(group_info);
1752 	get_group_info(group_info);
1753 
1754 	task_lock(current);
1755 	old_info = current->group_info;
1756 	current->group_info = group_info;
1757 	task_unlock(current);
1758 
1759 	put_group_info(old_info);
1760 
1761 	return 0;
1762 }
1763 
1764 EXPORT_SYMBOL(set_current_groups);
1765 
1766 asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
1767 {
1768 	int i = 0;
1769 
1770 	/*
1771 	 *	SMP: Nobody else can change our grouplist. Thus we are
1772 	 *	safe.
1773 	 */
1774 
1775 	if (gidsetsize < 0)
1776 		return -EINVAL;
1777 
1778 	/* no need to grab task_lock here; it cannot change */
1779 	i = current->group_info->ngroups;
1780 	if (gidsetsize) {
1781 		if (i > gidsetsize) {
1782 			i = -EINVAL;
1783 			goto out;
1784 		}
1785 		if (groups_to_user(grouplist, current->group_info)) {
1786 			i = -EFAULT;
1787 			goto out;
1788 		}
1789 	}
1790 out:
1791 	return i;
1792 }
1793 
1794 /*
1795  *	SMP: Our groups are copy-on-write. We can set them safely
1796  *	without another task interfering.
1797  */
1798 
1799 asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
1800 {
1801 	struct group_info *group_info;
1802 	int retval;
1803 
1804 	if (!capable(CAP_SETGID))
1805 		return -EPERM;
1806 	if ((unsigned)gidsetsize > NGROUPS_MAX)
1807 		return -EINVAL;
1808 
1809 	group_info = groups_alloc(gidsetsize);
1810 	if (!group_info)
1811 		return -ENOMEM;
1812 	retval = groups_from_user(group_info, grouplist);
1813 	if (retval) {
1814 		put_group_info(group_info);
1815 		return retval;
1816 	}
1817 
1818 	retval = set_current_groups(group_info);
1819 	put_group_info(group_info);
1820 
1821 	return retval;
1822 }
1823 
1824 /*
1825  * Check whether we're fsgid/egid or in the supplemental group..
1826  */
1827 int in_group_p(gid_t grp)
1828 {
1829 	int retval = 1;
1830 	if (grp != current->fsgid)
1831 		retval = groups_search(current->group_info, grp);
1832 	return retval;
1833 }
1834 
1835 EXPORT_SYMBOL(in_group_p);
1836 
1837 int in_egroup_p(gid_t grp)
1838 {
1839 	int retval = 1;
1840 	if (grp != current->egid)
1841 		retval = groups_search(current->group_info, grp);
1842 	return retval;
1843 }
1844 
1845 EXPORT_SYMBOL(in_egroup_p);
1846 
1847 DECLARE_RWSEM(uts_sem);
1848 
1849 EXPORT_SYMBOL(uts_sem);
1850 
1851 asmlinkage long sys_newuname(struct new_utsname __user * name)
1852 {
1853 	int errno = 0;
1854 
1855 	down_read(&uts_sem);
1856 	if (copy_to_user(name, utsname(), sizeof *name))
1857 		errno = -EFAULT;
1858 	up_read(&uts_sem);
1859 	return errno;
1860 }
1861 
1862 asmlinkage long sys_sethostname(char __user *name, int len)
1863 {
1864 	int errno;
1865 	char tmp[__NEW_UTS_LEN];
1866 
1867 	if (!capable(CAP_SYS_ADMIN))
1868 		return -EPERM;
1869 	if (len < 0 || len > __NEW_UTS_LEN)
1870 		return -EINVAL;
1871 	down_write(&uts_sem);
1872 	errno = -EFAULT;
1873 	if (!copy_from_user(tmp, name, len)) {
1874 		memcpy(utsname()->nodename, tmp, len);
1875 		utsname()->nodename[len] = 0;
1876 		errno = 0;
1877 	}
1878 	up_write(&uts_sem);
1879 	return errno;
1880 }
1881 
1882 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1883 
1884 asmlinkage long sys_gethostname(char __user *name, int len)
1885 {
1886 	int i, errno;
1887 
1888 	if (len < 0)
1889 		return -EINVAL;
1890 	down_read(&uts_sem);
1891 	i = 1 + strlen(utsname()->nodename);
1892 	if (i > len)
1893 		i = len;
1894 	errno = 0;
1895 	if (copy_to_user(name, utsname()->nodename, i))
1896 		errno = -EFAULT;
1897 	up_read(&uts_sem);
1898 	return errno;
1899 }
1900 
1901 #endif
1902 
1903 /*
1904  * Only setdomainname; getdomainname can be implemented by calling
1905  * uname()
1906  */
1907 asmlinkage long sys_setdomainname(char __user *name, int len)
1908 {
1909 	int errno;
1910 	char tmp[__NEW_UTS_LEN];
1911 
1912 	if (!capable(CAP_SYS_ADMIN))
1913 		return -EPERM;
1914 	if (len < 0 || len > __NEW_UTS_LEN)
1915 		return -EINVAL;
1916 
1917 	down_write(&uts_sem);
1918 	errno = -EFAULT;
1919 	if (!copy_from_user(tmp, name, len)) {
1920 		memcpy(utsname()->domainname, tmp, len);
1921 		utsname()->domainname[len] = 0;
1922 		errno = 0;
1923 	}
1924 	up_write(&uts_sem);
1925 	return errno;
1926 }
1927 
1928 asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1929 {
1930 	if (resource >= RLIM_NLIMITS)
1931 		return -EINVAL;
1932 	else {
1933 		struct rlimit value;
1934 		task_lock(current->group_leader);
1935 		value = current->signal->rlim[resource];
1936 		task_unlock(current->group_leader);
1937 		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1938 	}
1939 }
1940 
1941 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1942 
1943 /*
1944  *	Back compatibility for getrlimit. Needed for some apps.
1945  */
1946 
1947 asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1948 {
1949 	struct rlimit x;
1950 	if (resource >= RLIM_NLIMITS)
1951 		return -EINVAL;
1952 
1953 	task_lock(current->group_leader);
1954 	x = current->signal->rlim[resource];
1955 	task_unlock(current->group_leader);
1956 	if (x.rlim_cur > 0x7FFFFFFF)
1957 		x.rlim_cur = 0x7FFFFFFF;
1958 	if (x.rlim_max > 0x7FFFFFFF)
1959 		x.rlim_max = 0x7FFFFFFF;
1960 	return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
1961 }
1962 
1963 #endif
1964 
1965 asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
1966 {
1967 	struct rlimit new_rlim, *old_rlim;
1968 	unsigned long it_prof_secs;
1969 	int retval;
1970 
1971 	if (resource >= RLIM_NLIMITS)
1972 		return -EINVAL;
1973 	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1974 		return -EFAULT;
1975 	if (new_rlim.rlim_cur > new_rlim.rlim_max)
1976 		return -EINVAL;
1977 	old_rlim = current->signal->rlim + resource;
1978 	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
1979 	    !capable(CAP_SYS_RESOURCE))
1980 		return -EPERM;
1981 	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
1982 		return -EPERM;
1983 
1984 	retval = security_task_setrlimit(resource, &new_rlim);
1985 	if (retval)
1986 		return retval;
1987 
1988 	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
1989 		/*
1990 		 * The caller is asking for an immediate RLIMIT_CPU
1991 		 * expiry.  But we use the zero value to mean "it was
1992 		 * never set".  So let's cheat and make it one second
1993 		 * instead.
1994 		 */
1995 		new_rlim.rlim_cur = 1;
1996 	}
1997 
1998 	task_lock(current->group_leader);
1999 	*old_rlim = new_rlim;
2000 	task_unlock(current->group_leader);
2001 
2002 	if (resource != RLIMIT_CPU)
2003 		goto out;
2004 
2005 	/*
2006 	 * RLIMIT_CPU handling.  Note that the kernel fails to return an error
2007 	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
2008 	 * very long-standing error, and fixing it now risks breakage of
2009 	 * applications, so we live with it.
2010 	 */
2011 	if (new_rlim.rlim_cur == RLIM_INFINITY)
2012 		goto out;
2013 
2014 	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
2015 	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
2016 		unsigned long rlim_cur = new_rlim.rlim_cur;
2017 		cputime_t cputime;
2018 
2019 		cputime = secs_to_cputime(rlim_cur);
2020 		read_lock(&tasklist_lock);
2021 		spin_lock_irq(&current->sighand->siglock);
2022 		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
2023 		spin_unlock_irq(&current->sighand->siglock);
2024 		read_unlock(&tasklist_lock);
2025 	}
2026 out:
2027 	return 0;
2028 }
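
/*
 * Userspace sketch (illustrative) of the RLIMIT_CPU quirk above: a soft
 * limit of 0 is silently bumped to 1 second, so this process gets
 * SIGXCPU after about a second of CPU time rather than immediately:
 *
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rlimit rl = { .rlim_cur = 0, .rlim_max = 10 };
 *
 *		setrlimit(RLIMIT_CPU, &rl);
 *		for (;;)
 *			;	// spin until SIGXCPU arrives
 *	}
 */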
2029 
2030 /*
2031  * It would make sense to put struct rusage in the task_struct,
2032  * except that would make the task_struct be *really big*.  After
2033  * task_struct gets moved into malloc'ed memory, it would
2034  * make sense to do this.  It will make moving the rest of the information
2035  * a lot simpler (which we're not doing right now because we're not
2036  * measuring them yet).
2037  *
2038  * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
2039  * races with threads incrementing their own counters.  But since word
2040  * reads are atomic, we either get new values or old values and we don't
2041  * care which for the sums.  We always take the siglock to protect reading
2042  * the c* fields from p->signal from races with exit.c updating those
2043  * fields when reaping, so a sample either gets all the additions of a
2044  * given child after it's reaped, or none so this sample is before reaping.
2045  *
2046  * Locking:
2047  * We need to take the siglock for CHILDREN, SELF and BOTH
2048  * for any target that is not current, and for current itself when it
2049  * is multithreaded.  Thread traversal is safe with
2050  * the siglock held.
2051  * Strictly speaking, we do not need to take the siglock if we are current and
2052  * single threaded, as no one else can take our signal_struct away, no one
2053  * else can reap the children to update signal->c* counters, and no one else
2054  * can race with the signal-> fields. If we do not take any lock, the
2055  * signal-> fields could be read out of order while another thread was just
2056  * exiting, so we would need a read memory barrier when avoiding the lock.
2057  * On the writer side, the write memory barrier is already implied because
2058  * __exit_signal releases the siglock spinlock after updating the signal->
2059  * fields. But we don't do this yet to keep things simple.
2060  *
2061  */
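
/*
 * A sketch of the lockless fast path described above (deliberately not
 * implemented here): the current-and-single-threaded case could skip
 * the siglock if it paired a read barrier with the siglock release in
 * __exit_signal(), roughly:
 *
 *	if (p == current && thread_group_empty(p)) {
 *		smp_rmb();	// pairs with the unlock in __exit_signal()
 *		// ... sample the signal->c* fields without the siglock ...
 *	}
 */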
2062 
2063 static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
2064 {
2065 	struct task_struct *t;
2066 	unsigned long flags;
2067 	cputime_t utime, stime;
2068 
2069 	memset(r, 0, sizeof(*r));
2070 	utime = stime = cputime_zero;
2071 
2072 	rcu_read_lock();
2073 	if (!lock_task_sighand(p, &flags)) {
2074 		rcu_read_unlock();
2075 		return;
2076 	}
2077 
2078 	switch (who) {
2079 		case RUSAGE_BOTH:
2080 		case RUSAGE_CHILDREN:
2081 			utime = p->signal->cutime;
2082 			stime = p->signal->cstime;
2083 			r->ru_nvcsw = p->signal->cnvcsw;
2084 			r->ru_nivcsw = p->signal->cnivcsw;
2085 			r->ru_minflt = p->signal->cmin_flt;
2086 			r->ru_majflt = p->signal->cmaj_flt;
2087 			r->ru_inblock = p->signal->cinblock;
2088 			r->ru_oublock = p->signal->coublock;
2089 
2090 			if (who == RUSAGE_CHILDREN)
2091 				break;
2092 
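		/* fall through: RUSAGE_BOTH also wants the self counters added in */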
2093 		case RUSAGE_SELF:
2094 			utime = cputime_add(utime, p->signal->utime);
2095 			stime = cputime_add(stime, p->signal->stime);
2096 			r->ru_nvcsw += p->signal->nvcsw;
2097 			r->ru_nivcsw += p->signal->nivcsw;
2098 			r->ru_minflt += p->signal->min_flt;
2099 			r->ru_majflt += p->signal->maj_flt;
2100 			r->ru_inblock += p->signal->inblock;
2101 			r->ru_oublock += p->signal->oublock;
2102 			t = p;
2103 			do {
2104 				utime = cputime_add(utime, t->utime);
2105 				stime = cputime_add(stime, t->stime);
2106 				r->ru_nvcsw += t->nvcsw;
2107 				r->ru_nivcsw += t->nivcsw;
2108 				r->ru_minflt += t->min_flt;
2109 				r->ru_majflt += t->maj_flt;
2110 				r->ru_inblock += task_io_get_inblock(t);
2111 				r->ru_oublock += task_io_get_oublock(t);
2112 				t = next_thread(t);
2113 			} while (t != p);
2114 			break;
2115 
2116 		default:
2117 			BUG();
2118 	}
2119 
2120 	unlock_task_sighand(p, &flags);
2121 	rcu_read_unlock();
2122 
2123 	cputime_to_timeval(utime, &r->ru_utime);
2124 	cputime_to_timeval(stime, &r->ru_stime);
2125 }
2126 
2127 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
2128 {
2129 	struct rusage r;
2130 	k_getrusage(p, who, &r);
2131 	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
2132 }
2133 
2134 asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
2135 {
2136 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
2137 		return -EINVAL;
2138 	return getrusage(current, who, ru);
2139 }
2140 
2141 asmlinkage long sys_umask(int mask)
2142 {
2143 	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
2144 	return mask;
2145 }
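
/*
 * Userspace sketch (illustrative): umask(2) cannot fail and always
 * returns the previous mask, mirroring the xchg() above:
 *
 *	#include <sys/stat.h>
 *
 *	mode_t old = umask(027);	// new files default to 0750/0640
 *	umask(old);			// restore the saved mask
 */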
2146 
2147 asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
2148 			  unsigned long arg4, unsigned long arg5)
2149 {
2150 	long error;
2151 
2152 	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2153 	if (error)
2154 		return error;
2155 
2156 	switch (option) {
2157 		case PR_SET_PDEATHSIG:
2158 			if (!valid_signal(arg2)) {
2159 				error = -EINVAL;
2160 				break;
2161 			}
2162 			current->pdeath_signal = arg2;
2163 			break;
2164 		case PR_GET_PDEATHSIG:
2165 			error = put_user(current->pdeath_signal, (int __user *)arg2);
2166 			break;
2167 		case PR_GET_DUMPABLE:
2168 			error = current->mm->dumpable;
2169 			break;
2170 		case PR_SET_DUMPABLE:
2171 			if (arg2 > 1) {	/* arg2 is unsigned, never < 0 */
2172 				error = -EINVAL;
2173 				break;
2174 			}
2175 			current->mm->dumpable = arg2;
2176 			break;
2177 
2178 		case PR_SET_UNALIGN:
2179 			error = SET_UNALIGN_CTL(current, arg2);
2180 			break;
2181 		case PR_GET_UNALIGN:
2182 			error = GET_UNALIGN_CTL(current, arg2);
2183 			break;
2184 		case PR_SET_FPEMU:
2185 			error = SET_FPEMU_CTL(current, arg2);
2186 			break;
2187 		case PR_GET_FPEMU:
2188 			error = GET_FPEMU_CTL(current, arg2);
2189 			break;
2190 		case PR_SET_FPEXC:
2191 			error = SET_FPEXC_CTL(current, arg2);
2192 			break;
2193 		case PR_GET_FPEXC:
2194 			error = GET_FPEXC_CTL(current, arg2);
2195 			break;
2196 		case PR_GET_TIMING:
2197 			error = PR_TIMING_STATISTICAL;
2198 			break;
2199 		case PR_SET_TIMING:
2200 			if (arg2 == PR_TIMING_STATISTICAL)
2201 				error = 0;
2202 			else
2203 				error = -EINVAL;
2204 			break;
2205 
2206 		case PR_GET_KEEPCAPS:
2207 			if (current->keep_capabilities)
2208 				error = 1;
2209 			break;
2210 		case PR_SET_KEEPCAPS:
2211 			if (arg2 != 0 && arg2 != 1) {
2212 				error = -EINVAL;
2213 				break;
2214 			}
2215 			current->keep_capabilities = arg2;
2216 			break;
2217 		case PR_SET_NAME: {
2218 			struct task_struct *me = current;
2219 			unsigned char ncomm[sizeof(me->comm)];
2220 
2221 			ncomm[sizeof(me->comm)-1] = 0;
2222 			if (strncpy_from_user(ncomm, (char __user *)arg2,
2223 						sizeof(me->comm)-1) < 0)
2224 				return -EFAULT;
2225 			set_task_comm(me, ncomm);
2226 			return 0;
2227 		}
2228 		case PR_GET_NAME: {
2229 			struct task_struct *me = current;
2230 			unsigned char tcomm[sizeof(me->comm)];
2231 
2232 			get_task_comm(tcomm, me);
2233 			if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
2234 				return -EFAULT;
2235 			return 0;
2236 		}
2237 		case PR_GET_ENDIAN:
2238 			error = GET_ENDIAN(current, arg2);
2239 			break;
2240 		case PR_SET_ENDIAN:
2241 			error = SET_ENDIAN(current, arg2);
2242 			break;
2243 
2244 		default:
2245 			error = -EINVAL;
2246 			break;
2247 	}
2248 	return error;
2249 }
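
/*
 * Userspace sketch (illustrative): naming a task via the PR_SET_NAME /
 * PR_GET_NAME cases above; comm holds at most 15 characters plus the
 * NUL terminator:
 *
 *	#include <sys/prctl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char name[16];
 *
 *		prctl(PR_SET_NAME, "worker-0");
 *		prctl(PR_GET_NAME, name);
 *		printf("%s\n", name);	// prints "worker-0"
 *		return 0;
 *	}
 */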
2250 
2251 asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
2252 			   struct getcpu_cache __user *cache)
2253 {
2254 	int err = 0;
2255 	int cpu = raw_smp_processor_id();
2256 	if (cpup)
2257 		err |= put_user(cpu, cpup);
2258 	if (nodep)
2259 		err |= put_user(cpu_to_node(cpu), nodep);
2260 	if (cache) {
2261 		/*
2262 		 * The cache is not needed for this implementation,
2263 		 * but make sure user programs pass something
2264 		 * valid. vsyscall implementations can instead make
2265 		 * good use of the cache. Only t0 and t1 are used
2266 		 * because they are available in both the 32-bit and
2267 		 * 64-bit ABI, so no compat_getcpu is needed; the
2268 		 * 32-bit layout has enough padding.
2269 		 */
2270 		unsigned long t0, t1;
2271 		get_user(t0, &cache->blob[0]);
2272 		err |= get_user(t0, &cache->blob[0]);
2273 		err |= get_user(t1, &cache->blob[1]);
2274 		t0++;
2275 		t1++;
2276 		err |= put_user(t0, &cache->blob[0]);
2277 		err |= put_user(t1, &cache->blob[1]);
2278 	return err ? -EFAULT : 0;
2279 }
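
/*
 * Userspace sketch (illustrative): glibc of this era has no getcpu()
 * wrapper, so the call goes through syscall(2).  The cache argument may
 * be NULL, since this implementation only validates it:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned cpu, node;
 *
 *		if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *			printf("cpu %u node %u\n", cpu, node);
 *		return 0;
 *	}
 */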
2280