xref: /openbmc/linux/kernel/sys.c (revision 92614ad5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/kernel/sys.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7 
8 #include <linux/export.h>
9 #include <linux/mm.h>
10 #include <linux/utsname.h>
11 #include <linux/mman.h>
12 #include <linux/reboot.h>
13 #include <linux/prctl.h>
14 #include <linux/highuid.h>
15 #include <linux/fs.h>
16 #include <linux/kmod.h>
17 #include <linux/perf_event.h>
18 #include <linux/resource.h>
19 #include <linux/kernel.h>
20 #include <linux/workqueue.h>
21 #include <linux/capability.h>
22 #include <linux/device.h>
23 #include <linux/key.h>
24 #include <linux/times.h>
25 #include <linux/posix-timers.h>
26 #include <linux/security.h>
27 #include <linux/dcookies.h>
28 #include <linux/suspend.h>
29 #include <linux/tty.h>
30 #include <linux/signal.h>
31 #include <linux/cn_proc.h>
32 #include <linux/getcpu.h>
33 #include <linux/task_io_accounting_ops.h>
34 #include <linux/seccomp.h>
35 #include <linux/cpu.h>
36 #include <linux/personality.h>
37 #include <linux/ptrace.h>
38 #include <linux/fs_struct.h>
39 #include <linux/file.h>
40 #include <linux/mount.h>
41 #include <linux/gfp.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/version.h>
44 #include <linux/ctype.h>
45 #include <linux/syscall_user_dispatch.h>
46 
47 #include <linux/compat.h>
48 #include <linux/syscalls.h>
49 #include <linux/kprobes.h>
50 #include <linux/user_namespace.h>
51 #include <linux/time_namespace.h>
52 #include <linux/binfmts.h>
53 
54 #include <linux/sched.h>
55 #include <linux/sched/autogroup.h>
56 #include <linux/sched/loadavg.h>
57 #include <linux/sched/stat.h>
58 #include <linux/sched/mm.h>
59 #include <linux/sched/coredump.h>
60 #include <linux/sched/task.h>
61 #include <linux/sched/cputime.h>
62 #include <linux/rcupdate.h>
63 #include <linux/uidgid.h>
64 #include <linux/cred.h>
65 
66 #include <linux/nospec.h>
67 
68 #include <linux/kmsg_dump.h>
69 /* Move somewhere else to avoid recompiling? */
70 #include <generated/utsrelease.h>
71 
72 #include <linux/uaccess.h>
73 #include <asm/io.h>
74 #include <asm/unistd.h>
75 
76 #include "uid16.h"
77 
78 #ifndef SET_UNALIGN_CTL
79 # define SET_UNALIGN_CTL(a, b)	(-EINVAL)
80 #endif
81 #ifndef GET_UNALIGN_CTL
82 # define GET_UNALIGN_CTL(a, b)	(-EINVAL)
83 #endif
84 #ifndef SET_FPEMU_CTL
85 # define SET_FPEMU_CTL(a, b)	(-EINVAL)
86 #endif
87 #ifndef GET_FPEMU_CTL
88 # define GET_FPEMU_CTL(a, b)	(-EINVAL)
89 #endif
90 #ifndef SET_FPEXC_CTL
91 # define SET_FPEXC_CTL(a, b)	(-EINVAL)
92 #endif
93 #ifndef GET_FPEXC_CTL
94 # define GET_FPEXC_CTL(a, b)	(-EINVAL)
95 #endif
96 #ifndef GET_ENDIAN
97 # define GET_ENDIAN(a, b)	(-EINVAL)
98 #endif
99 #ifndef SET_ENDIAN
100 # define SET_ENDIAN(a, b)	(-EINVAL)
101 #endif
102 #ifndef GET_TSC_CTL
103 # define GET_TSC_CTL(a)		(-EINVAL)
104 #endif
105 #ifndef SET_TSC_CTL
106 # define SET_TSC_CTL(a)		(-EINVAL)
107 #endif
108 #ifndef GET_FP_MODE
109 # define GET_FP_MODE(a)		(-EINVAL)
110 #endif
111 #ifndef SET_FP_MODE
112 # define SET_FP_MODE(a, b)	(-EINVAL)
113 #endif
114 #ifndef SVE_SET_VL
115 # define SVE_SET_VL(a)		(-EINVAL)
116 #endif
117 #ifndef SVE_GET_VL
118 # define SVE_GET_VL()		(-EINVAL)
119 #endif
120 #ifndef PAC_RESET_KEYS
121 # define PAC_RESET_KEYS(a, b)	(-EINVAL)
122 #endif
123 #ifndef SET_TAGGED_ADDR_CTRL
124 # define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
125 #endif
126 #ifndef GET_TAGGED_ADDR_CTRL
127 # define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
128 #endif
129 
130 /*
131  * This is where the system-wide overflow UID and GID are defined, for
132  * architectures that now have 32-bit UIDs/GIDs but didn't in the past.
133  */
134 
135 int overflowuid = DEFAULT_OVERFLOWUID;
136 int overflowgid = DEFAULT_OVERFLOWGID;
137 
138 EXPORT_SYMBOL(overflowuid);
139 EXPORT_SYMBOL(overflowgid);
140 
141 /*
142  * The same as above, but for filesystems which can only store a 16-bit
143  * UID and GID. As such, this is needed on all architectures.
144  */
145 
146 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
147 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
148 
149 EXPORT_SYMBOL(fs_overflowuid);
150 EXPORT_SYMBOL(fs_overflowgid);
151 
152 /*
153  * Returns true if current's euid is the same as p's uid or euid,
154  * or if current has CAP_SYS_NICE in p's user_ns.
155  *
156  * Called with rcu_read_lock held, so the creds are safe.
157  */
158 static bool set_one_prio_perm(struct task_struct *p)
159 {
160 	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
161 
162 	if (uid_eq(pcred->uid,  cred->euid) ||
163 	    uid_eq(pcred->euid, cred->euid))
164 		return true;
165 	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
166 		return true;
167 	return false;
168 }
169 
170 /*
171  * set the priority of a task
172  * - the caller must hold the RCU read lock
173  */
174 static int set_one_prio(struct task_struct *p, int niceval, int error)
175 {
176 	int no_nice;
177 
178 	if (!set_one_prio_perm(p)) {
179 		error = -EPERM;
180 		goto out;
181 	}
182 	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
183 		error = -EACCES;
184 		goto out;
185 	}
186 	no_nice = security_task_setnice(p, niceval);
187 	if (no_nice) {
188 		error = no_nice;
189 		goto out;
190 	}
191 	if (error == -ESRCH)
192 		error = 0;
193 	set_user_nice(p, niceval);
194 out:
195 	return error;
196 }
197 
198 SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
199 {
200 	struct task_struct *g, *p;
201 	struct user_struct *user;
202 	const struct cred *cred = current_cred();
203 	int error = -EINVAL;
204 	struct pid *pgrp;
205 	kuid_t uid;
206 
207 	if (which > PRIO_USER || which < PRIO_PROCESS)
208 		goto out;
209 
210 	/* normalize: avoid signed division (rounding problems) */
211 	error = -ESRCH;
212 	if (niceval < MIN_NICE)
213 		niceval = MIN_NICE;
214 	if (niceval > MAX_NICE)
215 		niceval = MAX_NICE;
216 
217 	rcu_read_lock();
218 	read_lock(&tasklist_lock);
219 	switch (which) {
220 	case PRIO_PROCESS:
221 		if (who)
222 			p = find_task_by_vpid(who);
223 		else
224 			p = current;
225 		if (p)
226 			error = set_one_prio(p, niceval, error);
227 		break;
228 	case PRIO_PGRP:
229 		if (who)
230 			pgrp = find_vpid(who);
231 		else
232 			pgrp = task_pgrp(current);
233 		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
234 			error = set_one_prio(p, niceval, error);
235 		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
236 		break;
237 	case PRIO_USER:
238 		uid = make_kuid(cred->user_ns, who);
239 		user = cred->user;
240 		if (!who)
241 			uid = cred->uid;
242 		else if (!uid_eq(uid, cred->uid)) {
243 			user = find_user(uid);
244 			if (!user)
245 				goto out_unlock;	/* No processes for this user */
246 		}
247 		do_each_thread(g, p) {
248 			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
249 				error = set_one_prio(p, niceval, error);
250 		} while_each_thread(g, p);
251 		if (!uid_eq(uid, cred->uid))
252 			free_uid(user);		/* For find_user() */
253 		break;
254 	}
255 out_unlock:
256 	read_unlock(&tasklist_lock);
257 	rcu_read_unlock();
258 out:
259 	return error;
260 }
261 
262 /*
263  * Ugh. To avoid negative return values, "getpriority()" will
264  * not return the normal nice-value, but a negated value that
265  * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
266  * to stay compatible.
267  */
268 SYSCALL_DEFINE2(getpriority, int, which, int, who)
269 {
270 	struct task_struct *g, *p;
271 	struct user_struct *user;
272 	const struct cred *cred = current_cred();
273 	long niceval, retval = -ESRCH;
274 	struct pid *pgrp;
275 	kuid_t uid;
276 
277 	if (which > PRIO_USER || which < PRIO_PROCESS)
278 		return -EINVAL;
279 
280 	rcu_read_lock();
281 	read_lock(&tasklist_lock);
282 	switch (which) {
283 	case PRIO_PROCESS:
284 		if (who)
285 			p = find_task_by_vpid(who);
286 		else
287 			p = current;
288 		if (p) {
289 			niceval = nice_to_rlimit(task_nice(p));
290 			if (niceval > retval)
291 				retval = niceval;
292 		}
293 		break;
294 	case PRIO_PGRP:
295 		if (who)
296 			pgrp = find_vpid(who);
297 		else
298 			pgrp = task_pgrp(current);
299 		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
300 			niceval = nice_to_rlimit(task_nice(p));
301 			if (niceval > retval)
302 				retval = niceval;
303 		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
304 		break;
305 	case PRIO_USER:
306 		uid = make_kuid(cred->user_ns, who);
307 		user = cred->user;
308 		if (!who)
309 			uid = cred->uid;
310 		else if (!uid_eq(uid, cred->uid)) {
311 			user = find_user(uid);
312 			if (!user)
313 				goto out_unlock;	/* No processes for this user */
314 		}
315 		do_each_thread(g, p) {
316 			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
317 				niceval = nice_to_rlimit(task_nice(p));
318 				if (niceval > retval)
319 					retval = niceval;
320 			}
321 		} while_each_thread(g, p);
322 		if (!uid_eq(uid, cred->uid))
323 			free_uid(user);		/* for find_user() */
324 		break;
325 	}
326 out_unlock:
327 	read_unlock(&tasklist_lock);
328 	rcu_read_unlock();
329 
330 	return retval;
331 }
332 
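/*
 * Illustrative userspace sketch (not part of this file): how the raw
 * getpriority() return value maps back to a nice value.  The C library
 * wrapper normally undoes the "offset by 20" encoding described above; the
 * raw syscall is used here only to make the encoding visible.  Error
 * handling is kept minimal.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		// The raw syscall returns 20 - nice, i.e. 40..1, so that no
 *		// successful result is ever negative.
 *		long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *		long nice = 20 - raw;
 *
 *		printf("raw=%ld nice=%ld\n", raw, nice);
 *		return 0;
 *	}
 */
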
333 /*
334  * Unprivileged users may change the real gid to the effective gid
335  * or vice versa.  (BSD-style)
336  *
337  * If you set the real gid at all, or set the effective gid to a value not
338  * equal to the real gid, then the saved gid is set to the new effective gid.
339  *
340  * This makes it possible for a setgid program to completely drop its
341  * privileges, which is often a useful assertion to make when you are doing
342  * a security audit of a program.
343  *
344  * The general idea is that a program which uses just setregid() will be
345  * 100% compatible with BSD.  A program which uses just setgid() will be
346  * 100% compatible with POSIX with saved IDs.
347  *
348  * SMP: There are no races; the GIDs are checked only by filesystem
349  *      operations (as far as semantic preservation is concerned).
350  */
351 #ifdef CONFIG_MULTIUSER
352 long __sys_setregid(gid_t rgid, gid_t egid)
353 {
354 	struct user_namespace *ns = current_user_ns();
355 	const struct cred *old;
356 	struct cred *new;
357 	int retval;
358 	kgid_t krgid, kegid;
359 
360 	krgid = make_kgid(ns, rgid);
361 	kegid = make_kgid(ns, egid);
362 
363 	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
364 		return -EINVAL;
365 	if ((egid != (gid_t) -1) && !gid_valid(kegid))
366 		return -EINVAL;
367 
368 	new = prepare_creds();
369 	if (!new)
370 		return -ENOMEM;
371 	old = current_cred();
372 
373 	retval = -EPERM;
374 	if (rgid != (gid_t) -1) {
375 		if (gid_eq(old->gid, krgid) ||
376 		    gid_eq(old->egid, krgid) ||
377 		    ns_capable_setid(old->user_ns, CAP_SETGID))
378 			new->gid = krgid;
379 		else
380 			goto error;
381 	}
382 	if (egid != (gid_t) -1) {
383 		if (gid_eq(old->gid, kegid) ||
384 		    gid_eq(old->egid, kegid) ||
385 		    gid_eq(old->sgid, kegid) ||
386 		    ns_capable_setid(old->user_ns, CAP_SETGID))
387 			new->egid = kegid;
388 		else
389 			goto error;
390 	}
391 
392 	if (rgid != (gid_t) -1 ||
393 	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
394 		new->sgid = new->egid;
395 	new->fsgid = new->egid;
396 
397 	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
398 	if (retval < 0)
399 		goto error;
400 
401 	return commit_creds(new);
402 
403 error:
404 	abort_creds(new);
405 	return retval;
406 }
407 
408 SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
409 {
410 	return __sys_setregid(rgid, egid);
411 }
412 
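/*
 * Illustrative userspace sketch (not part of this file): permanently
 * dropping a setgid program's privileges using the BSD semantics described
 * above.  Setting the real gid forces the saved gid to the new effective
 * gid, so the elevated group cannot be regained; a real program would also
 * clear supplementary groups with setgroups().
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void drop_setgid_priv(void)
 *	{
 *		// Real and effective gid become the real gid; the saved gid
 *		// follows the new effective gid, making the drop permanent.
 *		if (setregid(getgid(), getgid()) != 0)
 *			abort();
 *	}
 */
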
413 /*
414  * setgid() is implemented like SysV w/ SAVED_IDS
415  *
416  * SMP: Same implicit races as above.
417  */
418 long __sys_setgid(gid_t gid)
419 {
420 	struct user_namespace *ns = current_user_ns();
421 	const struct cred *old;
422 	struct cred *new;
423 	int retval;
424 	kgid_t kgid;
425 
426 	kgid = make_kgid(ns, gid);
427 	if (!gid_valid(kgid))
428 		return -EINVAL;
429 
430 	new = prepare_creds();
431 	if (!new)
432 		return -ENOMEM;
433 	old = current_cred();
434 
435 	retval = -EPERM;
436 	if (ns_capable_setid(old->user_ns, CAP_SETGID))
437 		new->gid = new->egid = new->sgid = new->fsgid = kgid;
438 	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
439 		new->egid = new->fsgid = kgid;
440 	else
441 		goto error;
442 
443 	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
444 	if (retval < 0)
445 		goto error;
446 
447 	return commit_creds(new);
448 
449 error:
450 	abort_creds(new);
451 	return retval;
452 }
453 
454 SYSCALL_DEFINE1(setgid, gid_t, gid)
455 {
456 	return __sys_setgid(gid);
457 }
458 
459 /*
460  * change the user struct in a credentials set to match the new UID
461  */
462 static int set_user(struct cred *new)
463 {
464 	struct user_struct *new_user;
465 
466 	new_user = alloc_uid(new->uid);
467 	if (!new_user)
468 		return -EAGAIN;
469 
470 	/*
471 	 * We don't fail in case of NPROC limit excess here because too many
472 	 * poorly written programs don't check set*uid() return code, assuming
473 	 * it never fails if called by root.  We may still enforce NPROC limit
474 	 * for programs doing set*uid()+execve() by harmlessly deferring the
475 	 * failure to the execve() stage.
476 	 */
477 	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
478 			new_user != INIT_USER)
479 		current->flags |= PF_NPROC_EXCEEDED;
480 	else
481 		current->flags &= ~PF_NPROC_EXCEEDED;
482 
483 	free_uid(new->user);
484 	new->user = new_user;
485 	return 0;
486 }
487 
488 /*
489  * Unprivileged users may change the real uid to the effective uid
490  * or vice versa.  (BSD-style)
491  *
492  * If you set the real uid at all, or set the effective uid to a value not
493  * equal to the real uid, then the saved uid is set to the new effective uid.
494  *
495  * This makes it possible for a setuid program to completely drop its
496  * privileges, which is often a useful assertion to make when you are doing
497  * a security audit of a program.
498  *
499  * The general idea is that a program which uses just setreuid() will be
500  * 100% compatible with BSD.  A program which uses just setuid() will be
501  * 100% compatible with POSIX with saved IDs.
502  */
503 long __sys_setreuid(uid_t ruid, uid_t euid)
504 {
505 	struct user_namespace *ns = current_user_ns();
506 	const struct cred *old;
507 	struct cred *new;
508 	int retval;
509 	kuid_t kruid, keuid;
510 
511 	kruid = make_kuid(ns, ruid);
512 	keuid = make_kuid(ns, euid);
513 
514 	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
515 		return -EINVAL;
516 	if ((euid != (uid_t) -1) && !uid_valid(keuid))
517 		return -EINVAL;
518 
519 	new = prepare_creds();
520 	if (!new)
521 		return -ENOMEM;
522 	old = current_cred();
523 
524 	retval = -EPERM;
525 	if (ruid != (uid_t) -1) {
526 		new->uid = kruid;
527 		if (!uid_eq(old->uid, kruid) &&
528 		    !uid_eq(old->euid, kruid) &&
529 		    !ns_capable_setid(old->user_ns, CAP_SETUID))
530 			goto error;
531 	}
532 
533 	if (euid != (uid_t) -1) {
534 		new->euid = keuid;
535 		if (!uid_eq(old->uid, keuid) &&
536 		    !uid_eq(old->euid, keuid) &&
537 		    !uid_eq(old->suid, keuid) &&
538 		    !ns_capable_setid(old->user_ns, CAP_SETUID))
539 			goto error;
540 	}
541 
542 	if (!uid_eq(new->uid, old->uid)) {
543 		retval = set_user(new);
544 		if (retval < 0)
545 			goto error;
546 	}
547 	if (ruid != (uid_t) -1 ||
548 	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
549 		new->suid = new->euid;
550 	new->fsuid = new->euid;
551 
552 	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
553 	if (retval < 0)
554 		goto error;
555 
556 	return commit_creds(new);
557 
558 error:
559 	abort_creds(new);
560 	return retval;
561 }
562 
563 SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
564 {
565 	return __sys_setreuid(ruid, euid);
566 }
567 
568 /*
569  * setuid() is implemented like SysV with SAVED_IDS
570  *
571  * Note that SAVED_IDS is deficient in that a setuid root program
572  * like sendmail, for example, cannot set its uid to be a normal
573  * user and then switch back, because if you're root, setuid() sets
574  * the saved uid too.  If you don't like this, blame the bright people
575  * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
576  * will allow a root program to temporarily drop privileges and be able to
577  * regain them by swapping the real and effective uid.
578  */
579 long __sys_setuid(uid_t uid)
580 {
581 	struct user_namespace *ns = current_user_ns();
582 	const struct cred *old;
583 	struct cred *new;
584 	int retval;
585 	kuid_t kuid;
586 
587 	kuid = make_kuid(ns, uid);
588 	if (!uid_valid(kuid))
589 		return -EINVAL;
590 
591 	new = prepare_creds();
592 	if (!new)
593 		return -ENOMEM;
594 	old = current_cred();
595 
596 	retval = -EPERM;
597 	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
598 		new->suid = new->uid = kuid;
599 		if (!uid_eq(kuid, old->uid)) {
600 			retval = set_user(new);
601 			if (retval < 0)
602 				goto error;
603 		}
604 	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
605 		goto error;
606 	}
607 
608 	new->fsuid = new->euid = kuid;
609 
610 	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
611 	if (retval < 0)
612 		goto error;
613 
614 	return commit_creds(new);
615 
616 error:
617 	abort_creds(new);
618 	return retval;
619 }
620 
621 SYSCALL_DEFINE1(setuid, uid_t, uid)
622 {
623 	return __sys_setuid(uid);
624 }
625 
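/*
 * Illustrative userspace sketch (not part of this file): the SAVED_IDS
 * limitation described above.  A setuid-root program that calls setuid()
 * gives up root for good because the saved uid is overwritten too, whereas
 * setreuid() can swap the real and effective uids and later swap them back.
 * Return values must be checked in real code.
 *
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		// Started setuid-root: real uid = user, effective uid = 0.
 *		// Temporarily drop: swap the real and effective uids.
 *		setreuid(geteuid(), getuid());
 *
 *		// ... unprivileged work runs with effective uid == user ...
 *
 *		// Regain root: swap back, which is permitted because the
 *		// real uid is still 0 after the first swap.
 *		setreuid(geteuid(), getuid());
 *		return 0;
 *	}
 */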
626 
627 /*
628  * This function implements a generic ability to update ruid, euid,
629  * and suid.  This allows you to implement the 4.4BSD-compatible seteuid().
630  */
631 long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
632 {
633 	struct user_namespace *ns = current_user_ns();
634 	const struct cred *old;
635 	struct cred *new;
636 	int retval;
637 	kuid_t kruid, keuid, ksuid;
638 
639 	kruid = make_kuid(ns, ruid);
640 	keuid = make_kuid(ns, euid);
641 	ksuid = make_kuid(ns, suid);
642 
643 	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
644 		return -EINVAL;
645 
646 	if ((euid != (uid_t) -1) && !uid_valid(keuid))
647 		return -EINVAL;
648 
649 	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
650 		return -EINVAL;
651 
652 	new = prepare_creds();
653 	if (!new)
654 		return -ENOMEM;
655 
656 	old = current_cred();
657 
658 	retval = -EPERM;
659 	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
660 		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
661 		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
662 			goto error;
663 		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
664 		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
665 			goto error;
666 		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
667 		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
668 			goto error;
669 	}
670 
671 	if (ruid != (uid_t) -1) {
672 		new->uid = kruid;
673 		if (!uid_eq(kruid, old->uid)) {
674 			retval = set_user(new);
675 			if (retval < 0)
676 				goto error;
677 		}
678 	}
679 	if (euid != (uid_t) -1)
680 		new->euid = keuid;
681 	if (suid != (uid_t) -1)
682 		new->suid = ksuid;
683 	new->fsuid = new->euid;
684 
685 	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
686 	if (retval < 0)
687 		goto error;
688 
689 	return commit_creds(new);
690 
691 error:
692 	abort_creds(new);
693 	return retval;
694 }
695 
696 SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
697 {
698 	return __sys_setresuid(ruid, euid, suid);
699 }
700 
701 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
702 {
703 	const struct cred *cred = current_cred();
704 	int retval;
705 	uid_t ruid, euid, suid;
706 
707 	ruid = from_kuid_munged(cred->user_ns, cred->uid);
708 	euid = from_kuid_munged(cred->user_ns, cred->euid);
709 	suid = from_kuid_munged(cred->user_ns, cred->suid);
710 
711 	retval = put_user(ruid, ruidp);
712 	if (!retval) {
713 		retval = put_user(euid, euidp);
714 		if (!retval)
715 			return put_user(suid, suidp);
716 	}
717 	return retval;
718 }
719 
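/*
 * Illustrative userspace sketch (not part of this file): setresuid() makes
 * all three uids explicit, which is the least error-prone way to drop
 * privileges permanently, and getresuid() lets the caller verify the
 * result.  Both are GNU extensions in the C library.
 *
 *	#define _GNU_SOURCE
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void drop_to(uid_t uid)
 *	{
 *		uid_t ruid, euid, suid;
 *
 *		// Set the real, effective and saved uid in a single call.
 *		if (setresuid(uid, uid, uid) != 0)
 *			abort();
 *
 *		// Paranoia: confirm that no elevated uid was left behind.
 *		if (getresuid(&ruid, &euid, &suid) != 0 ||
 *		    ruid != uid || euid != uid || suid != uid)
 *			abort();
 *	}
 */
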
720 /*
721  * Same as above, but for rgid, egid, sgid.
722  */
723 long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
724 {
725 	struct user_namespace *ns = current_user_ns();
726 	const struct cred *old;
727 	struct cred *new;
728 	int retval;
729 	kgid_t krgid, kegid, ksgid;
730 
731 	krgid = make_kgid(ns, rgid);
732 	kegid = make_kgid(ns, egid);
733 	ksgid = make_kgid(ns, sgid);
734 
735 	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
736 		return -EINVAL;
737 	if ((egid != (gid_t) -1) && !gid_valid(kegid))
738 		return -EINVAL;
739 	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
740 		return -EINVAL;
741 
742 	new = prepare_creds();
743 	if (!new)
744 		return -ENOMEM;
745 	old = current_cred();
746 
747 	retval = -EPERM;
748 	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
749 		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
750 		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
751 			goto error;
752 		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
753 		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
754 			goto error;
755 		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
756 		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
757 			goto error;
758 	}
759 
760 	if (rgid != (gid_t) -1)
761 		new->gid = krgid;
762 	if (egid != (gid_t) -1)
763 		new->egid = kegid;
764 	if (sgid != (gid_t) -1)
765 		new->sgid = ksgid;
766 	new->fsgid = new->egid;
767 
768 	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
769 	if (retval < 0)
770 		goto error;
771 
772 	return commit_creds(new);
773 
774 error:
775 	abort_creds(new);
776 	return retval;
777 }
778 
779 SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
780 {
781 	return __sys_setresgid(rgid, egid, sgid);
782 }
783 
784 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
785 {
786 	const struct cred *cred = current_cred();
787 	int retval;
788 	gid_t rgid, egid, sgid;
789 
790 	rgid = from_kgid_munged(cred->user_ns, cred->gid);
791 	egid = from_kgid_munged(cred->user_ns, cred->egid);
792 	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
793 
794 	retval = put_user(rgid, rgidp);
795 	if (!retval) {
796 		retval = put_user(egid, egidp);
797 		if (!retval)
798 			retval = put_user(sgid, sgidp);
799 	}
800 
801 	return retval;
802 }
803 
804 
805 /*
806  * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
807  * is used for "access()" and for the NFS daemon (letting nfsd stay at
808  * whatever uid it wants to). It normally shadows "euid", except when
809  * explicitly set by setfsuid() or for access..
810  */
811 long __sys_setfsuid(uid_t uid)
812 {
813 	const struct cred *old;
814 	struct cred *new;
815 	uid_t old_fsuid;
816 	kuid_t kuid;
817 
818 	old = current_cred();
819 	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
820 
821 	kuid = make_kuid(old->user_ns, uid);
822 	if (!uid_valid(kuid))
823 		return old_fsuid;
824 
825 	new = prepare_creds();
826 	if (!new)
827 		return old_fsuid;
828 
829 	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
830 	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
831 	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
832 		if (!uid_eq(kuid, old->fsuid)) {
833 			new->fsuid = kuid;
834 			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
835 				goto change_okay;
836 		}
837 	}
838 
839 	abort_creds(new);
840 	return old_fsuid;
841 
842 change_okay:
843 	commit_creds(new);
844 	return old_fsuid;
845 }
846 
847 SYSCALL_DEFINE1(setfsuid, uid_t, uid)
848 {
849 	return __sys_setfsuid(uid);
850 }
851 
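/*
 * Illustrative userspace sketch (not part of this file): as the code above
 * shows, setfsuid() has no error return -- it always reports the previous
 * fsuid, whether or not the change was permitted.  The usual way to check
 * that the change took effect is a second, no-op call.
 *
 *	#include <sys/types.h>
 *	#include <sys/fsuid.h>
 *
 *	static int change_fsuid(uid_t uid)
 *	{
 *		setfsuid(uid);
 *		// The second call returns the now-current fsuid; compare it
 *		// with what was requested to detect a silently refused change.
 *		return (uid_t)setfsuid(uid) == uid ? 0 : -1;
 *	}
 */
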
852 /*
853  * Samma på svenska.. ("the same in Swedish") -- the fsgid counterpart of setfsuid() above.
854  */
855 long __sys_setfsgid(gid_t gid)
856 {
857 	const struct cred *old;
858 	struct cred *new;
859 	gid_t old_fsgid;
860 	kgid_t kgid;
861 
862 	old = current_cred();
863 	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
864 
865 	kgid = make_kgid(old->user_ns, gid);
866 	if (!gid_valid(kgid))
867 		return old_fsgid;
868 
869 	new = prepare_creds();
870 	if (!new)
871 		return old_fsgid;
872 
873 	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
874 	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
875 	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
876 		if (!gid_eq(kgid, old->fsgid)) {
877 			new->fsgid = kgid;
878 			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
879 				goto change_okay;
880 		}
881 	}
882 
883 	abort_creds(new);
884 	return old_fsgid;
885 
886 change_okay:
887 	commit_creds(new);
888 	return old_fsgid;
889 }
890 
891 SYSCALL_DEFINE1(setfsgid, gid_t, gid)
892 {
893 	return __sys_setfsgid(gid);
894 }
895 #endif /* CONFIG_MULTIUSER */
896 
897 /**
898  * sys_getpid - return the thread group id of the current process
899  *
900  * Note, despite the name, this returns the tgid not the pid.  The tgid and
901  * the pid are identical unless CLONE_THREAD was specified on clone() in
902  * which case the tgid is the same in all threads of the same group.
903  *
904  * This is SMP safe as current->tgid does not change.
905  */
906 SYSCALL_DEFINE0(getpid)
907 {
908 	return task_tgid_vnr(current);
909 }
910 
911 /* Thread ID - the internal kernel "pid" */
912 SYSCALL_DEFINE0(gettid)
913 {
914 	return task_pid_vnr(current);
915 }
916 
917 /*
918  * Accessing ->real_parent is not SMP-safe, it could
919  * change from under us. However, we can use a stale
920  * value of ->real_parent under rcu_read_lock(), see
921  * release_task()->call_rcu(delayed_put_task_struct).
922  */
923 SYSCALL_DEFINE0(getppid)
924 {
925 	int pid;
926 
927 	rcu_read_lock();
928 	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
929 	rcu_read_unlock();
930 
931 	return pid;
932 }
933 
934 SYSCALL_DEFINE0(getuid)
935 {
936 	/* Only we change this so SMP safe */
937 	return from_kuid_munged(current_user_ns(), current_uid());
938 }
939 
940 SYSCALL_DEFINE0(geteuid)
941 {
942 	/* Only we change this so SMP safe */
943 	return from_kuid_munged(current_user_ns(), current_euid());
944 }
945 
946 SYSCALL_DEFINE0(getgid)
947 {
948 	/* Only we change this so SMP safe */
949 	return from_kgid_munged(current_user_ns(), current_gid());
950 }
951 
952 SYSCALL_DEFINE0(getegid)
953 {
954 	/* Only we change this so SMP safe */
955 	return from_kgid_munged(current_user_ns(), current_egid());
956 }
957 
958 static void do_sys_times(struct tms *tms)
959 {
960 	u64 tgutime, tgstime, cutime, cstime;
961 
962 	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
963 	cutime = current->signal->cutime;
964 	cstime = current->signal->cstime;
965 	tms->tms_utime = nsec_to_clock_t(tgutime);
966 	tms->tms_stime = nsec_to_clock_t(tgstime);
967 	tms->tms_cutime = nsec_to_clock_t(cutime);
968 	tms->tms_cstime = nsec_to_clock_t(cstime);
969 }
970 
971 SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
972 {
973 	if (tbuf) {
974 		struct tms tmp;
975 
976 		do_sys_times(&tmp);
977 		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
978 			return -EFAULT;
979 	}
980 	force_successful_syscall_return();
981 	return (long) jiffies_64_to_clock_t(get_jiffies_64());
982 }
983 
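/*
 * Illustrative userspace sketch (not part of this file): both the times()
 * return value and the struct tms fields filled in above are expressed in
 * clock ticks; sysconf(_SC_CLK_TCK) gives the ticks-per-second needed to
 * convert them to seconds.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/times.h>
 *
 *	int main(void)
 *	{
 *		struct tms t;
 *		long hz = sysconf(_SC_CLK_TCK);	// clock ticks per second
 *		clock_t start = times(&t);	// ticks since an arbitrary point
 *
 *		sleep(1);			// something worth timing
 *
 *		clock_t end = times(&t);
 *		printf("elapsed %.2f s, user cpu %.2f s\n",
 *		       (double)(end - start) / hz,
 *		       (double)t.tms_utime / hz);
 *		return 0;
 *	}
 */
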
984 #ifdef CONFIG_COMPAT
985 static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
986 {
987 	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
988 }
989 
990 COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
991 {
992 	if (tbuf) {
993 		struct tms tms;
994 		struct compat_tms tmp;
995 
996 		do_sys_times(&tms);
997 		/* Convert our struct tms to the compat version. */
998 		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
999 		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
1000 		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
1001 		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
1002 		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
1003 			return -EFAULT;
1004 	}
1005 	force_successful_syscall_return();
1006 	return compat_jiffies_to_clock_t(jiffies);
1007 }
1008 #endif
1009 
1010 /*
1011  * This needs some heavy checking ...
1012  * I just haven't the stomach for it. I also don't fully
1013  * understand sessions/pgrp etc. Let somebody who does explain it.
1014  *
1015  * OK, I think I have the protection semantics right.... this is really
1016  * only important on a multi-user system anyway, to make sure one user
1017  * can't send a signal to a process owned by another.  -TYT, 12/12/91
1018  *
1019  * !PF_FORKNOEXEC check to conform completely to POSIX.
1020  */
1021 SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1022 {
1023 	struct task_struct *p;
1024 	struct task_struct *group_leader = current->group_leader;
1025 	struct pid *pgrp;
1026 	int err;
1027 
1028 	if (!pid)
1029 		pid = task_pid_vnr(group_leader);
1030 	if (!pgid)
1031 		pgid = pid;
1032 	if (pgid < 0)
1033 		return -EINVAL;
1034 	rcu_read_lock();
1035 
1036 	/* From this point forward we keep holding onto the tasklist lock
1037 	 * so that our parent does not change from under us. -DaveM
1038 	 */
1039 	write_lock_irq(&tasklist_lock);
1040 
1041 	err = -ESRCH;
1042 	p = find_task_by_vpid(pid);
1043 	if (!p)
1044 		goto out;
1045 
1046 	err = -EINVAL;
1047 	if (!thread_group_leader(p))
1048 		goto out;
1049 
1050 	if (same_thread_group(p->real_parent, group_leader)) {
1051 		err = -EPERM;
1052 		if (task_session(p) != task_session(group_leader))
1053 			goto out;
1054 		err = -EACCES;
1055 		if (!(p->flags & PF_FORKNOEXEC))
1056 			goto out;
1057 	} else {
1058 		err = -ESRCH;
1059 		if (p != group_leader)
1060 			goto out;
1061 	}
1062 
1063 	err = -EPERM;
1064 	if (p->signal->leader)
1065 		goto out;
1066 
1067 	pgrp = task_pid(p);
1068 	if (pgid != pid) {
1069 		struct task_struct *g;
1070 
1071 		pgrp = find_vpid(pgid);
1072 		g = pid_task(pgrp, PIDTYPE_PGID);
1073 		if (!g || task_session(g) != task_session(group_leader))
1074 			goto out;
1075 	}
1076 
1077 	err = security_task_setpgid(p, pgid);
1078 	if (err)
1079 		goto out;
1080 
1081 	if (task_pgrp(p) != pgrp)
1082 		change_pid(p, PIDTYPE_PGID, pgrp);
1083 
1084 	err = 0;
1085 out:
1086 	/* All paths lead to here, thus we are safe. -DaveM */
1087 	write_unlock_irq(&tasklist_lock);
1088 	rcu_read_unlock();
1089 	return err;
1090 }
1091 
1092 static int do_getpgid(pid_t pid)
1093 {
1094 	struct task_struct *p;
1095 	struct pid *grp;
1096 	int retval;
1097 
1098 	rcu_read_lock();
1099 	if (!pid)
1100 		grp = task_pgrp(current);
1101 	else {
1102 		retval = -ESRCH;
1103 		p = find_task_by_vpid(pid);
1104 		if (!p)
1105 			goto out;
1106 		grp = task_pgrp(p);
1107 		if (!grp)
1108 			goto out;
1109 
1110 		retval = security_task_getpgid(p);
1111 		if (retval)
1112 			goto out;
1113 	}
1114 	retval = pid_vnr(grp);
1115 out:
1116 	rcu_read_unlock();
1117 	return retval;
1118 }
1119 
1120 SYSCALL_DEFINE1(getpgid, pid_t, pid)
1121 {
1122 	return do_getpgid(pid);
1123 }
1124 
1125 #ifdef __ARCH_WANT_SYS_GETPGRP
1126 
1127 SYSCALL_DEFINE0(getpgrp)
1128 {
1129 	return do_getpgid(0);
1130 }
1131 
1132 #endif
1133 
1134 SYSCALL_DEFINE1(getsid, pid_t, pid)
1135 {
1136 	struct task_struct *p;
1137 	struct pid *sid;
1138 	int retval;
1139 
1140 	rcu_read_lock();
1141 	if (!pid)
1142 		sid = task_session(current);
1143 	else {
1144 		retval = -ESRCH;
1145 		p = find_task_by_vpid(pid);
1146 		if (!p)
1147 			goto out;
1148 		sid = task_session(p);
1149 		if (!sid)
1150 			goto out;
1151 
1152 		retval = security_task_getsid(p);
1153 		if (retval)
1154 			goto out;
1155 	}
1156 	retval = pid_vnr(sid);
1157 out:
1158 	rcu_read_unlock();
1159 	return retval;
1160 }
1161 
1162 static void set_special_pids(struct pid *pid)
1163 {
1164 	struct task_struct *curr = current->group_leader;
1165 
1166 	if (task_session(curr) != pid)
1167 		change_pid(curr, PIDTYPE_SID, pid);
1168 
1169 	if (task_pgrp(curr) != pid)
1170 		change_pid(curr, PIDTYPE_PGID, pid);
1171 }
1172 
1173 int ksys_setsid(void)
1174 {
1175 	struct task_struct *group_leader = current->group_leader;
1176 	struct pid *sid = task_pid(group_leader);
1177 	pid_t session = pid_vnr(sid);
1178 	int err = -EPERM;
1179 
1180 	write_lock_irq(&tasklist_lock);
1181 	/* Fail if I am already a session leader */
1182 	if (group_leader->signal->leader)
1183 		goto out;
1184 
1185 	/* Fail if a process group id already exists that equals the
1186 	 * proposed session id.
1187 	 */
1188 	if (pid_task(sid, PIDTYPE_PGID))
1189 		goto out;
1190 
1191 	group_leader->signal->leader = 1;
1192 	set_special_pids(sid);
1193 
1194 	proc_clear_tty(group_leader);
1195 
1196 	err = session;
1197 out:
1198 	write_unlock_irq(&tasklist_lock);
1199 	if (err > 0) {
1200 		proc_sid_connector(group_leader);
1201 		sched_autogroup_create_attach(group_leader);
1202 	}
1203 	return err;
1204 }
1205 
1206 SYSCALL_DEFINE0(setsid)
1207 {
1208 	return ksys_setsid();
1209 }
1210 
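/*
 * Illustrative userspace sketch (not part of this file): the classic
 * daemonization step.  setsid() fails if the caller is already a process
 * group leader (the pid_task(sid, PIDTYPE_PGID) check above), so a daemon
 * forks first and calls setsid() in the child, which cannot be a leader.
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	static void become_session_leader(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid < 0)
 *			abort();
 *		if (pid > 0)
 *			_exit(0);	// parent exits, child carries on
 *		if (setsid() < 0)	// child: new session, new process
 *			abort();	// group, no controlling terminal
 *	}
 */
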
1211 DECLARE_RWSEM(uts_sem);
1212 
1213 #ifdef COMPAT_UTS_MACHINE
1214 #define override_architecture(name) \
1215 	(personality(current->personality) == PER_LINUX32 && \
1216 	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1217 		      sizeof(COMPAT_UTS_MACHINE)))
1218 #else
1219 #define override_architecture(name)	0
1220 #endif
1221 
1222 /*
1223  * Work around broken programs that cannot handle "Linux 3.0".
1224  * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
1225  * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1226  * 2.6.60.
1227  */
1228 static int override_release(char __user *release, size_t len)
1229 {
1230 	int ret = 0;
1231 
1232 	if (current->personality & UNAME26) {
1233 		const char *rest = UTS_RELEASE;
1234 		char buf[65] = { 0 };
1235 		int ndots = 0;
1236 		unsigned v;
1237 		size_t copy;
1238 
1239 		while (*rest) {
1240 			if (*rest == '.' && ++ndots >= 3)
1241 				break;
1242 			if (!isdigit(*rest) && *rest != '.')
1243 				break;
1244 			rest++;
1245 		}
1246 		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1247 		copy = clamp_t(size_t, len, 1, sizeof(buf));
1248 		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1249 		ret = copy_to_user(release, buf, copy + 1);
1250 	}
1251 	return ret;
1252 }
1253 
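/*
 * Worked example (illustrative, not part of this file) of the mapping
 * implemented above: on a kernel whose UTS_RELEASE is "5.10.42-generic",
 * a process running with the UNAME26 personality -- for instance via
 * util-linux's "setarch --uname-2.6" -- sees the minor version (10) plus
 * 60, prefixed with "2.6." and followed by whatever trails the numeric
 * version prefix:
 *
 *	$ uname -r
 *	5.10.42-generic
 *	$ setarch --uname-2.6 uname -r
 *	2.6.70-generic
 */
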
1254 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1255 {
1256 	struct new_utsname tmp;
1257 
1258 	down_read(&uts_sem);
1259 	memcpy(&tmp, utsname(), sizeof(tmp));
1260 	up_read(&uts_sem);
1261 	if (copy_to_user(name, &tmp, sizeof(tmp)))
1262 		return -EFAULT;
1263 
1264 	if (override_release(name->release, sizeof(name->release)))
1265 		return -EFAULT;
1266 	if (override_architecture(name))
1267 		return -EFAULT;
1268 	return 0;
1269 }
1270 
1271 #ifdef __ARCH_WANT_SYS_OLD_UNAME
1272 /*
1273  * Old cruft
1274  */
1275 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1276 {
1277 	struct old_utsname tmp;
1278 
1279 	if (!name)
1280 		return -EFAULT;
1281 
1282 	down_read(&uts_sem);
1283 	memcpy(&tmp, utsname(), sizeof(tmp));
1284 	up_read(&uts_sem);
1285 	if (copy_to_user(name, &tmp, sizeof(tmp)))
1286 		return -EFAULT;
1287 
1288 	if (override_release(name->release, sizeof(name->release)))
1289 		return -EFAULT;
1290 	if (override_architecture(name))
1291 		return -EFAULT;
1292 	return 0;
1293 }
1294 
1295 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1296 {
1297 	struct oldold_utsname tmp;
1298 
1299 	if (!name)
1300 		return -EFAULT;
1301 
1302 	memset(&tmp, 0, sizeof(tmp));
1303 
1304 	down_read(&uts_sem);
1305 	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1306 	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1307 	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1308 	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1309 	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1310 	up_read(&uts_sem);
1311 	if (copy_to_user(name, &tmp, sizeof(tmp)))
1312 		return -EFAULT;
1313 
1314 	if (override_architecture(name))
1315 		return -EFAULT;
1316 	if (override_release(name->release, sizeof(name->release)))
1317 		return -EFAULT;
1318 	return 0;
1319 }
1320 #endif
1321 
1322 SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1323 {
1324 	int errno;
1325 	char tmp[__NEW_UTS_LEN];
1326 
1327 	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1328 		return -EPERM;
1329 
1330 	if (len < 0 || len > __NEW_UTS_LEN)
1331 		return -EINVAL;
1332 	errno = -EFAULT;
1333 	if (!copy_from_user(tmp, name, len)) {
1334 		struct new_utsname *u;
1335 
1336 		down_write(&uts_sem);
1337 		u = utsname();
1338 		memcpy(u->nodename, tmp, len);
1339 		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1340 		errno = 0;
1341 		uts_proc_notify(UTS_PROC_HOSTNAME);
1342 		up_write(&uts_sem);
1343 	}
1344 	return errno;
1345 }
1346 
1347 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1348 
1349 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1350 {
1351 	int i;
1352 	struct new_utsname *u;
1353 	char tmp[__NEW_UTS_LEN + 1];
1354 
1355 	if (len < 0)
1356 		return -EINVAL;
1357 	down_read(&uts_sem);
1358 	u = utsname();
1359 	i = 1 + strlen(u->nodename);
1360 	if (i > len)
1361 		i = len;
1362 	memcpy(tmp, u->nodename, i);
1363 	up_read(&uts_sem);
1364 	if (copy_to_user(name, tmp, i))
1365 		return -EFAULT;
1366 	return 0;
1367 }
1368 
1369 #endif
1370 
1371 /*
1372  * Only setdomainname; getdomainname can be implemented by calling
1373  * uname()
1374  */
1375 SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1376 {
1377 	int errno;
1378 	char tmp[__NEW_UTS_LEN];
1379 
1380 	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1381 		return -EPERM;
1382 	if (len < 0 || len > __NEW_UTS_LEN)
1383 		return -EINVAL;
1384 
1385 	errno = -EFAULT;
1386 	if (!copy_from_user(tmp, name, len)) {
1387 		struct new_utsname *u;
1388 
1389 		down_write(&uts_sem);
1390 		u = utsname();
1391 		memcpy(u->domainname, tmp, len);
1392 		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1393 		errno = 0;
1394 		uts_proc_notify(UTS_PROC_DOMAINNAME);
1395 		up_write(&uts_sem);
1396 	}
1397 	return errno;
1398 }
1399 
1400 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1401 {
1402 	struct rlimit value;
1403 	int ret;
1404 
1405 	ret = do_prlimit(current, resource, NULL, &value);
1406 	if (!ret)
1407 		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1408 
1409 	return ret;
1410 }
1411 
1412 #ifdef CONFIG_COMPAT
1413 
1414 COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1415 		       struct compat_rlimit __user *, rlim)
1416 {
1417 	struct rlimit r;
1418 	struct compat_rlimit r32;
1419 
1420 	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1421 		return -EFAULT;
1422 
1423 	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1424 		r.rlim_cur = RLIM_INFINITY;
1425 	else
1426 		r.rlim_cur = r32.rlim_cur;
1427 	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1428 		r.rlim_max = RLIM_INFINITY;
1429 	else
1430 		r.rlim_max = r32.rlim_max;
1431 	return do_prlimit(current, resource, &r, NULL);
1432 }
1433 
1434 COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1435 		       struct compat_rlimit __user *, rlim)
1436 {
1437 	struct rlimit r;
1438 	int ret;
1439 
1440 	ret = do_prlimit(current, resource, NULL, &r);
1441 	if (!ret) {
1442 		struct compat_rlimit r32;
1443 		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1444 			r32.rlim_cur = COMPAT_RLIM_INFINITY;
1445 		else
1446 			r32.rlim_cur = r.rlim_cur;
1447 		if (r.rlim_max > COMPAT_RLIM_INFINITY)
1448 			r32.rlim_max = COMPAT_RLIM_INFINITY;
1449 		else
1450 			r32.rlim_max = r.rlim_max;
1451 
1452 		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1453 			return -EFAULT;
1454 	}
1455 	return ret;
1456 }
1457 
1458 #endif
1459 
1460 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1461 
1462 /*
1463  *	Back compatibility for getrlimit. Needed for some apps.
1464  */
1465 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1466 		struct rlimit __user *, rlim)
1467 {
1468 	struct rlimit x;
1469 	if (resource >= RLIM_NLIMITS)
1470 		return -EINVAL;
1471 
1472 	resource = array_index_nospec(resource, RLIM_NLIMITS);
1473 	task_lock(current->group_leader);
1474 	x = current->signal->rlim[resource];
1475 	task_unlock(current->group_leader);
1476 	if (x.rlim_cur > 0x7FFFFFFF)
1477 		x.rlim_cur = 0x7FFFFFFF;
1478 	if (x.rlim_max > 0x7FFFFFFF)
1479 		x.rlim_max = 0x7FFFFFFF;
1480 	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1481 }
1482 
1483 #ifdef CONFIG_COMPAT
1484 COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1485 		       struct compat_rlimit __user *, rlim)
1486 {
1487 	struct rlimit r;
1488 
1489 	if (resource >= RLIM_NLIMITS)
1490 		return -EINVAL;
1491 
1492 	resource = array_index_nospec(resource, RLIM_NLIMITS);
1493 	task_lock(current->group_leader);
1494 	r = current->signal->rlim[resource];
1495 	task_unlock(current->group_leader);
1496 	if (r.rlim_cur > 0x7FFFFFFF)
1497 		r.rlim_cur = 0x7FFFFFFF;
1498 	if (r.rlim_max > 0x7FFFFFFF)
1499 		r.rlim_max = 0x7FFFFFFF;
1500 
1501 	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1502 	    put_user(r.rlim_max, &rlim->rlim_max))
1503 		return -EFAULT;
1504 	return 0;
1505 }
1506 #endif
1507 
1508 #endif
1509 
1510 static inline bool rlim64_is_infinity(__u64 rlim64)
1511 {
1512 #if BITS_PER_LONG < 64
1513 	return rlim64 >= ULONG_MAX;
1514 #else
1515 	return rlim64 == RLIM64_INFINITY;
1516 #endif
1517 }
1518 
1519 static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1520 {
1521 	if (rlim->rlim_cur == RLIM_INFINITY)
1522 		rlim64->rlim_cur = RLIM64_INFINITY;
1523 	else
1524 		rlim64->rlim_cur = rlim->rlim_cur;
1525 	if (rlim->rlim_max == RLIM_INFINITY)
1526 		rlim64->rlim_max = RLIM64_INFINITY;
1527 	else
1528 		rlim64->rlim_max = rlim->rlim_max;
1529 }
1530 
1531 static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1532 {
1533 	if (rlim64_is_infinity(rlim64->rlim_cur))
1534 		rlim->rlim_cur = RLIM_INFINITY;
1535 	else
1536 		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1537 	if (rlim64_is_infinity(rlim64->rlim_max))
1538 		rlim->rlim_max = RLIM_INFINITY;
1539 	else
1540 		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1541 }
1542 
1543 /* make sure you are allowed to change @tsk limits before calling this */
1544 int do_prlimit(struct task_struct *tsk, unsigned int resource,
1545 		struct rlimit *new_rlim, struct rlimit *old_rlim)
1546 {
1547 	struct rlimit *rlim;
1548 	int retval = 0;
1549 
1550 	if (resource >= RLIM_NLIMITS)
1551 		return -EINVAL;
1552 	if (new_rlim) {
1553 		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1554 			return -EINVAL;
1555 		if (resource == RLIMIT_NOFILE &&
1556 				new_rlim->rlim_max > sysctl_nr_open)
1557 			return -EPERM;
1558 	}
1559 
1560 	/* protect tsk->signal and tsk->sighand from disappearing */
1561 	read_lock(&tasklist_lock);
1562 	if (!tsk->sighand) {
1563 		retval = -ESRCH;
1564 		goto out;
1565 	}
1566 
1567 	rlim = tsk->signal->rlim + resource;
1568 	task_lock(tsk->group_leader);
1569 	if (new_rlim) {
1570 		/* Keep the capable check against init_user_ns until
1571 		   cgroups can contain all limits */
1572 		if (new_rlim->rlim_max > rlim->rlim_max &&
1573 				!capable(CAP_SYS_RESOURCE))
1574 			retval = -EPERM;
1575 		if (!retval)
1576 			retval = security_task_setrlimit(tsk, resource, new_rlim);
1577 	}
1578 	if (!retval) {
1579 		if (old_rlim)
1580 			*old_rlim = *rlim;
1581 		if (new_rlim)
1582 			*rlim = *new_rlim;
1583 	}
1584 	task_unlock(tsk->group_leader);
1585 
1586 	/*
1587 	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
1588 	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
1589 	 * ignores the rlimit.
1590 	 */
1591 	 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1592 	     new_rlim->rlim_cur != RLIM_INFINITY &&
1593 	     IS_ENABLED(CONFIG_POSIX_TIMERS))
1594 		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1595 out:
1596 	read_unlock(&tasklist_lock);
1597 	return retval;
1598 }
1599 
1600 /* rcu lock must be held */
1601 static int check_prlimit_permission(struct task_struct *task,
1602 				    unsigned int flags)
1603 {
1604 	const struct cred *cred = current_cred(), *tcred;
1605 	bool id_match;
1606 
1607 	if (current == task)
1608 		return 0;
1609 
1610 	tcred = __task_cred(task);
1611 	id_match = (uid_eq(cred->uid, tcred->euid) &&
1612 		    uid_eq(cred->uid, tcred->suid) &&
1613 		    uid_eq(cred->uid, tcred->uid)  &&
1614 		    gid_eq(cred->gid, tcred->egid) &&
1615 		    gid_eq(cred->gid, tcred->sgid) &&
1616 		    gid_eq(cred->gid, tcred->gid));
1617 	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1618 		return -EPERM;
1619 
1620 	return security_task_prlimit(cred, tcred, flags);
1621 }
1622 
1623 SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1624 		const struct rlimit64 __user *, new_rlim,
1625 		struct rlimit64 __user *, old_rlim)
1626 {
1627 	struct rlimit64 old64, new64;
1628 	struct rlimit old, new;
1629 	struct task_struct *tsk;
1630 	unsigned int checkflags = 0;
1631 	int ret;
1632 
1633 	if (old_rlim)
1634 		checkflags |= LSM_PRLIMIT_READ;
1635 
1636 	if (new_rlim) {
1637 		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1638 			return -EFAULT;
1639 		rlim64_to_rlim(&new64, &new);
1640 		checkflags |= LSM_PRLIMIT_WRITE;
1641 	}
1642 
1643 	rcu_read_lock();
1644 	tsk = pid ? find_task_by_vpid(pid) : current;
1645 	if (!tsk) {
1646 		rcu_read_unlock();
1647 		return -ESRCH;
1648 	}
1649 	ret = check_prlimit_permission(tsk, checkflags);
1650 	if (ret) {
1651 		rcu_read_unlock();
1652 		return ret;
1653 	}
1654 	get_task_struct(tsk);
1655 	rcu_read_unlock();
1656 
1657 	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1658 			old_rlim ? &old : NULL);
1659 
1660 	if (!ret && old_rlim) {
1661 		rlim_to_rlim64(&old, &old64);
1662 		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1663 			ret = -EFAULT;
1664 	}
1665 
1666 	put_task_struct(tsk);
1667 	return ret;
1668 }
1669 
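/*
 * Illustrative userspace sketch (not part of this file): the glibc prlimit()
 * wrapper drives this syscall and can read and write another process's
 * limits in one call, subject to the same-credentials or CAP_SYS_RESOURCE
 * check above (and CAP_SYS_RESOURCE again for raising a hard limit).
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <sys/types.h>
 *	#include <sys/resource.h>
 *
 *	static int bump_nofile(pid_t pid, rlim_t lim)
 *	{
 *		struct rlimit new = { .rlim_cur = lim, .rlim_max = lim };
 *		struct rlimit old;
 *
 *		// Install the new limit and report the previous one.
 *		if (prlimit(pid, RLIMIT_NOFILE, &new, &old) != 0)
 *			return -1;
 *
 *		printf("old soft=%llu hard=%llu\n",
 *		       (unsigned long long)old.rlim_cur,
 *		       (unsigned long long)old.rlim_max);
 *		return 0;
 *	}
 */
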
1670 SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1671 {
1672 	struct rlimit new_rlim;
1673 
1674 	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1675 		return -EFAULT;
1676 	return do_prlimit(current, resource, &new_rlim, NULL);
1677 }
1678 
1679 /*
1680  * It would make sense to put struct rusage in the task_struct,
1681  * except that would make the task_struct be *really big*.  After
1682  * task_struct gets moved into malloc'ed memory, it would
1683  * make sense to do this.  It will make moving the rest of the information
1684  * a lot simpler!  (Which we're not doing right now because we're not
1685  * measuring them yet).
1686  *
1687  * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1688  * races with threads incrementing their own counters.  But since word
1689  * reads are atomic, we either get new values or old values and we don't
1690  * care which for the sums.  We always take the siglock to protect reading
1691  * the c* fields from p->signal from races with exit.c updating those
1692  * fields when reaping, so a sample either gets all the additions of a
1693  * given child after it's reaped, or none so this sample is before reaping.
1694  *
1695  * Locking:
1696  * We need to take the siglock for CHILDREN, SELF and BOTH
1697  * in the cases where current is multithreaded, where the target task is
1698  * non-current and single threaded, and where it is non-current and
1699  * multithreaded.  Thread traversal is now safe with the siglock held.
1700  * Strictly speaking, we do not need to take the siglock if we are current and
1701  * single threaded, as no one else can take our signal_struct away, no one
1702  * else can reap the children to update signal->c* counters, and no one else
1703  * can race with the signal-> fields. If we do not take any lock, the
1704  * signal-> fields could be read out of order while another thread was just
1705  * exiting. So we should place a read memory barrier when we avoid the lock.
1706  * On the writer side, a write memory barrier is implied in __exit_signal()
1707  * as __exit_signal() releases the siglock spinlock after updating the
1708  * signal-> fields. But we don't do this yet to keep things simple.
1709  *
1710  */
1711 
1712 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1713 {
1714 	r->ru_nvcsw += t->nvcsw;
1715 	r->ru_nivcsw += t->nivcsw;
1716 	r->ru_minflt += t->min_flt;
1717 	r->ru_majflt += t->maj_flt;
1718 	r->ru_inblock += task_io_get_inblock(t);
1719 	r->ru_oublock += task_io_get_oublock(t);
1720 }
1721 
1722 void getrusage(struct task_struct *p, int who, struct rusage *r)
1723 {
1724 	struct task_struct *t;
1725 	unsigned long flags;
1726 	u64 tgutime, tgstime, utime, stime;
1727 	unsigned long maxrss = 0;
1728 
1729 	memset((char *)r, 0, sizeof (*r));
1730 	utime = stime = 0;
1731 
1732 	if (who == RUSAGE_THREAD) {
1733 		task_cputime_adjusted(current, &utime, &stime);
1734 		accumulate_thread_rusage(p, r);
1735 		maxrss = p->signal->maxrss;
1736 		goto out;
1737 	}
1738 
1739 	if (!lock_task_sighand(p, &flags))
1740 		return;
1741 
1742 	switch (who) {
1743 	case RUSAGE_BOTH:
1744 	case RUSAGE_CHILDREN:
1745 		utime = p->signal->cutime;
1746 		stime = p->signal->cstime;
1747 		r->ru_nvcsw = p->signal->cnvcsw;
1748 		r->ru_nivcsw = p->signal->cnivcsw;
1749 		r->ru_minflt = p->signal->cmin_flt;
1750 		r->ru_majflt = p->signal->cmaj_flt;
1751 		r->ru_inblock = p->signal->cinblock;
1752 		r->ru_oublock = p->signal->coublock;
1753 		maxrss = p->signal->cmaxrss;
1754 
1755 		if (who == RUSAGE_CHILDREN)
1756 			break;
1757 		fallthrough;
1758 
1759 	case RUSAGE_SELF:
1760 		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1761 		utime += tgutime;
1762 		stime += tgstime;
1763 		r->ru_nvcsw += p->signal->nvcsw;
1764 		r->ru_nivcsw += p->signal->nivcsw;
1765 		r->ru_minflt += p->signal->min_flt;
1766 		r->ru_majflt += p->signal->maj_flt;
1767 		r->ru_inblock += p->signal->inblock;
1768 		r->ru_oublock += p->signal->oublock;
1769 		if (maxrss < p->signal->maxrss)
1770 			maxrss = p->signal->maxrss;
1771 		t = p;
1772 		do {
1773 			accumulate_thread_rusage(t, r);
1774 		} while_each_thread(p, t);
1775 		break;
1776 
1777 	default:
1778 		BUG();
1779 	}
1780 	unlock_task_sighand(p, &flags);
1781 
1782 out:
1783 	r->ru_utime = ns_to_kernel_old_timeval(utime);
1784 	r->ru_stime = ns_to_kernel_old_timeval(stime);
1785 
1786 	if (who != RUSAGE_CHILDREN) {
1787 		struct mm_struct *mm = get_task_mm(p);
1788 
1789 		if (mm) {
1790 			setmax_mm_hiwater_rss(&maxrss, mm);
1791 			mmput(mm);
1792 		}
1793 	}
1794 	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1795 }
1796 
1797 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1798 {
1799 	struct rusage r;
1800 
1801 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1802 	    who != RUSAGE_THREAD)
1803 		return -EINVAL;
1804 
1805 	getrusage(current, who, &r);
1806 	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1807 }
1808 
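/*
 * Illustrative userspace sketch (not part of this file): getrusage() above
 * reports ru_maxrss in kilobytes (pages are converted with PAGE_SIZE / 1024),
 * and the RUSAGE_CHILDREN counters only include children that have been
 * waited for.
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rusage ru;
 *
 *		if (getrusage(RUSAGE_SELF, &ru) != 0)
 *			return 1;
 *
 *		// ru_maxrss is in kB on Linux; BSDs historically used bytes.
 *		printf("peak RSS %ld kB, voluntary context switches %ld\n",
 *		       ru.ru_maxrss, ru.ru_nvcsw);
 *		return 0;
 *	}
 */
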
1809 #ifdef CONFIG_COMPAT
1810 COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1811 {
1812 	struct rusage r;
1813 
1814 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1815 	    who != RUSAGE_THREAD)
1816 		return -EINVAL;
1817 
1818 	getrusage(current, who, &r);
1819 	return put_compat_rusage(&r, ru);
1820 }
1821 #endif
1822 
1823 SYSCALL_DEFINE1(umask, int, mask)
1824 {
1825 	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1826 	return mask;
1827 }
1828 
1829 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1830 {
1831 	struct fd exe;
1832 	struct file *old_exe, *exe_file;
1833 	struct inode *inode;
1834 	int err;
1835 
1836 	exe = fdget(fd);
1837 	if (!exe.file)
1838 		return -EBADF;
1839 
1840 	inode = file_inode(exe.file);
1841 
1842 	/*
1843 	 * Because the original mm->exe_file points to an executable file, make
1844 	 * sure that this one is executable as well, to avoid breaking the
1845 	 * overall picture.
1846 	 */
1847 	err = -EACCES;
1848 	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1849 		goto exit;
1850 
1851 	err = inode_permission(inode, MAY_EXEC);
1852 	if (err)
1853 		goto exit;
1854 
1855 	/*
1856 	 * Forbid changing mm->exe_file if the old file is still mapped.
1857 	 */
1858 	exe_file = get_mm_exe_file(mm);
1859 	err = -EBUSY;
1860 	if (exe_file) {
1861 		struct vm_area_struct *vma;
1862 
1863 		mmap_read_lock(mm);
1864 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
1865 			if (!vma->vm_file)
1866 				continue;
1867 			if (path_equal(&vma->vm_file->f_path,
1868 				       &exe_file->f_path))
1869 				goto exit_err;
1870 		}
1871 
1872 		mmap_read_unlock(mm);
1873 		fput(exe_file);
1874 	}
1875 
1876 	err = 0;
1877 	/* set the new file, lockless */
1878 	get_file(exe.file);
1879 	old_exe = xchg(&mm->exe_file, exe.file);
1880 	if (old_exe)
1881 		fput(old_exe);
1882 exit:
1883 	fdput(exe);
1884 	return err;
1885 exit_err:
1886 	mmap_read_unlock(mm);
1887 	fput(exe_file);
1888 	goto exit;
1889 }
1890 
1891 /*
1892  * Check arithmetic relations of passed addresses.
1893  *
1894  * WARNING: we don't require any capability here so be very careful
1895  * in what is allowed for modification from userspace.
1896  */
1897 static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
1898 {
1899 	unsigned long mmap_max_addr = TASK_SIZE;
1900 	int error = -EINVAL, i;
1901 
1902 	static const unsigned char offsets[] = {
1903 		offsetof(struct prctl_mm_map, start_code),
1904 		offsetof(struct prctl_mm_map, end_code),
1905 		offsetof(struct prctl_mm_map, start_data),
1906 		offsetof(struct prctl_mm_map, end_data),
1907 		offsetof(struct prctl_mm_map, start_brk),
1908 		offsetof(struct prctl_mm_map, brk),
1909 		offsetof(struct prctl_mm_map, start_stack),
1910 		offsetof(struct prctl_mm_map, arg_start),
1911 		offsetof(struct prctl_mm_map, arg_end),
1912 		offsetof(struct prctl_mm_map, env_start),
1913 		offsetof(struct prctl_mm_map, env_end),
1914 	};
1915 
1916 	/*
1917 	 * Make sure the members are not somewhere outside
1918 	 * of allowed address space.
1919 	 */
1920 	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1921 		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1922 
1923 		if ((unsigned long)val >= mmap_max_addr ||
1924 		    (unsigned long)val < mmap_min_addr)
1925 			goto out;
1926 	}
1927 
1928 	/*
1929 	 * Make sure the pairs are ordered.
1930 	 */
1931 #define __prctl_check_order(__m1, __op, __m2)				\
1932 	((unsigned long)prctl_map->__m1 __op				\
1933 	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1934 	error  = __prctl_check_order(start_code, <, end_code);
1935 	error |= __prctl_check_order(start_data, <=, end_data);
1936 	error |= __prctl_check_order(start_brk, <=, brk);
1937 	error |= __prctl_check_order(arg_start, <=, arg_end);
1938 	error |= __prctl_check_order(env_start, <=, env_end);
1939 	if (error)
1940 		goto out;
1941 #undef __prctl_check_order
1942 
1943 	error = -EINVAL;
1944 
1945 	/*
1946 	 * @brk should be after @end_data in traditional maps.
1947 	 */
1948 	if (prctl_map->start_brk <= prctl_map->end_data ||
1949 	    prctl_map->brk <= prctl_map->end_data)
1950 		goto out;
1951 
1952 	/*
1953 	 * Nor should we allow overriding the limits if they are set.
1954 	 */
1955 	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1956 			      prctl_map->start_brk, prctl_map->end_data,
1957 			      prctl_map->start_data))
1958 			goto out;
1959 
1960 	error = 0;
1961 out:
1962 	return error;
1963 }
1964 
1965 #ifdef CONFIG_CHECKPOINT_RESTORE
1966 static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1967 {
1968 	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1969 	unsigned long user_auxv[AT_VECTOR_SIZE];
1970 	struct mm_struct *mm = current->mm;
1971 	int error;
1972 
1973 	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1974 	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1975 
1976 	if (opt == PR_SET_MM_MAP_SIZE)
1977 		return put_user((unsigned int)sizeof(prctl_map),
1978 				(unsigned int __user *)addr);
1979 
1980 	if (data_size != sizeof(prctl_map))
1981 		return -EINVAL;
1982 
1983 	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1984 		return -EFAULT;
1985 
1986 	error = validate_prctl_map_addr(&prctl_map);
1987 	if (error)
1988 		return error;
1989 
1990 	if (prctl_map.auxv_size) {
1991 		/*
1992 		 * Someone is trying to cheat with the auxv vector.
1993 		 */
1994 		if (!prctl_map.auxv ||
1995 				prctl_map.auxv_size > sizeof(mm->saved_auxv))
1996 			return -EINVAL;
1997 
1998 		memset(user_auxv, 0, sizeof(user_auxv));
1999 		if (copy_from_user(user_auxv,
2000 				   (const void __user *)prctl_map.auxv,
2001 				   prctl_map.auxv_size))
2002 			return -EFAULT;
2003 
2004 		/* Last entry must be AT_NULL as specification requires */
2005 		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
2006 		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
2007 	}
2008 
2009 	if (prctl_map.exe_fd != (u32)-1) {
2010 		/*
2011 		 * Check whether the current user is checkpoint/restore capable.
2012 		 * At the time of this writing, the check requires CAP_SYS_ADMIN
2013 		 * or CAP_CHECKPOINT_RESTORE.
2014 		 * Note that a user with access to ptrace can pass off an
2015 		 * arbitrary program as any executable, even setuid ones.
2016 		 * This may have implications for the tomoyo subsystem.
2017 		 */
2018 		if (!checkpoint_restore_ns_capable(current_user_ns()))
2019 			return -EPERM;
2020 
2021 		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2022 		if (error)
2023 			return error;
2024 	}
2025 
2026 	/*
2027 	 * arg_lock protects concurrent updates, but we still need mmap_lock
2028 	 * for read to exclude races with sys_brk.
2029 	 */
2030 	mmap_read_lock(mm);
2031 
2032 	/*
2033 	 * We don't validate that these members point to real, present
2034 	 * VMAs, because the application may already have unmapped the
2035 	 * corresponding VMAs and the kernel mostly uses these members
2036 	 * for statistics output in procfs, except
2037 	 *
2038 	 *  - @start_brk/@brk, which are used in do_brk_flags; the kernel
2039 	 *    looks up the VMAs when updating these members, so a bogus
2040 	 *    value written here makes the kernel complain about the
2041 	 *    userspace program but won't cause any problem in the kernel itself
2042 	 */
2043 
2044 	spin_lock(&mm->arg_lock);
2045 	mm->start_code	= prctl_map.start_code;
2046 	mm->end_code	= prctl_map.end_code;
2047 	mm->start_data	= prctl_map.start_data;
2048 	mm->end_data	= prctl_map.end_data;
2049 	mm->start_brk	= prctl_map.start_brk;
2050 	mm->brk		= prctl_map.brk;
2051 	mm->start_stack	= prctl_map.start_stack;
2052 	mm->arg_start	= prctl_map.arg_start;
2053 	mm->arg_end	= prctl_map.arg_end;
2054 	mm->env_start	= prctl_map.env_start;
2055 	mm->env_end	= prctl_map.env_end;
2056 	spin_unlock(&mm->arg_lock);
2057 
2058 	/*
2059 	 * Note this update of @saved_auxv is lockless, so if someone
2060 	 * reads this member in procfs while we're updating, they may
2061 	 * get partially updated results. This is a known and
2062 	 * acceptable trade-off: we leave it as is rather than
2063 	 * introduce additional locks here and make the kernel
2064 	 * more complex.
2065 	 */
2066 	if (prctl_map.auxv_size)
2067 		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2068 
2069 	mmap_read_unlock(mm);
2070 	return 0;
2071 }
2072 #endif /* CONFIG_CHECKPOINT_RESTORE */
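
/*
 * A rough userspace sketch of the two-step PR_SET_MM_MAP protocol handled
 * above, for illustration only and not part of this file. It assumes
 * <sys/prctl.h> and <linux/prctl.h> provide PR_SET_MM, PR_SET_MM_MAP_SIZE,
 * PR_SET_MM_MAP and struct prctl_mm_map, and that the kernel was built with
 * CONFIG_CHECKPOINT_RESTORE:
 *
 *	unsigned int size;
 *	struct prctl_mm_map map = { .exe_fd = (unsigned int)-1 };
 *
 *	// Ask the kernel how large it expects the map to be.
 *	prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, (unsigned long)&size, 0, 0);
 *
 *	// Fill in map.start_code, map.end_code, ... and then install it.
 *	prctl(PR_SET_MM, PR_SET_MM_MAP, (unsigned long)&map, size, 0);
 *
 * The second call fails with EINVAL if the addresses violate the ordering
 * rules checked by validate_prctl_map_addr().
 */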
2073 
2074 static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2075 			  unsigned long len)
2076 {
2077 	/*
2078 	 * This doesn't move the auxiliary vector itself since it's pinned to
2079 	 * mm_struct, but it permits filling the vector with new values.  It's
2080 	 * up to the caller to provide sane values here, otherwise userspace
2081 	 * tools which use this vector might be unhappy.
2082 	 */
2083 	unsigned long user_auxv[AT_VECTOR_SIZE];
2084 
2085 	if (len > sizeof(user_auxv))
2086 		return -EINVAL;
2087 
2088 	if (copy_from_user(user_auxv, (const void __user *)addr, len))
2089 		return -EFAULT;
2090 
2091 	/* Make sure the last entry is always AT_NULL */
2092 	user_auxv[AT_VECTOR_SIZE - 2] = 0;
2093 	user_auxv[AT_VECTOR_SIZE - 1] = 0;
2094 
2095 	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2096 
2097 	task_lock(current);
2098 	memcpy(mm->saved_auxv, user_auxv, len);
2099 	task_unlock(current);
2100 
2101 	return 0;
2102 }
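
/*
 * A rough userspace usage sketch for the helper above, for illustration
 * only. It assumes <sys/prctl.h> and <elf.h>, and that the caller has
 * CAP_SYS_RESOURCE, which the prctl_set_mm() path below requires:
 *
 *	unsigned long auxv[2] = { AT_NULL, 0 };
 *
 *	prctl(PR_SET_MM, PR_SET_MM_AUXV, (unsigned long)auxv, sizeof(auxv), 0);
 *
 * Any length up to sizeof(mm->saved_auxv) is accepted; the last two slots
 * of the kernel's staging buffer are cleared so a full-size vector is
 * always AT_NULL-terminated.
 */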
2103 
2104 static int prctl_set_mm(int opt, unsigned long addr,
2105 			unsigned long arg4, unsigned long arg5)
2106 {
2107 	struct mm_struct *mm = current->mm;
2108 	struct prctl_mm_map prctl_map = {
2109 		.auxv = NULL,
2110 		.auxv_size = 0,
2111 		.exe_fd = -1,
2112 	};
2113 	struct vm_area_struct *vma;
2114 	int error;
2115 
2116 	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2117 			      opt != PR_SET_MM_MAP &&
2118 			      opt != PR_SET_MM_MAP_SIZE)))
2119 		return -EINVAL;
2120 
2121 #ifdef CONFIG_CHECKPOINT_RESTORE
2122 	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2123 		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2124 #endif
2125 
2126 	if (!capable(CAP_SYS_RESOURCE))
2127 		return -EPERM;
2128 
2129 	if (opt == PR_SET_MM_EXE_FILE)
2130 		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2131 
2132 	if (opt == PR_SET_MM_AUXV)
2133 		return prctl_set_auxv(mm, addr, arg4);
2134 
2135 	if (addr >= TASK_SIZE || addr < mmap_min_addr)
2136 		return -EINVAL;
2137 
2138 	error = -EINVAL;
2139 
2140 	/*
2141 	 * arg_lock protects concurrent updates of the arg boundaries; we need
2142 	 * mmap_lock for a) concurrent sys_brk, b) finding the VMA for addr
2143 	 * validation.
2144 	 */
2145 	mmap_read_lock(mm);
2146 	vma = find_vma(mm, addr);
2147 
2148 	spin_lock(&mm->arg_lock);
2149 	prctl_map.start_code	= mm->start_code;
2150 	prctl_map.end_code	= mm->end_code;
2151 	prctl_map.start_data	= mm->start_data;
2152 	prctl_map.end_data	= mm->end_data;
2153 	prctl_map.start_brk	= mm->start_brk;
2154 	prctl_map.brk		= mm->brk;
2155 	prctl_map.start_stack	= mm->start_stack;
2156 	prctl_map.arg_start	= mm->arg_start;
2157 	prctl_map.arg_end	= mm->arg_end;
2158 	prctl_map.env_start	= mm->env_start;
2159 	prctl_map.env_end	= mm->env_end;
2160 
2161 	switch (opt) {
2162 	case PR_SET_MM_START_CODE:
2163 		prctl_map.start_code = addr;
2164 		break;
2165 	case PR_SET_MM_END_CODE:
2166 		prctl_map.end_code = addr;
2167 		break;
2168 	case PR_SET_MM_START_DATA:
2169 		prctl_map.start_data = addr;
2170 		break;
2171 	case PR_SET_MM_END_DATA:
2172 		prctl_map.end_data = addr;
2173 		break;
2174 	case PR_SET_MM_START_STACK:
2175 		prctl_map.start_stack = addr;
2176 		break;
2177 	case PR_SET_MM_START_BRK:
2178 		prctl_map.start_brk = addr;
2179 		break;
2180 	case PR_SET_MM_BRK:
2181 		prctl_map.brk = addr;
2182 		break;
2183 	case PR_SET_MM_ARG_START:
2184 		prctl_map.arg_start = addr;
2185 		break;
2186 	case PR_SET_MM_ARG_END:
2187 		prctl_map.arg_end = addr;
2188 		break;
2189 	case PR_SET_MM_ENV_START:
2190 		prctl_map.env_start = addr;
2191 		break;
2192 	case PR_SET_MM_ENV_END:
2193 		prctl_map.env_end = addr;
2194 		break;
2195 	default:
2196 		goto out;
2197 	}
2198 
2199 	error = validate_prctl_map_addr(&prctl_map);
2200 	if (error)
2201 		goto out;
2202 
2203 	switch (opt) {
2204 	/*
2205 	 * If command line arguments and environment are
2206 	 * placed somewhere else on the stack, we can set
2207 	 * them up here: ARG_START/END for the command
2208 	 * line arguments and ENV_START/END for the
2209 	 * environment.
2210 	 */
2211 	case PR_SET_MM_START_STACK:
2212 	case PR_SET_MM_ARG_START:
2213 	case PR_SET_MM_ARG_END:
2214 	case PR_SET_MM_ENV_START:
2215 	case PR_SET_MM_ENV_END:
2216 		if (!vma) {
2217 			error = -EFAULT;
2218 			goto out;
2219 		}
2220 	}
2221 
2222 	mm->start_code	= prctl_map.start_code;
2223 	mm->end_code	= prctl_map.end_code;
2224 	mm->start_data	= prctl_map.start_data;
2225 	mm->end_data	= prctl_map.end_data;
2226 	mm->start_brk	= prctl_map.start_brk;
2227 	mm->brk		= prctl_map.brk;
2228 	mm->start_stack	= prctl_map.start_stack;
2229 	mm->arg_start	= prctl_map.arg_start;
2230 	mm->arg_end	= prctl_map.arg_end;
2231 	mm->env_start	= prctl_map.env_start;
2232 	mm->env_end	= prctl_map.env_end;
2233 
2234 	error = 0;
2235 out:
2236 	spin_unlock(&mm->arg_lock);
2237 	mmap_read_unlock(mm);
2238 	return error;
2239 }
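
/*
 * A rough userspace sketch of the legacy one-field-at-a-time interface
 * handled above, for illustration only. It assumes <sys/prctl.h>, a caller
 * with CAP_SYS_RESOURCE, and an address inside [mmap_min_addr, TASK_SIZE);
 * the stack/arg/env options additionally require a mapping at that address:
 *
 *	prctl(PR_SET_MM, PR_SET_MM_START_STACK, new_stack_addr, 0, 0);
 *
 * Each call re-runs validate_prctl_map_addr() on the combined layout, so a
 * single out-of-order field is rejected with EINVAL.
 */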
2240 
2241 #ifdef CONFIG_CHECKPOINT_RESTORE
2242 static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
2243 {
2244 	return put_user(me->clear_child_tid, tid_addr);
2245 }
2246 #else
2247 static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
2248 {
2249 	return -EINVAL;
2250 }
2251 #endif
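
/*
 * A rough userspace sketch for PR_GET_TID_ADDRESS, for illustration only.
 * It assumes <sys/prctl.h> and a kernel built with CONFIG_CHECKPOINT_RESTORE:
 *
 *	int *tid_addr;
 *
 *	prctl(PR_GET_TID_ADDRESS, (unsigned long)&tid_addr, 0, 0, 0);
 *
 * This reports the clear_child_tid pointer previously registered via
 * set_tid_address(2) or clone(2) with CLONE_CHILD_CLEARTID.
 */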
2252 
2253 static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2254 {
2255 	/*
2256 	 * If the task has has_child_subreaper set, all its descendants
2257 	 * already have this flag too and new descendants will inherit
2258 	 * it on fork, so skip them.
2259 	 *
2260 	 * If we've found a child_reaper, skip the descendants in its
2261 	 * subtree as they will never get out of that pidns.
2262 	 */
2263 	if (p->signal->has_child_subreaper ||
2264 	    is_child_reaper(task_pid(p)))
2265 		return 0;
2266 
2267 	p->signal->has_child_subreaper = 1;
2268 	return 1;
2269 }
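
/*
 * A rough userspace sketch for PR_SET_CHILD_SUBREAPER, which drives the
 * walk above, for illustration only (assumes <sys/prctl.h>):
 *
 *	prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0);
 *
 * After this, orphaned descendants are re-parented to the caller instead of
 * to init, which is what service managers typically rely on.
 */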
2270 
2271 int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2272 {
2273 	return -EINVAL;
2274 }
2275 
2276 int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2277 				    unsigned long ctrl)
2278 {
2279 	return -EINVAL;
2280 }
2281 
2282 #define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
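
/*
 * A rough userspace sketch for PR_SET_IO_FLUSHER, for illustration only.
 * It assumes <sys/prctl.h> and a caller with CAP_SYS_RESOURCE:
 *
 *	prctl(PR_SET_IO_FLUSHER, 1, 0, 0, 0);
 *
 * This marks the task as one that may sit in the block I/O write-back path
 * under memory pressure (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE), e.g. a
 * userspace block driver or FUSE daemon.
 */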
2283 
2284 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2285 		unsigned long, arg4, unsigned long, arg5)
2286 {
2287 	struct task_struct *me = current;
2288 	unsigned char comm[sizeof(me->comm)];
2289 	long error;
2290 
2291 	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2292 	if (error != -ENOSYS)
2293 		return error;
2294 
2295 	error = 0;
2296 	switch (option) {
2297 	case PR_SET_PDEATHSIG:
2298 		if (!valid_signal(arg2)) {
2299 			error = -EINVAL;
2300 			break;
2301 		}
2302 		me->pdeath_signal = arg2;
2303 		break;
2304 	case PR_GET_PDEATHSIG:
2305 		error = put_user(me->pdeath_signal, (int __user *)arg2);
2306 		break;
2307 	case PR_GET_DUMPABLE:
2308 		error = get_dumpable(me->mm);
2309 		break;
2310 	case PR_SET_DUMPABLE:
2311 		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2312 			error = -EINVAL;
2313 			break;
2314 		}
2315 		set_dumpable(me->mm, arg2);
2316 		break;
2317 
2318 	case PR_SET_UNALIGN:
2319 		error = SET_UNALIGN_CTL(me, arg2);
2320 		break;
2321 	case PR_GET_UNALIGN:
2322 		error = GET_UNALIGN_CTL(me, arg2);
2323 		break;
2324 	case PR_SET_FPEMU:
2325 		error = SET_FPEMU_CTL(me, arg2);
2326 		break;
2327 	case PR_GET_FPEMU:
2328 		error = GET_FPEMU_CTL(me, arg2);
2329 		break;
2330 	case PR_SET_FPEXC:
2331 		error = SET_FPEXC_CTL(me, arg2);
2332 		break;
2333 	case PR_GET_FPEXC:
2334 		error = GET_FPEXC_CTL(me, arg2);
2335 		break;
2336 	case PR_GET_TIMING:
2337 		error = PR_TIMING_STATISTICAL;
2338 		break;
2339 	case PR_SET_TIMING:
2340 		if (arg2 != PR_TIMING_STATISTICAL)
2341 			error = -EINVAL;
2342 		break;
2343 	case PR_SET_NAME:
2344 		comm[sizeof(me->comm) - 1] = 0;
2345 		if (strncpy_from_user(comm, (char __user *)arg2,
2346 				      sizeof(me->comm) - 1) < 0)
2347 			return -EFAULT;
2348 		set_task_comm(me, comm);
2349 		proc_comm_connector(me);
2350 		break;
2351 	case PR_GET_NAME:
2352 		get_task_comm(comm, me);
2353 		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2354 			return -EFAULT;
2355 		break;
2356 	case PR_GET_ENDIAN:
2357 		error = GET_ENDIAN(me, arg2);
2358 		break;
2359 	case PR_SET_ENDIAN:
2360 		error = SET_ENDIAN(me, arg2);
2361 		break;
2362 	case PR_GET_SECCOMP:
2363 		error = prctl_get_seccomp();
2364 		break;
2365 	case PR_SET_SECCOMP:
2366 		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2367 		break;
2368 	case PR_GET_TSC:
2369 		error = GET_TSC_CTL(arg2);
2370 		break;
2371 	case PR_SET_TSC:
2372 		error = SET_TSC_CTL(arg2);
2373 		break;
2374 	case PR_TASK_PERF_EVENTS_DISABLE:
2375 		error = perf_event_task_disable();
2376 		break;
2377 	case PR_TASK_PERF_EVENTS_ENABLE:
2378 		error = perf_event_task_enable();
2379 		break;
2380 	case PR_GET_TIMERSLACK:
2381 		if (current->timer_slack_ns > ULONG_MAX)
2382 			error = ULONG_MAX;
2383 		else
2384 			error = current->timer_slack_ns;
2385 		break;
2386 	case PR_SET_TIMERSLACK:
2387 		if (arg2 <= 0)
2388 			current->timer_slack_ns =
2389 					current->default_timer_slack_ns;
2390 		else
2391 			current->timer_slack_ns = arg2;
2392 		break;
2393 	case PR_MCE_KILL:
2394 		if (arg4 | arg5)
2395 			return -EINVAL;
2396 		switch (arg2) {
2397 		case PR_MCE_KILL_CLEAR:
2398 			if (arg3 != 0)
2399 				return -EINVAL;
2400 			current->flags &= ~PF_MCE_PROCESS;
2401 			break;
2402 		case PR_MCE_KILL_SET:
2403 			current->flags |= PF_MCE_PROCESS;
2404 			if (arg3 == PR_MCE_KILL_EARLY)
2405 				current->flags |= PF_MCE_EARLY;
2406 			else if (arg3 == PR_MCE_KILL_LATE)
2407 				current->flags &= ~PF_MCE_EARLY;
2408 			else if (arg3 == PR_MCE_KILL_DEFAULT)
2409 				current->flags &=
2410 						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2411 			else
2412 				return -EINVAL;
2413 			break;
2414 		default:
2415 			return -EINVAL;
2416 		}
2417 		break;
2418 	case PR_MCE_KILL_GET:
2419 		if (arg2 | arg3 | arg4 | arg5)
2420 			return -EINVAL;
2421 		if (current->flags & PF_MCE_PROCESS)
2422 			error = (current->flags & PF_MCE_EARLY) ?
2423 				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2424 		else
2425 			error = PR_MCE_KILL_DEFAULT;
2426 		break;
2427 	case PR_SET_MM:
2428 		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2429 		break;
2430 	case PR_GET_TID_ADDRESS:
2431 		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
2432 		break;
2433 	case PR_SET_CHILD_SUBREAPER:
2434 		me->signal->is_child_subreaper = !!arg2;
2435 		if (!arg2)
2436 			break;
2437 
2438 		walk_process_tree(me, propagate_has_child_subreaper, NULL);
2439 		break;
2440 	case PR_GET_CHILD_SUBREAPER:
2441 		error = put_user(me->signal->is_child_subreaper,
2442 				 (int __user *)arg2);
2443 		break;
2444 	case PR_SET_NO_NEW_PRIVS:
2445 		if (arg2 != 1 || arg3 || arg4 || arg5)
2446 			return -EINVAL;
2447 
2448 		task_set_no_new_privs(current);
2449 		break;
2450 	case PR_GET_NO_NEW_PRIVS:
2451 		if (arg2 || arg3 || arg4 || arg5)
2452 			return -EINVAL;
2453 		return task_no_new_privs(current) ? 1 : 0;
2454 	case PR_GET_THP_DISABLE:
2455 		if (arg2 || arg3 || arg4 || arg5)
2456 			return -EINVAL;
2457 		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2458 		break;
2459 	case PR_SET_THP_DISABLE:
2460 		if (arg3 || arg4 || arg5)
2461 			return -EINVAL;
2462 		if (mmap_write_lock_killable(me->mm))
2463 			return -EINTR;
2464 		if (arg2)
2465 			set_bit(MMF_DISABLE_THP, &me->mm->flags);
2466 		else
2467 			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2468 		mmap_write_unlock(me->mm);
2469 		break;
2470 	case PR_MPX_ENABLE_MANAGEMENT:
2471 	case PR_MPX_DISABLE_MANAGEMENT:
2472 		/* No longer implemented: */
2473 		return -EINVAL;
2474 	case PR_SET_FP_MODE:
2475 		error = SET_FP_MODE(me, arg2);
2476 		break;
2477 	case PR_GET_FP_MODE:
2478 		error = GET_FP_MODE(me);
2479 		break;
2480 	case PR_SVE_SET_VL:
2481 		error = SVE_SET_VL(arg2);
2482 		break;
2483 	case PR_SVE_GET_VL:
2484 		error = SVE_GET_VL();
2485 		break;
2486 	case PR_GET_SPECULATION_CTRL:
2487 		if (arg3 || arg4 || arg5)
2488 			return -EINVAL;
2489 		error = arch_prctl_spec_ctrl_get(me, arg2);
2490 		break;
2491 	case PR_SET_SPECULATION_CTRL:
2492 		if (arg4 || arg5)
2493 			return -EINVAL;
2494 		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2495 		break;
2496 	case PR_PAC_RESET_KEYS:
2497 		if (arg3 || arg4 || arg5)
2498 			return -EINVAL;
2499 		error = PAC_RESET_KEYS(me, arg2);
2500 		break;
2501 	case PR_SET_TAGGED_ADDR_CTRL:
2502 		if (arg3 || arg4 || arg5)
2503 			return -EINVAL;
2504 		error = SET_TAGGED_ADDR_CTRL(arg2);
2505 		break;
2506 	case PR_GET_TAGGED_ADDR_CTRL:
2507 		if (arg2 || arg3 || arg4 || arg5)
2508 			return -EINVAL;
2509 		error = GET_TAGGED_ADDR_CTRL();
2510 		break;
2511 	case PR_SET_IO_FLUSHER:
2512 		if (!capable(CAP_SYS_RESOURCE))
2513 			return -EPERM;
2514 
2515 		if (arg3 || arg4 || arg5)
2516 			return -EINVAL;
2517 
2518 		if (arg2 == 1)
2519 			current->flags |= PR_IO_FLUSHER;
2520 		else if (!arg2)
2521 			current->flags &= ~PR_IO_FLUSHER;
2522 		else
2523 			return -EINVAL;
2524 		break;
2525 	case PR_GET_IO_FLUSHER:
2526 		if (!capable(CAP_SYS_RESOURCE))
2527 			return -EPERM;
2528 
2529 		if (arg2 || arg3 || arg4 || arg5)
2530 			return -EINVAL;
2531 
2532 		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
2533 		break;
2534 	case PR_SET_SYSCALL_USER_DISPATCH:
2535 		error = set_syscall_user_dispatch(arg2, arg3, arg4,
2536 						  (char __user *) arg5);
2537 		break;
2538 	default:
2539 		error = -EINVAL;
2540 		break;
2541 	}
2542 	return error;
2543 }
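
/*
 * A minimal userspace sketch of the prctl() entry point above, for
 * illustration only (assumes <sys/prctl.h>; the task name buffer,
 * TASK_COMM_LEN, is 16 bytes):
 *
 *	char name[16];
 *
 *	prctl(PR_SET_NAME, (unsigned long)"worker", 0, 0, 0);
 *	prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
 *
 * Unknown options fall through to the default case and return -EINVAL,
 * unless an LSM claimed them first via security_task_prctl().
 */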
2544 
2545 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2546 		struct getcpu_cache __user *, unused)
2547 {
2548 	int err = 0;
2549 	int cpu = raw_smp_processor_id();
2550 
2551 	if (cpup)
2552 		err |= put_user(cpu, cpup);
2553 	if (nodep)
2554 		err |= put_user(cpu_to_node(cpu), nodep);
2555 	return err ? -EFAULT : 0;
2556 }
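
/*
 * A minimal userspace sketch for the getcpu() syscall above, for
 * illustration only (assumes <unistd.h> and <sys/syscall.h>; the third
 * argument is ignored and kept only for ABI compatibility):
 *
 *	unsigned int cpu, node;
 *
 *	syscall(SYS_getcpu, &cpu, &node, NULL);
 *
 * Either pointer may be NULL if only the CPU or only the NUMA node is
 * wanted.
 */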
2557 
2558 /**
2559  * do_sysinfo - fill in sysinfo struct
2560  * @info: pointer to buffer to fill
2561  */
2562 static int do_sysinfo(struct sysinfo *info)
2563 {
2564 	unsigned long mem_total, sav_total;
2565 	unsigned int mem_unit, bitcount;
2566 	struct timespec64 tp;
2567 
2568 	memset(info, 0, sizeof(struct sysinfo));
2569 
2570 	ktime_get_boottime_ts64(&tp);
2571 	timens_add_boottime(&tp);
2572 	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2573 
2574 	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2575 
2576 	info->procs = nr_threads;
2577 
2578 	si_meminfo(info);
2579 	si_swapinfo(info);
2580 
2581 	/*
2582 	 * If the sum of all the available memory (i.e. ram + swap)
2583 	 * is less than can be stored in a 32 bit unsigned long then
2584 	 * we can be binary compatible with 2.2.x kernels.  If not,
2585 	 * well, in that case 2.2.x was broken anyways...
2586 	 *
2587 	 *  -Erik Andersen <andersee@debian.org>
2588 	 */
2589 
2590 	mem_total = info->totalram + info->totalswap;
2591 	if (mem_total < info->totalram || mem_total < info->totalswap)
2592 		goto out;
2593 	bitcount = 0;
2594 	mem_unit = info->mem_unit;
2595 	while (mem_unit > 1) {
2596 		bitcount++;
2597 		mem_unit >>= 1;
2598 		sav_total = mem_total;
2599 		mem_total <<= 1;
2600 		if (mem_total < sav_total)
2601 			goto out;
2602 	}
2603 
2604 	/*
2605 	 * If mem_total did not overflow, multiply all memory values by
2606 	 * info->mem_unit and set it to 1.  This leaves things compatible
2607 	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2608 	 * kernels...
2609 	 */
2610 
2611 	info->mem_unit = 1;
2612 	info->totalram <<= bitcount;
2613 	info->freeram <<= bitcount;
2614 	info->sharedram <<= bitcount;
2615 	info->bufferram <<= bitcount;
2616 	info->totalswap <<= bitcount;
2617 	info->freeswap <<= bitcount;
2618 	info->totalhigh <<= bitcount;
2619 	info->freehigh <<= bitcount;
2620 
2621 out:
2622 	return 0;
2623 }
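
/*
 * Worked example of the normalization above, under the common case where
 * si_meminfo() reports values in units of PAGE_SIZE (assumed 4096 here):
 * mem_unit = 4096 = 2^12, so bitcount becomes 12 and, provided the shifts
 * never overflow, every count is multiplied by 4096 while mem_unit is set
 * to 1 -- i.e. 524288 pages of RAM are reported as 2147483648 bytes.
 */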
2624 
2625 SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2626 {
2627 	struct sysinfo val;
2628 
2629 	do_sysinfo(&val);
2630 
2631 	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2632 		return -EFAULT;
2633 
2634 	return 0;
2635 }
2636 
2637 #ifdef CONFIG_COMPAT
2638 struct compat_sysinfo {
2639 	s32 uptime;
2640 	u32 loads[3];
2641 	u32 totalram;
2642 	u32 freeram;
2643 	u32 sharedram;
2644 	u32 bufferram;
2645 	u32 totalswap;
2646 	u32 freeswap;
2647 	u16 procs;
2648 	u16 pad;
2649 	u32 totalhigh;
2650 	u32 freehigh;
2651 	u32 mem_unit;
2652 	char _f[20-2*sizeof(u32)-sizeof(int)];
2653 };
2654 
2655 COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2656 {
2657 	struct sysinfo s;
2658 	struct compat_sysinfo s_32;
2659 
2660 	do_sysinfo(&s);
2661 
2662 	/* Check to see if any memory value is too large for 32-bit and
2663 	 * scale down if needed.
2664 	 */
2665 	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2666 		int bitcount = 0;
2667 
2668 		while (s.mem_unit < PAGE_SIZE) {
2669 			s.mem_unit <<= 1;
2670 			bitcount++;
2671 		}
2672 
2673 		s.totalram >>= bitcount;
2674 		s.freeram >>= bitcount;
2675 		s.sharedram >>= bitcount;
2676 		s.bufferram >>= bitcount;
2677 		s.totalswap >>= bitcount;
2678 		s.freeswap >>= bitcount;
2679 		s.totalhigh >>= bitcount;
2680 		s.freehigh >>= bitcount;
2681 	}
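
	/*
	 * Worked example of the scaling above: with 16 GiB of RAM and
	 * mem_unit == 1, totalram is 2^34 and does not fit in 32 bits;
	 * raising mem_unit to PAGE_SIZE (assumed 4096) shifts the counts
	 * right by 12, giving 2^22 units of 4096 bytes, which fits.
	 */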
2682 
2683 	memset(&s_32, 0, sizeof(s_32));
2684 	s_32.uptime = s.uptime;
2685 	s_32.loads[0] = s.loads[0];
2686 	s_32.loads[1] = s.loads[1];
2687 	s_32.loads[2] = s.loads[2];
2688 	s_32.totalram = s.totalram;
2689 	s_32.freeram = s.freeram;
2690 	s_32.sharedram = s.sharedram;
2691 	s_32.bufferram = s.bufferram;
2692 	s_32.totalswap = s.totalswap;
2693 	s_32.freeswap = s.freeswap;
2694 	s_32.procs = s.procs;
2695 	s_32.totalhigh = s.totalhigh;
2696 	s_32.freehigh = s.freehigh;
2697 	s_32.mem_unit = s.mem_unit;
2698 	if (copy_to_user(info, &s_32, sizeof(s_32)))
2699 		return -EFAULT;
2700 	return 0;
2701 }
2702 #endif /* CONFIG_COMPAT */
2703