// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/time_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/nospec.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "uid16.h"

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a, b)	(-EINVAL)
#endif
#ifndef SVE_SET_VL
# define SVE_SET_VL(a)		(-EINVAL)
#endif
#ifndef SVE_GET_VL
# define SVE_GET_VL()		(-EINVAL)
#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b)	(-EINVAL)
#endif
#ifndef SET_TAGGED_ADDR_CTRL
# define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
#endif
#ifndef GET_TAGGED_ADDR_CTRL
# define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
#endif

/*
 * This is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * The same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * Returns true if current's euid is the same as p's uid or euid,
 * or if current has CAP_SYS_NICE in p's user_ns.
 *
 * Called with rcu_read_lock held; the creds are safe to access.
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}


/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
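
/*
 * Editor's note: a minimal userspace sketch of the +20 offset described
 * above. The raw syscall returns rlimit-style values in 40..1 (via
 * nice_to_rlimit(), i.e. 20 - nice), while the glibc wrapper converts
 * the result back to the familiar -20..19 nice range:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *		int nice = getpriority(PRIO_PROCESS, 0); // glibc converts
 *
 *		// raw == 20 - nice: nice 0 -> raw 20, nice -20 -> raw 40
 *		printf("raw=%ld nice=%d\n", raw, nice);
 *		return 0;
 *	}
 */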

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races; the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
long __sys_setregid(gid_t rgid, gid_t egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	return __sys_setregid(rgid, egid);
}
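
/*
 * Editor's note: an illustrative userspace sketch of the BSD-style drop
 * described above __sys_setregid(). Because setting the real gid also
 * rewrites the saved gid, a setgid program can permanently discard its
 * privileged group like this (sketch without full error reporting):
 *
 *	#include <unistd.h>
 *
 *	static int drop_setgid_privs(void)
 *	{
 *		gid_t rgid = getgid();          // unprivileged real gid
 *
 *		if (setregid(rgid, rgid) != 0)  // sgid becomes rgid too
 *			return -1;
 *		return getegid() == rgid ? 0 : -1;
 *	}
 */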

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
long __sys_setgid(gid_t gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	return __sys_setgid(gid);
}

/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit of a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
long __sys_setreuid(uid_t ruid, uid_t euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable_setid(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	return __sys_setreuid(ruid, euid);
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
long __sys_setuid(uid_t uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	return __sys_setuid(uid);
}
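
/*
 * Editor's note: a hedged sketch of the SysV-vs-BSD point made above.
 * For a setuid-root program, plain setuid() is a one-way door (it also
 * rewrites the saved uid), while setreuid() can swap the real and
 * effective uids and later swap them back. Sketch only; real code must
 * check every return value:
 *
 *	#include <unistd.h>
 *
 *	void swap_privs_demo(void)
 *	{
 *		uid_t root = geteuid();    // typically 0 for setuid-root
 *		uid_t user = getuid();
 *
 *		setreuid(root, user);      // drop: real=root, effective=user
 *		// ... do unprivileged work as 'user' ...
 *		setreuid(user, root);      // regain: real=user, effective=root
 *	}
 */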


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4BSD-compatible seteuid().
 */
long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	return __sys_setresuid(ruid, euid, suid);
}

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}
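
/*
 * Editor's note: getresuid() above is the simplest way for a program to
 * observe what the setresuid() family actually did to its credentials.
 * A small userspace sketch (_GNU_SOURCE is needed for the glibc
 * prototype):
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		uid_t r, e, s;
 *
 *		if (getresuid(&r, &e, &s) == 0)
 *			printf("ruid=%u euid=%u suid=%u\n",
 *			       (unsigned)r, (unsigned)e, (unsigned)s);
 *		return 0;
 *	}
 */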

/*
 * Same as above, but for rgid, egid, sgid.
 */
long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	return __sys_setresgid(rgid, egid, sgid);
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access().
 */
long __sys_setfsuid(uid_t uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	return __sys_setfsuid(uid);
}
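
/*
 * Editor's note: as the code above shows, setfsuid() always returns the
 * *previous* fsuid and reports no errors, so the conventional userspace
 * way to check whether a change took effect is to call it twice
 * (sketch; <sys/fsuid.h> provides the glibc wrapper):
 *
 *	#include <sys/fsuid.h>
 *
 *	int change_fsuid(uid_t uid)
 *	{
 *		setfsuid(uid);
 *		return setfsuid(uid) == (int)uid ? 0 : -1; // re-read fsuid
 *	}
 */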

/*
 * The same again, but for the fsgid. (Originally "Samma på svenska.." --
 * Swedish for "the same in Swedish".)
 */
long __sys_setfsgid(gid_t gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	return __sys_setfsgid(gid);
}
#endif /* CONFIG_MULTIUSER */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}
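
/*
 * Editor's note: a tiny illustration of the tgid/pid distinction noted
 * above -- in a multi-threaded process every thread shares getpid() but
 * has its own gettid(); the two are equal only in the main thread.
 * (Sketch; gettid() gained a glibc wrapper in 2.30, so the raw syscall
 * is used here for portability.)
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int main(void)
 *	{
 *		long tid = syscall(SYS_gettid);
 *
 *		printf("pid=%d tid=%ld same=%d\n",
 *		       getpid(), tid, (long)getpid() == tid);
 *		return 0;
 *	}
 */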

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
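
/*
 * Editor's note: a hedged sketch of consuming sys_times() from
 * userspace. Both the return value and the struct tms fields are in
 * clock ticks, so they are scaled by sysconf(_SC_CLK_TCK) to get
 * seconds:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/times.h>
 *
 *	int main(void)
 *	{
 *		struct tms t;
 *		clock_t now = times(&t);
 *		long hz = sysconf(_SC_CLK_TCK);
 *
 *		printf("user=%.2fs sys=%.2fs (ticks=%ld, HZ=%ld)\n",
 *		       (double)t.tms_utime / hz, (double)t.tms_stime / hz,
 *		       (long)now, hz);
 *		return 0;
 *	}
 */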

#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}

static int do_getpgid(pid_t pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	return do_getpgid(pid);
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return do_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

int ksys_setsid(void)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

SYSCALL_DEFINE0(setsid)
{
	return ksys_setsid();
}
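
/*
 * Editor's note: ksys_setsid() above fails with -EPERM when a process
 * group with the caller's pid already exists -- in particular when the
 * caller is a process-group leader. That is why the classic
 * daemonization sketch fork()s first: the child is guaranteed not to be
 * a group leader, so setsid() can make it a session leader with no
 * controlling tty (sketch only):
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void daemonize_step(void)
 *	{
 *		if (fork() > 0)
 *			exit(0);        // parent exits
 *		if (setsid() < 0)       // child: new session, no ctty
 *			exit(1);
 *	}
 */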

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
 * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/...
 * would be 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
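
/*
 * Editor's note: an illustrative sketch of triggering the remapping
 * above. With the UNAME26 personality bit set, a 5.4.x kernel reports
 * its release as "2.6.64" (minor 4 + 60, per the formula used here).
 * Sketch only; UNAME26 comes from <linux/personality.h> and may require
 * reasonably recent userspace headers:
 *
 *	#include <stdio.h>
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *
 *		personality(PER_LINUX | UNAME26);
 *		uname(&u);
 *		printf("%s\n", u.release);   // e.g. "2.6.64" on 5.4.x
 *		return 0;
 *	}
 */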

SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	struct new_utsname tmp;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	struct old_utsname tmp;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	memcpy(&tmp, utsname(), sizeof(tmp));
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	if (override_architecture(name))
		return -EFAULT;
	return 0;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	struct oldold_utsname tmp;

	if (!name)
		return -EFAULT;

	memset(&tmp, 0, sizeof(tmp));

	down_read(&uts_sem);
	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
	up_read(&uts_sem);
	if (copy_to_user(name, &tmp, sizeof(tmp)))
		return -EFAULT;

	if (override_architecture(name))
		return -EFAULT;
	if (override_release(name->release, sizeof(name->release)))
		return -EFAULT;
	return 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		down_write(&uts_sem);
		u = utsname();
		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
		up_write(&uts_sem);
	}
	return errno;
}
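
/*
 * Editor's note: a minimal privileged-userspace sketch of the syscall
 * above. The new name need not be NUL-terminated, since the kernel pads
 * the nodename with zeroes itself; the caller needs CAP_SYS_ADMIN in
 * the UTS namespace:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int rename_host(const char *name)
 *	{
 *		return sethostname(name, strlen(name)); // 0 or -1/errno
 *	}
 */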

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i;
	struct new_utsname *u;
	char tmp[__NEW_UTS_LEN + 1];

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	memcpy(tmp, u->nodename, i);
	up_read(&uts_sem);
	if (copy_to_user(name, tmp, i))
		return -EFAULT;
	return 0;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u;

		down_write(&uts_sem);
		u = utsname();
		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
		up_write(&uts_sem);
	}
	return errno;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Backwards compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	resource = array_index_nospec(resource, RLIM_NLIMITS);
	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
				!capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
	 * ignores the rlimit.
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS))
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid)  &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}
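
/*
 * Editor's note: userspace normally reaches this syscall through the
 * glibc prlimit() wrapper. A hedged sketch that raises the caller's own
 * soft RLIMIT_NOFILE up to the hard limit (needs _GNU_SOURCE and
 * glibc >= 2.13; pid 0 means the calling process):
 *
 *	#define _GNU_SOURCE
 *	#include <sys/resource.h>
 *
 *	int raise_nofile(void)
 *	{
 *		struct rlimit r;
 *
 *		if (prlimit(0, RLIMIT_NOFILE, NULL, &r))
 *			return -1;
 *		r.rlim_cur = r.rlim_max;                // soft -> hard
 *		return prlimit(0, RLIMIT_NOFILE, &r, NULL);
 *	}
 */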

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH, i.e. for the
 * cases "current and multithreaded", "non-current and single-threaded"
 * and "non-current and multithreaded".  Thread traversal is safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current
 * and single-threaded: no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields.  If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting, so we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the
 * signal-> fields.  But we don't do this yet to keep things simple.
 */

1699 {
1700 	r->ru_nvcsw += t->nvcsw;
1701 	r->ru_nivcsw += t->nivcsw;
1702 	r->ru_minflt += t->min_flt;
1703 	r->ru_majflt += t->maj_flt;
1704 	r->ru_inblock += task_io_get_inblock(t);
1705 	r->ru_oublock += task_io_get_oublock(t);
1706 }
1707 
1708 void getrusage(struct task_struct *p, int who, struct rusage *r)
1709 {
1710 	struct task_struct *t;
1711 	unsigned long flags;
1712 	u64 tgutime, tgstime, utime, stime;
1713 	unsigned long maxrss = 0;
1714 
1715 	memset((char *)r, 0, sizeof (*r));
1716 	utime = stime = 0;
1717 
1718 	if (who == RUSAGE_THREAD) {
1719 		task_cputime_adjusted(current, &utime, &stime);
1720 		accumulate_thread_rusage(p, r);
1721 		maxrss = p->signal->maxrss;
1722 		goto out;
1723 	}
1724 
1725 	if (!lock_task_sighand(p, &flags))
1726 		return;
1727 
1728 	switch (who) {
1729 	case RUSAGE_BOTH:
1730 	case RUSAGE_CHILDREN:
1731 		utime = p->signal->cutime;
1732 		stime = p->signal->cstime;
1733 		r->ru_nvcsw = p->signal->cnvcsw;
1734 		r->ru_nivcsw = p->signal->cnivcsw;
1735 		r->ru_minflt = p->signal->cmin_flt;
1736 		r->ru_majflt = p->signal->cmaj_flt;
1737 		r->ru_inblock = p->signal->cinblock;
1738 		r->ru_oublock = p->signal->coublock;
1739 		maxrss = p->signal->cmaxrss;
1740 
1741 		if (who == RUSAGE_CHILDREN)
1742 			break;
1743 		/* fall through */
1744 
1745 	case RUSAGE_SELF:
1746 		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1747 		utime += tgutime;
1748 		stime += tgstime;
1749 		r->ru_nvcsw += p->signal->nvcsw;
1750 		r->ru_nivcsw += p->signal->nivcsw;
1751 		r->ru_minflt += p->signal->min_flt;
1752 		r->ru_majflt += p->signal->maj_flt;
1753 		r->ru_inblock += p->signal->inblock;
1754 		r->ru_oublock += p->signal->oublock;
1755 		if (maxrss < p->signal->maxrss)
1756 			maxrss = p->signal->maxrss;
1757 		t = p;
1758 		do {
1759 			accumulate_thread_rusage(t, r);
1760 		} while_each_thread(p, t);
1761 		break;
1762 
1763 	default:
1764 		BUG();
1765 	}
1766 	unlock_task_sighand(p, &flags);
1767 
1768 out:
1769 	r->ru_utime = ns_to_kernel_old_timeval(utime);
1770 	r->ru_stime = ns_to_kernel_old_timeval(stime);
1771 
1772 	if (who != RUSAGE_CHILDREN) {
1773 		struct mm_struct *mm = get_task_mm(p);
1774 
1775 		if (mm) {
1776 			setmax_mm_hiwater_rss(&maxrss, mm);
1777 			mmput(mm);
1778 		}
1779 	}
1780 	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1781 }
1782 
SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}
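
/*
 * Editor's note: a small sketch of reading the numbers getrusage()
 * fills in above; ru_maxrss is reported in kilobytes per the
 * pages-to-KB conversion at the end of getrusage():
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rusage r;
 *
 *		if (getrusage(RUSAGE_SELF, &r) == 0)
 *			printf("maxrss=%ld kB minflt=%ld majflt=%ld\n",
 *			       r.ru_maxrss, r.ru_minflt, r.ru_majflt);
 *		return 0;
 *	}
 */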

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
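
/*
 * Editor's note: umask() above can never fail and always returns the
 * previous mask, so reading the current value without changing it takes
 * two calls -- the same idiom as setfsuid():
 *
 *	#include <sys/stat.h>
 *
 *	mode_t read_umask(void)
 *	{
 *		mode_t old = umask(0);   // temporarily clear
 *		umask(old);              // restore immediately
 *		return old;
 *	}
 */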

static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct file *old_exe, *exe_file;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to an executable file,
	 * make sure that this one is executable as well, to avoid breaking
	 * the overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	/*
	 * Forbid mm->exe_file change if old file still mapped.
	 */
	exe_file = get_mm_exe_file(mm);
	err = -EBUSY;
	if (exe_file) {
		struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &exe_file->f_path))
				goto exit_err;
		}

		up_read(&mm->mmap_sem);
		fput(exe_file);
	}

	err = 0;
	/* set the new file, lockless */
	get_file(exe.file);
	old_exe = xchg(&mm->exe_file, exe.file);
	if (old_exe)
		fput(old_exe);
exit:
	fdput(exe);
	return err;
exit_err:
	up_read(&mm->mmap_sem);
	fput(exe_file);
	goto exit;
}

/*
 * Check arithmetic relations of passed addresses.
 *
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of the allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data, <=, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * @brk should be after @end_data in traditional maps.
	 */
	if (prctl_map->start_brk <= prctl_map->end_data ||
	    prctl_map->brk <= prctl_map->end_data)
		goto out;

	/*
	 * Nor should we allow overriding the limits if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
			goto out;

	error = 0;
out:
	return error;
}
1950 
1951 #ifdef CONFIG_CHECKPOINT_RESTORE
1952 static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1953 {
1954 	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1955 	unsigned long user_auxv[AT_VECTOR_SIZE];
1956 	struct mm_struct *mm = current->mm;
1957 	int error;
1958 
1959 	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1960 	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1961 
1962 	if (opt == PR_SET_MM_MAP_SIZE)
1963 		return put_user((unsigned int)sizeof(prctl_map),
1964 				(unsigned int __user *)addr);
1965 
1966 	if (data_size != sizeof(prctl_map))
1967 		return -EINVAL;
1968 
1969 	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1970 		return -EFAULT;
1971 
1972 	error = validate_prctl_map_addr(&prctl_map);
1973 	if (error)
1974 		return error;
1975 
1976 	if (prctl_map.auxv_size) {
1977 		/*
1978 		 * Someone is trying to cheat the auxv vector.
1979 		 */
1980 		if (!prctl_map.auxv ||
1981 				prctl_map.auxv_size > sizeof(mm->saved_auxv))
1982 			return -EINVAL;
1983 
1984 		memset(user_auxv, 0, sizeof(user_auxv));
1985 		if (copy_from_user(user_auxv,
1986 				   (const void __user *)prctl_map.auxv,
1987 				   prctl_map.auxv_size))
1988 			return -EFAULT;
1989 
1990 		/* Last entry must be AT_NULL as specification requires */
1991 		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
1992 		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
1993 	}
1994 
1995 	if (prctl_map.exe_fd != (u32)-1) {
1996 		/*
1997 		 * Make sure the caller has the rights to
1998 		 * change /proc/pid/exe link: only local sys admin should
1999 		 * be allowed to.
2000 		 */
2001 		if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
2002 			return -EINVAL;
2003 
2004 		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2005 		if (error)
2006 			return error;
2007 	}
2008 
2009 	/*
2010 	 * arg_lock protects concurent updates but we still need mmap_sem for
2011 	 * read to exclude races with sys_brk.
2012 	 */
2013 	down_read(&mm->mmap_sem);
2014 
2015 	/*
2016 	 * We don't validate that these members point to real, present
2017 	 * VMAs, because the application may already have unmapped the
2018 	 * corresponding VMAs; the kernel uses these members mostly for
2019 	 * statistics output in procfs, except
2020 	 *
2021 	 *  - @start_brk/@brk, which are used in do_brk, but the kernel
2022 	 *    looks up the VMAs when updating these members, so anything
2023 	 *    bogus written here makes the kernel complain about the
2024 	 *    userspace program but won't cause any problem in the kernel itself
2025 	 */
2026 
2027 	spin_lock(&mm->arg_lock);
2028 	mm->start_code	= prctl_map.start_code;
2029 	mm->end_code	= prctl_map.end_code;
2030 	mm->start_data	= prctl_map.start_data;
2031 	mm->end_data	= prctl_map.end_data;
2032 	mm->start_brk	= prctl_map.start_brk;
2033 	mm->brk		= prctl_map.brk;
2034 	mm->start_stack	= prctl_map.start_stack;
2035 	mm->arg_start	= prctl_map.arg_start;
2036 	mm->arg_end	= prctl_map.arg_end;
2037 	mm->env_start	= prctl_map.env_start;
2038 	mm->env_end	= prctl_map.env_end;
2039 	spin_unlock(&mm->arg_lock);
2040 
2041 	/*
2042 	 * Note this update of @saved_auxv is lockless, thus
2043 	 * if someone reads this member via procfs while we're
2044 	 * updating it, they may get partially updated results.
2045 	 * This is a known and acceptable trade-off: we leave it
2046 	 * as is rather than introduce additional locks here and
2047 	 * make the kernel more complex.
2048 	 */
2049 	if (prctl_map.auxv_size)
2050 		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2051 
2052 	up_read(&mm->mmap_sem);
2053 	return 0;
2054 }
2055 #endif /* CONFIG_CHECKPOINT_RESTORE */
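
/*
 * Userspace sketch (not kernel code) of how a checkpoint/restore tool
 * might drive this interface, assuming glibc's prctl(2) wrapper, err(3)
 * and the struct prctl_mm_map layout from <linux/prctl.h>; the "img"
 * holding the restored values is hypothetical:
 *
 *	struct prctl_mm_map map = {
 *		.start_code	= img->start_code,
 *		.end_code	= img->end_code,
 *		.start_data	= img->start_data,
 *		.end_data	= img->end_data,
 *		.start_brk	= img->start_brk,
 *		.brk		= img->brk,
 *		.start_stack	= img->start_stack,
 *		.arg_start	= img->arg_start,
 *		.arg_end	= img->arg_end,
 *		.env_start	= img->env_start,
 *		.env_end	= img->env_end,
 *		.auxv		= NULL,
 *		.auxv_size	= 0,
 *		.exe_fd		= (__u32)-1,	// keep the current exe link
 *	};
 *	unsigned int size;
 *
 *	// Probe the size this kernel expects, then install the whole
 *	// map in one shot.
 *	if (prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, (unsigned long)&size, 0, 0))
 *		err(1, "PR_SET_MM_MAP_SIZE");
 *	if (size != sizeof(map) ||
 *	    prctl(PR_SET_MM, PR_SET_MM_MAP, (unsigned long)&map, sizeof(map), 0))
 *		err(1, "PR_SET_MM_MAP");
 */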
2056 
2057 static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2058 			  unsigned long len)
2059 {
2060 	/*
2061 	 * This doesn't move the auxiliary vector itself since it's pinned to
2062 	 * mm_struct, but it permits filling the vector with new values.  It's
2063 	 * up to the caller to provide sane values here, otherwise userspace
2064 	 * tools which use this vector might be unhappy.
2065 	 */
2066 	unsigned long user_auxv[AT_VECTOR_SIZE];
2067 
2068 	if (len > sizeof(user_auxv))
2069 		return -EINVAL;
2070 
2071 	if (copy_from_user(user_auxv, (const void __user *)addr, len))
2072 		return -EFAULT;
2073 
2074 	/* Make sure the last entry is always AT_NULL */
2075 	user_auxv[AT_VECTOR_SIZE - 2] = 0;
2076 	user_auxv[AT_VECTOR_SIZE - 1] = 0;
2077 
2078 	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2079 
2080 	task_lock(current);
2081 	memcpy(mm->saved_auxv, user_auxv, len);
2082 	task_unlock(current);
2083 
2084 	return 0;
2085 }
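
/*
 * Userspace sketch (not kernel code) of refilling the vector through
 * prctl(PR_SET_MM, PR_SET_MM_AUXV, ...); prctl_set_mm() below checks
 * CAP_SYS_RESOURCE before landing here.  Entries are the usual
 * (type, value) pairs from <elf.h>, AT_NULL-terminated:
 *
 *	unsigned long auxv[] = { AT_PAGESZ, 4096, AT_NULL, 0 };
 *
 *	if (prctl(PR_SET_MM, PR_SET_MM_AUXV,
 *		  (unsigned long)auxv, sizeof(auxv), 0))
 *		err(1, "PR_SET_MM_AUXV");
 */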
2086 
2087 static int prctl_set_mm(int opt, unsigned long addr,
2088 			unsigned long arg4, unsigned long arg5)
2089 {
2090 	struct mm_struct *mm = current->mm;
2091 	struct prctl_mm_map prctl_map = {
2092 		.auxv = NULL,
2093 		.auxv_size = 0,
2094 		.exe_fd = -1,
2095 	};
2096 	struct vm_area_struct *vma;
2097 	int error;
2098 
2099 	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2100 			      opt != PR_SET_MM_MAP &&
2101 			      opt != PR_SET_MM_MAP_SIZE)))
2102 		return -EINVAL;
2103 
2104 #ifdef CONFIG_CHECKPOINT_RESTORE
2105 	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2106 		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2107 #endif
2108 
2109 	if (!capable(CAP_SYS_RESOURCE))
2110 		return -EPERM;
2111 
2112 	if (opt == PR_SET_MM_EXE_FILE)
2113 		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2114 
2115 	if (opt == PR_SET_MM_AUXV)
2116 		return prctl_set_auxv(mm, addr, arg4);
2117 
2118 	if (addr >= TASK_SIZE || addr < mmap_min_addr)
2119 		return -EINVAL;
2120 
2121 	error = -EINVAL;
2122 
2123 	/*
2124 	 * arg_lock protects concurrent updates of the arg boundaries; we need
2125 	 * mmap_sem for a) concurrent sys_brk, b) finding VMA for addr
2126 	 * validation.
2127 	 */
2128 	down_read(&mm->mmap_sem);
2129 	vma = find_vma(mm, addr);
2130 
2131 	spin_lock(&mm->arg_lock);
2132 	prctl_map.start_code	= mm->start_code;
2133 	prctl_map.end_code	= mm->end_code;
2134 	prctl_map.start_data	= mm->start_data;
2135 	prctl_map.end_data	= mm->end_data;
2136 	prctl_map.start_brk	= mm->start_brk;
2137 	prctl_map.brk		= mm->brk;
2138 	prctl_map.start_stack	= mm->start_stack;
2139 	prctl_map.arg_start	= mm->arg_start;
2140 	prctl_map.arg_end	= mm->arg_end;
2141 	prctl_map.env_start	= mm->env_start;
2142 	prctl_map.env_end	= mm->env_end;
2143 
2144 	switch (opt) {
2145 	case PR_SET_MM_START_CODE:
2146 		prctl_map.start_code = addr;
2147 		break;
2148 	case PR_SET_MM_END_CODE:
2149 		prctl_map.end_code = addr;
2150 		break;
2151 	case PR_SET_MM_START_DATA:
2152 		prctl_map.start_data = addr;
2153 		break;
2154 	case PR_SET_MM_END_DATA:
2155 		prctl_map.end_data = addr;
2156 		break;
2157 	case PR_SET_MM_START_STACK:
2158 		prctl_map.start_stack = addr;
2159 		break;
2160 	case PR_SET_MM_START_BRK:
2161 		prctl_map.start_brk = addr;
2162 		break;
2163 	case PR_SET_MM_BRK:
2164 		prctl_map.brk = addr;
2165 		break;
2166 	case PR_SET_MM_ARG_START:
2167 		prctl_map.arg_start = addr;
2168 		break;
2169 	case PR_SET_MM_ARG_END:
2170 		prctl_map.arg_end = addr;
2171 		break;
2172 	case PR_SET_MM_ENV_START:
2173 		prctl_map.env_start = addr;
2174 		break;
2175 	case PR_SET_MM_ENV_END:
2176 		prctl_map.env_end = addr;
2177 		break;
2178 	default:
2179 		goto out;
2180 	}
2181 
2182 	error = validate_prctl_map_addr(&prctl_map);
2183 	if (error)
2184 		goto out;
2185 
2186 	switch (opt) {
2187 	/*
2188 	 * If the command line arguments and environment
2189 	 * are placed somewhere else on the stack, we can
2190 	 * set them up here: ARG_START/END for the command
2191 	 * line arguments and ENV_START/END for the
2192 	 * environment.
2193 	 */
2194 	case PR_SET_MM_START_STACK:
2195 	case PR_SET_MM_ARG_START:
2196 	case PR_SET_MM_ARG_END:
2197 	case PR_SET_MM_ENV_START:
2198 	case PR_SET_MM_ENV_END:
2199 		if (!vma) {
2200 			error = -EFAULT;
2201 			goto out;
2202 		}
2203 	}
2204 
2205 	mm->start_code	= prctl_map.start_code;
2206 	mm->end_code	= prctl_map.end_code;
2207 	mm->start_data	= prctl_map.start_data;
2208 	mm->end_data	= prctl_map.end_data;
2209 	mm->start_brk	= prctl_map.start_brk;
2210 	mm->brk		= prctl_map.brk;
2211 	mm->start_stack	= prctl_map.start_stack;
2212 	mm->arg_start	= prctl_map.arg_start;
2213 	mm->arg_end	= prctl_map.arg_end;
2214 	mm->env_start	= prctl_map.env_start;
2215 	mm->env_end	= prctl_map.env_end;
2216 
2217 	error = 0;
2218 out:
2219 	spin_unlock(&mm->arg_lock);
2220 	up_read(&mm->mmap_sem);
2221 	return error;
2222 }
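
/*
 * Userspace sketch (not kernel code) of the single-field variant; needs
 * CAP_SYS_RESOURCE, and addr must fall within [mmap_min_addr, TASK_SIZE)
 * and pass validate_prctl_map_addr().  new_brk is a hypothetical,
 * suitably chosen address:
 *
 *	if (prctl(PR_SET_MM, PR_SET_MM_BRK, new_brk, 0, 0))
 *		err(1, "PR_SET_MM_BRK");
 */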
2223 
2224 #ifdef CONFIG_CHECKPOINT_RESTORE
2225 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2226 {
2227 	return put_user(me->clear_child_tid, tid_addr);
2228 }
2229 #else
2230 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2231 {
2232 	return -EINVAL;
2233 }
2234 #endif
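
/*
 * Userspace sketch (not kernel code): reading back the clear_child_tid
 * address installed by clone(2) or set_tid_address(2); only available
 * when the kernel is built with CONFIG_CHECKPOINT_RESTORE:
 *
 *	int *tid_addr;
 *
 *	if (prctl(PR_GET_TID_ADDRESS, (unsigned long)&tid_addr, 0, 0, 0) == 0)
 *		printf("clear_child_tid at %p\n", (void *)tid_addr);
 */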
2235 
2236 static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2237 {
2238 	/*
2239 	 * If the task has has_child_subreaper set, all its descendants
2240 	 * already have this flag too, and new descendants will
2241 	 * inherit it on fork, so skip them.
2242 	 *
2243 	 * If we've found the child_reaper, skip descendants in
2244 	 * its subtree as they will never get out of that pidns.
2245 	 */
2246 	if (p->signal->has_child_subreaper ||
2247 	    is_child_reaper(task_pid(p)))
2248 		return 0;
2249 
2250 	p->signal->has_child_subreaper = 1;
2251 	return 1;
2252 }
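
/*
 * Userspace sketch (not kernel code): a service manager marks itself a
 * child subreaper so orphaned descendants reparent to it instead of
 * init; the PR_SET_CHILD_SUBREAPER handler below then uses
 * walk_process_tree() with propagate_has_child_subreaper() to flag the
 * existing subtree:
 *
 *	if (prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0))
 *		err(1, "PR_SET_CHILD_SUBREAPER");
 */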
2253 
2254 int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2255 {
2256 	return -EINVAL;
2257 }
2258 
2259 int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2260 				    unsigned long ctrl)
2261 {
2262 	return -EINVAL;
2263 }
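
/*
 * Userspace sketch (not kernel code): querying and tightening the
 * speculative-store-bypass mitigation on architectures that override
 * the weak stubs above (constants from <linux/prctl.h>):
 *
 *	if (prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0) < 0)
 *		puts("no speculation control here");	// the stubs ran
 *	else if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *		       PR_SPEC_DISABLE, 0, 0))
 *		err(1, "PR_SET_SPECULATION_CTRL");
 */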
2264 
2265 #define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LESS_THROTTLE)
2266 
2267 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2268 		unsigned long, arg4, unsigned long, arg5)
2269 {
2270 	struct task_struct *me = current;
2271 	unsigned char comm[sizeof(me->comm)];
2272 	long error;
2273 
2274 	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2275 	if (error != -ENOSYS)
2276 		return error;
2277 
2278 	error = 0;
2279 	switch (option) {
2280 	case PR_SET_PDEATHSIG:
2281 		if (!valid_signal(arg2)) {
2282 			error = -EINVAL;
2283 			break;
2284 		}
2285 		me->pdeath_signal = arg2;
2286 		break;
2287 	case PR_GET_PDEATHSIG:
2288 		error = put_user(me->pdeath_signal, (int __user *)arg2);
2289 		break;
2290 	case PR_GET_DUMPABLE:
2291 		error = get_dumpable(me->mm);
2292 		break;
2293 	case PR_SET_DUMPABLE:
2294 		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2295 			error = -EINVAL;
2296 			break;
2297 		}
2298 		set_dumpable(me->mm, arg2);
2299 		break;
2300 
2301 	case PR_SET_UNALIGN:
2302 		error = SET_UNALIGN_CTL(me, arg2);
2303 		break;
2304 	case PR_GET_UNALIGN:
2305 		error = GET_UNALIGN_CTL(me, arg2);
2306 		break;
2307 	case PR_SET_FPEMU:
2308 		error = SET_FPEMU_CTL(me, arg2);
2309 		break;
2310 	case PR_GET_FPEMU:
2311 		error = GET_FPEMU_CTL(me, arg2);
2312 		break;
2313 	case PR_SET_FPEXC:
2314 		error = SET_FPEXC_CTL(me, arg2);
2315 		break;
2316 	case PR_GET_FPEXC:
2317 		error = GET_FPEXC_CTL(me, arg2);
2318 		break;
2319 	case PR_GET_TIMING:
2320 		error = PR_TIMING_STATISTICAL;
2321 		break;
2322 	case PR_SET_TIMING:
2323 		if (arg2 != PR_TIMING_STATISTICAL)
2324 			error = -EINVAL;
2325 		break;
2326 	case PR_SET_NAME:
2327 		comm[sizeof(me->comm) - 1] = 0;
2328 		if (strncpy_from_user(comm, (char __user *)arg2,
2329 				      sizeof(me->comm) - 1) < 0)
2330 			return -EFAULT;
2331 		set_task_comm(me, comm);
2332 		proc_comm_connector(me);
2333 		break;
2334 	case PR_GET_NAME:
2335 		get_task_comm(comm, me);
2336 		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2337 			return -EFAULT;
2338 		break;
2339 	case PR_GET_ENDIAN:
2340 		error = GET_ENDIAN(me, arg2);
2341 		break;
2342 	case PR_SET_ENDIAN:
2343 		error = SET_ENDIAN(me, arg2);
2344 		break;
2345 	case PR_GET_SECCOMP:
2346 		error = prctl_get_seccomp();
2347 		break;
2348 	case PR_SET_SECCOMP:
2349 		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2350 		break;
2351 	case PR_GET_TSC:
2352 		error = GET_TSC_CTL(arg2);
2353 		break;
2354 	case PR_SET_TSC:
2355 		error = SET_TSC_CTL(arg2);
2356 		break;
2357 	case PR_TASK_PERF_EVENTS_DISABLE:
2358 		error = perf_event_task_disable();
2359 		break;
2360 	case PR_TASK_PERF_EVENTS_ENABLE:
2361 		error = perf_event_task_enable();
2362 		break;
2363 	case PR_GET_TIMERSLACK:
2364 		if (current->timer_slack_ns > ULONG_MAX)
2365 			error = ULONG_MAX;
2366 		else
2367 			error = current->timer_slack_ns;
2368 		break;
2369 	case PR_SET_TIMERSLACK:
2370 		if (arg2 <= 0)
2371 			current->timer_slack_ns =
2372 					current->default_timer_slack_ns;
2373 		else
2374 			current->timer_slack_ns = arg2;
2375 		break;
2376 	case PR_MCE_KILL:
2377 		if (arg4 | arg5)
2378 			return -EINVAL;
2379 		switch (arg2) {
2380 		case PR_MCE_KILL_CLEAR:
2381 			if (arg3 != 0)
2382 				return -EINVAL;
2383 			current->flags &= ~PF_MCE_PROCESS;
2384 			break;
2385 		case PR_MCE_KILL_SET:
2386 			current->flags |= PF_MCE_PROCESS;
2387 			if (arg3 == PR_MCE_KILL_EARLY)
2388 				current->flags |= PF_MCE_EARLY;
2389 			else if (arg3 == PR_MCE_KILL_LATE)
2390 				current->flags &= ~PF_MCE_EARLY;
2391 			else if (arg3 == PR_MCE_KILL_DEFAULT)
2392 				current->flags &=
2393 						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2394 			else
2395 				return -EINVAL;
2396 			break;
2397 		default:
2398 			return -EINVAL;
2399 		}
2400 		break;
2401 	case PR_MCE_KILL_GET:
2402 		if (arg2 | arg3 | arg4 | arg5)
2403 			return -EINVAL;
2404 		if (current->flags & PF_MCE_PROCESS)
2405 			error = (current->flags & PF_MCE_EARLY) ?
2406 				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2407 		else
2408 			error = PR_MCE_KILL_DEFAULT;
2409 		break;
2410 	case PR_SET_MM:
2411 		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2412 		break;
2413 	case PR_GET_TID_ADDRESS:
2414 		error = prctl_get_tid_address(me, (int __user **)arg2);
2415 		break;
2416 	case PR_SET_CHILD_SUBREAPER:
2417 		me->signal->is_child_subreaper = !!arg2;
2418 		if (!arg2)
2419 			break;
2420 
2421 		walk_process_tree(me, propagate_has_child_subreaper, NULL);
2422 		break;
2423 	case PR_GET_CHILD_SUBREAPER:
2424 		error = put_user(me->signal->is_child_subreaper,
2425 				 (int __user *)arg2);
2426 		break;
2427 	case PR_SET_NO_NEW_PRIVS:
2428 		if (arg2 != 1 || arg3 || arg4 || arg5)
2429 			return -EINVAL;
2430 
2431 		task_set_no_new_privs(current);
2432 		break;
2433 	case PR_GET_NO_NEW_PRIVS:
2434 		if (arg2 || arg3 || arg4 || arg5)
2435 			return -EINVAL;
2436 		return task_no_new_privs(current) ? 1 : 0;
2437 	case PR_GET_THP_DISABLE:
2438 		if (arg2 || arg3 || arg4 || arg5)
2439 			return -EINVAL;
2440 		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2441 		break;
2442 	case PR_SET_THP_DISABLE:
2443 		if (arg3 || arg4 || arg5)
2444 			return -EINVAL;
2445 		if (down_write_killable(&me->mm->mmap_sem))
2446 			return -EINTR;
2447 		if (arg2)
2448 			set_bit(MMF_DISABLE_THP, &me->mm->flags);
2449 		else
2450 			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2451 		up_write(&me->mm->mmap_sem);
2452 		break;
2453 	case PR_MPX_ENABLE_MANAGEMENT:
2454 	case PR_MPX_DISABLE_MANAGEMENT:
2455 		/* No longer implemented: */
2456 		return -EINVAL;
2457 	case PR_SET_FP_MODE:
2458 		error = SET_FP_MODE(me, arg2);
2459 		break;
2460 	case PR_GET_FP_MODE:
2461 		error = GET_FP_MODE(me);
2462 		break;
2463 	case PR_SVE_SET_VL:
2464 		error = SVE_SET_VL(arg2);
2465 		break;
2466 	case PR_SVE_GET_VL:
2467 		error = SVE_GET_VL();
2468 		break;
2469 	case PR_GET_SPECULATION_CTRL:
2470 		if (arg3 || arg4 || arg5)
2471 			return -EINVAL;
2472 		error = arch_prctl_spec_ctrl_get(me, arg2);
2473 		break;
2474 	case PR_SET_SPECULATION_CTRL:
2475 		if (arg4 || arg5)
2476 			return -EINVAL;
2477 		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2478 		break;
2479 	case PR_PAC_RESET_KEYS:
2480 		if (arg3 || arg4 || arg5)
2481 			return -EINVAL;
2482 		error = PAC_RESET_KEYS(me, arg2);
2483 		break;
2484 	case PR_SET_TAGGED_ADDR_CTRL:
2485 		if (arg3 || arg4 || arg5)
2486 			return -EINVAL;
2487 		error = SET_TAGGED_ADDR_CTRL(arg2);
2488 		break;
2489 	case PR_GET_TAGGED_ADDR_CTRL:
2490 		if (arg2 || arg3 || arg4 || arg5)
2491 			return -EINVAL;
2492 		error = GET_TAGGED_ADDR_CTRL();
2493 		break;
2494 	case PR_SET_IO_FLUSHER:
2495 		if (!capable(CAP_SYS_RESOURCE))
2496 			return -EPERM;
2497 
2498 		if (arg3 || arg4 || arg5)
2499 			return -EINVAL;
2500 
2501 		if (arg2 == 1)
2502 			current->flags |= PR_IO_FLUSHER;
2503 		else if (!arg2)
2504 			current->flags &= ~PR_IO_FLUSHER;
2505 		else
2506 			return -EINVAL;
2507 		break;
2508 	case PR_GET_IO_FLUSHER:
2509 		if (!capable(CAP_SYS_RESOURCE))
2510 			return -EPERM;
2511 
2512 		if (arg2 || arg3 || arg4 || arg5)
2513 			return -EINVAL;
2514 
2515 		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
2516 		break;
2517 	default:
2518 		error = -EINVAL;
2519 		break;
2520 	}
2521 	return error;
2522 }
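
/*
 * Userspace sketch (not kernel code) of the PR_SET_NAME/PR_GET_NAME
 * pair handled above; the buffer is sizeof(task_struct.comm) == 16
 * bytes including the NUL terminator:
 *
 *	char comm[16];
 *
 *	if (prctl(PR_SET_NAME, (unsigned long)"worker-0", 0, 0, 0) ||
 *	    prctl(PR_GET_NAME, (unsigned long)comm, 0, 0, 0))
 *		err(1, "prctl");
 *	printf("thread name: %s\n", comm);
 */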
2523 
2524 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2525 		struct getcpu_cache __user *, unused)
2526 {
2527 	int err = 0;
2528 	int cpu = raw_smp_processor_id();
2529 
2530 	if (cpup)
2531 		err |= put_user(cpu, cpup);
2532 	if (nodep)
2533 		err |= put_user(cpu_to_node(cpu), nodep);
2534 	return err ? -EFAULT : 0;
2535 }
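
/*
 * Userspace sketch (not kernel code): the third (cache) argument is
 * ignored these days, so NULL is fine; glibc also exposes the cpu half
 * as sched_getcpu():
 *
 *	unsigned int cpu, node;
 *
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		printf("cpu %u on node %u\n", cpu, node);
 */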
2536 
2537 /**
2538  * do_sysinfo - fill in sysinfo struct
2539  * @info: pointer to buffer to fill
2540  */
2541 static int do_sysinfo(struct sysinfo *info)
2542 {
2543 	unsigned long mem_total, sav_total;
2544 	unsigned int mem_unit, bitcount;
2545 	struct timespec64 tp;
2546 
2547 	memset(info, 0, sizeof(struct sysinfo));
2548 
2549 	ktime_get_boottime_ts64(&tp);
2550 	timens_add_boottime(&tp);
2551 	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2552 
2553 	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2554 
2555 	info->procs = nr_threads;
2556 
2557 	si_meminfo(info);
2558 	si_swapinfo(info);
2559 
2560 	/*
2561 	 * If the sum of all the available memory (i.e. ram + swap)
2562 	 * is less than can be stored in a 32 bit unsigned long then
2563 	 * we can be binary compatible with 2.2.x kernels.  If not,
2564 	 * well, in that case 2.2.x was broken anyways...
2565 	 *
2566 	 *  -Erik Andersen <andersee@debian.org>
2567 	 */
2568 
2569 	mem_total = info->totalram + info->totalswap;
2570 	if (mem_total < info->totalram || mem_total < info->totalswap)
2571 		goto out;
2572 	bitcount = 0;
2573 	mem_unit = info->mem_unit;
2574 	while (mem_unit > 1) {
2575 		bitcount++;
2576 		mem_unit >>= 1;
2577 		sav_total = mem_total;
2578 		mem_total <<= 1;
2579 		if (mem_total < sav_total)
2580 			goto out;
2581 	}
2582 
2583 	/*
2584 	 * If mem_total did not overflow, multiply all memory values by
2585 	 * info->mem_unit and set it to 1.  This leaves things compatible
2586 	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2587 	 * kernels...
2588 	 */
2589 
2590 	info->mem_unit = 1;
2591 	info->totalram <<= bitcount;
2592 	info->freeram <<= bitcount;
2593 	info->sharedram <<= bitcount;
2594 	info->bufferram <<= bitcount;
2595 	info->totalswap <<= bitcount;
2596 	info->freeswap <<= bitcount;
2597 	info->totalhigh <<= bitcount;
2598 	info->freehigh <<= bitcount;
2599 
2600 out:
2601 	return 0;
2602 }
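
/*
 * Worked example of the normalization above, assuming mem_unit = 4096
 * and 4 GiB of RAM (totalram = 2^20 units, no swap): the loop halves
 * mem_unit 12 times, doubling mem_total up to 2^32.  On a 64-bit kernel
 * nothing overflows, so bitcount = 12 and totalram is reported as 2^32
 * with mem_unit = 1.  On a 32-bit kernel the final shift wraps, we bail
 * out early, and the values stay in units of 4096.
 */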
2603 
2604 SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2605 {
2606 	struct sysinfo val;
2607 
2608 	do_sysinfo(&val);
2609 
2610 	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2611 		return -EFAULT;
2612 
2613 	return 0;
2614 }
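
/*
 * Userspace sketch (not kernel code), assuming glibc's <sys/sysinfo.h>
 * wrapper; memory fields are scaled by mem_unit:
 *
 *	struct sysinfo si;
 *
 *	if (sysinfo(&si) == 0)
 *		printf("up %lds, ram %llu bytes\n", si.uptime,
 *		       (unsigned long long)si.totalram * si.mem_unit);
 */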
2615 
2616 #ifdef CONFIG_COMPAT
2617 struct compat_sysinfo {
2618 	s32 uptime;
2619 	u32 loads[3];
2620 	u32 totalram;
2621 	u32 freeram;
2622 	u32 sharedram;
2623 	u32 bufferram;
2624 	u32 totalswap;
2625 	u32 freeswap;
2626 	u16 procs;
2627 	u16 pad;
2628 	u32 totalhigh;
2629 	u32 freehigh;
2630 	u32 mem_unit;
2631 	char _f[20-2*sizeof(u32)-sizeof(int)];
2632 };
2633 
2634 COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2635 {
2636 	struct sysinfo s;
2637 
2638 	do_sysinfo(&s);
2639 
2640 	/* Check to see if any memory value is too large for 32 bits
2641 	 * and scale down if needed.
2642 	 */
2643 	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2644 		int bitcount = 0;
2645 
2646 		while (s.mem_unit < PAGE_SIZE) {
2647 			s.mem_unit <<= 1;
2648 			bitcount++;
2649 		}
2650 
2651 		s.totalram >>= bitcount;
2652 		s.freeram >>= bitcount;
2653 		s.sharedram >>= bitcount;
2654 		s.bufferram >>= bitcount;
2655 		s.totalswap >>= bitcount;
2656 		s.freeswap >>= bitcount;
2657 		s.totalhigh >>= bitcount;
2658 		s.freehigh >>= bitcount;
2659 	}
2660 
2661 	if (!access_ok(info, sizeof(struct compat_sysinfo)) ||
2662 	    __put_user(s.uptime, &info->uptime) ||
2663 	    __put_user(s.loads[0], &info->loads[0]) ||
2664 	    __put_user(s.loads[1], &info->loads[1]) ||
2665 	    __put_user(s.loads[2], &info->loads[2]) ||
2666 	    __put_user(s.totalram, &info->totalram) ||
2667 	    __put_user(s.freeram, &info->freeram) ||
2668 	    __put_user(s.sharedram, &info->sharedram) ||
2669 	    __put_user(s.bufferram, &info->bufferram) ||
2670 	    __put_user(s.totalswap, &info->totalswap) ||
2671 	    __put_user(s.freeswap, &info->freeswap) ||
2672 	    __put_user(s.procs, &info->procs) ||
2673 	    __put_user(s.totalhigh, &info->totalhigh) ||
2674 	    __put_user(s.freehigh, &info->freehigh) ||
2675 	    __put_user(s.mem_unit, &info->mem_unit))
2676 		return -EFAULT;
2677 
2678 	return 0;
2679 }
2680 #endif /* CONFIG_COMPAT */
2681