1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   *  linux/kernel/sys.c
4   *
5   *  Copyright (C) 1991, 1992  Linus Torvalds
6   */
7  
8  #include <linux/export.h>
9  #include <linux/mm.h>
10  #include <linux/utsname.h>
11  #include <linux/mman.h>
12  #include <linux/reboot.h>
13  #include <linux/prctl.h>
14  #include <linux/highuid.h>
15  #include <linux/fs.h>
16  #include <linux/kmod.h>
17  #include <linux/perf_event.h>
18  #include <linux/resource.h>
19  #include <linux/kernel.h>
20  #include <linux/workqueue.h>
21  #include <linux/capability.h>
22  #include <linux/device.h>
23  #include <linux/key.h>
24  #include <linux/times.h>
25  #include <linux/posix-timers.h>
26  #include <linux/security.h>
27  #include <linux/suspend.h>
28  #include <linux/tty.h>
29  #include <linux/signal.h>
30  #include <linux/cn_proc.h>
31  #include <linux/getcpu.h>
32  #include <linux/task_io_accounting_ops.h>
33  #include <linux/seccomp.h>
34  #include <linux/cpu.h>
35  #include <linux/personality.h>
36  #include <linux/ptrace.h>
37  #include <linux/fs_struct.h>
38  #include <linux/file.h>
39  #include <linux/mount.h>
40  #include <linux/gfp.h>
41  #include <linux/syscore_ops.h>
42  #include <linux/version.h>
43  #include <linux/ctype.h>
44  #include <linux/syscall_user_dispatch.h>
45  
46  #include <linux/compat.h>
47  #include <linux/syscalls.h>
48  #include <linux/kprobes.h>
49  #include <linux/user_namespace.h>
50  #include <linux/time_namespace.h>
51  #include <linux/binfmts.h>
52  
53  #include <linux/sched.h>
54  #include <linux/sched/autogroup.h>
55  #include <linux/sched/loadavg.h>
56  #include <linux/sched/stat.h>
57  #include <linux/sched/mm.h>
58  #include <linux/sched/coredump.h>
59  #include <linux/sched/task.h>
60  #include <linux/sched/cputime.h>
61  #include <linux/rcupdate.h>
62  #include <linux/uidgid.h>
63  #include <linux/cred.h>
64  
65  #include <linux/nospec.h>
66  
67  #include <linux/kmsg_dump.h>
68  /* Move somewhere else to avoid recompiling? */
69  #include <generated/utsrelease.h>
70  
71  #include <linux/uaccess.h>
72  #include <asm/io.h>
73  #include <asm/unistd.h>
74  
75  #include "uid16.h"
76  
77  #ifndef SET_UNALIGN_CTL
78  # define SET_UNALIGN_CTL(a, b)	(-EINVAL)
79  #endif
80  #ifndef GET_UNALIGN_CTL
81  # define GET_UNALIGN_CTL(a, b)	(-EINVAL)
82  #endif
83  #ifndef SET_FPEMU_CTL
84  # define SET_FPEMU_CTL(a, b)	(-EINVAL)
85  #endif
86  #ifndef GET_FPEMU_CTL
87  # define GET_FPEMU_CTL(a, b)	(-EINVAL)
88  #endif
89  #ifndef SET_FPEXC_CTL
90  # define SET_FPEXC_CTL(a, b)	(-EINVAL)
91  #endif
92  #ifndef GET_FPEXC_CTL
93  # define GET_FPEXC_CTL(a, b)	(-EINVAL)
94  #endif
95  #ifndef GET_ENDIAN
96  # define GET_ENDIAN(a, b)	(-EINVAL)
97  #endif
98  #ifndef SET_ENDIAN
99  # define SET_ENDIAN(a, b)	(-EINVAL)
100  #endif
101  #ifndef GET_TSC_CTL
102  # define GET_TSC_CTL(a)		(-EINVAL)
103  #endif
104  #ifndef SET_TSC_CTL
105  # define SET_TSC_CTL(a)		(-EINVAL)
106  #endif
107  #ifndef GET_FP_MODE
108  # define GET_FP_MODE(a)		(-EINVAL)
109  #endif
110  #ifndef SET_FP_MODE
111  # define SET_FP_MODE(a, b)	(-EINVAL)
112  #endif
113  #ifndef SVE_SET_VL
114  # define SVE_SET_VL(a)		(-EINVAL)
115  #endif
116  #ifndef SVE_GET_VL
117  # define SVE_GET_VL()		(-EINVAL)
118  #endif
119  #ifndef PAC_RESET_KEYS
120  # define PAC_RESET_KEYS(a, b)	(-EINVAL)
121  #endif
122  #ifndef SET_TAGGED_ADDR_CTRL
123  # define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
124  #endif
125  #ifndef GET_TAGGED_ADDR_CTRL
126  # define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
127  #endif
128  
129  /*
130   * This is where the system-wide overflow UID and GID are defined, for
131   * architectures that now have 32-bit UID/GID but didn't in the past.
132   */
133  
134  int overflowuid = DEFAULT_OVERFLOWUID;
135  int overflowgid = DEFAULT_OVERFLOWGID;
136  
137  EXPORT_SYMBOL(overflowuid);
138  EXPORT_SYMBOL(overflowgid);
139  
140  /*
141   * The same as above, but for filesystems which can only store a 16-bit
142   * UID and GID.  As such, this is needed on all architectures.
143   */
144  
145  int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
146  int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
147  
148  EXPORT_SYMBOL(fs_overflowuid);
149  EXPORT_SYMBOL(fs_overflowgid);
150  
151  /*
152   * Returns true if current's euid is the same as p's uid or euid,
153   * or if current has CAP_SYS_NICE in p's user_ns.
154   *
155   * Called with rcu_read_lock held, so the creds are safe.
156   */
157  static bool set_one_prio_perm(struct task_struct *p)
158  {
159  	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
160  
161  	if (uid_eq(pcred->uid,  cred->euid) ||
162  	    uid_eq(pcred->euid, cred->euid))
163  		return true;
164  	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
165  		return true;
166  	return false;
167  }
168  
169  /*
170   * set the priority of a task
171   * - the caller must hold the RCU read lock
172   */
173  static int set_one_prio(struct task_struct *p, int niceval, int error)
174  {
175  	int no_nice;
176  
177  	if (!set_one_prio_perm(p)) {
178  		error = -EPERM;
179  		goto out;
180  	}
181  	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
182  		error = -EACCES;
183  		goto out;
184  	}
185  	no_nice = security_task_setnice(p, niceval);
186  	if (no_nice) {
187  		error = no_nice;
188  		goto out;
189  	}
190  	if (error == -ESRCH)
191  		error = 0;
192  	set_user_nice(p, niceval);
193  out:
194  	return error;
195  }
196  
197  SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
198  {
199  	struct task_struct *g, *p;
200  	struct user_struct *user;
201  	const struct cred *cred = current_cred();
202  	int error = -EINVAL;
203  	struct pid *pgrp;
204  	kuid_t uid;
205  
206  	if (which > PRIO_USER || which < PRIO_PROCESS)
207  		goto out;
208  
209  	/* normalize: avoid signed division (rounding problems) */
210  	error = -ESRCH;
211  	if (niceval < MIN_NICE)
212  		niceval = MIN_NICE;
213  	if (niceval > MAX_NICE)
214  		niceval = MAX_NICE;
215  
216  	rcu_read_lock();
217  	read_lock(&tasklist_lock);
218  	switch (which) {
219  	case PRIO_PROCESS:
220  		if (who)
221  			p = find_task_by_vpid(who);
222  		else
223  			p = current;
224  		if (p)
225  			error = set_one_prio(p, niceval, error);
226  		break;
227  	case PRIO_PGRP:
228  		if (who)
229  			pgrp = find_vpid(who);
230  		else
231  			pgrp = task_pgrp(current);
232  		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
233  			error = set_one_prio(p, niceval, error);
234  		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
235  		break;
236  	case PRIO_USER:
237  		uid = make_kuid(cred->user_ns, who);
238  		user = cred->user;
239  		if (!who)
240  			uid = cred->uid;
241  		else if (!uid_eq(uid, cred->uid)) {
242  			user = find_user(uid);
243  			if (!user)
244  				goto out_unlock;	/* No processes for this user */
245  		}
246  		do_each_thread(g, p) {
247  			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
248  				error = set_one_prio(p, niceval, error);
249  		} while_each_thread(g, p);
250  		if (!uid_eq(uid, cred->uid))
251  			free_uid(user);		/* For find_user() */
252  		break;
253  	}
254  out_unlock:
255  	read_unlock(&tasklist_lock);
256  	rcu_read_unlock();
257  out:
258  	return error;
259  }
260  
261  /*
262   * Ugh. To avoid negative return values, "getpriority()" will
263   * not return the normal nice-value, but a negated value that
264   * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
265   * to stay compatible.
266   */
267  SYSCALL_DEFINE2(getpriority, int, which, int, who)
268  {
269  	struct task_struct *g, *p;
270  	struct user_struct *user;
271  	const struct cred *cred = current_cred();
272  	long niceval, retval = -ESRCH;
273  	struct pid *pgrp;
274  	kuid_t uid;
275  
276  	if (which > PRIO_USER || which < PRIO_PROCESS)
277  		return -EINVAL;
278  
279  	rcu_read_lock();
280  	read_lock(&tasklist_lock);
281  	switch (which) {
282  	case PRIO_PROCESS:
283  		if (who)
284  			p = find_task_by_vpid(who);
285  		else
286  			p = current;
287  		if (p) {
288  			niceval = nice_to_rlimit(task_nice(p));
289  			if (niceval > retval)
290  				retval = niceval;
291  		}
292  		break;
293  	case PRIO_PGRP:
294  		if (who)
295  			pgrp = find_vpid(who);
296  		else
297  			pgrp = task_pgrp(current);
298  		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
299  			niceval = nice_to_rlimit(task_nice(p));
300  			if (niceval > retval)
301  				retval = niceval;
302  		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
303  		break;
304  	case PRIO_USER:
305  		uid = make_kuid(cred->user_ns, who);
306  		user = cred->user;
307  		if (!who)
308  			uid = cred->uid;
309  		else if (!uid_eq(uid, cred->uid)) {
310  			user = find_user(uid);
311  			if (!user)
312  				goto out_unlock;	/* No processes for this user */
313  		}
314  		do_each_thread(g, p) {
315  			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
316  				niceval = nice_to_rlimit(task_nice(p));
317  				if (niceval > retval)
318  					retval = niceval;
319  			}
320  		} while_each_thread(g, p);
321  		if (!uid_eq(uid, cred->uid))
322  			free_uid(user);		/* for find_user() */
323  		break;
324  	}
325  out_unlock:
326  	read_unlock(&tasklist_lock);
327  	rcu_read_unlock();
328  
329  	return retval;
330  }
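
/*
 * Illustrative userspace sketch (not part of this file): the raw syscall
 * returns 20 - nice, i.e. 40..1, and it is the glibc getpriority() wrapper
 * that converts this back to the familiar -20..19 range:
 *
 *	#include <sys/resource.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice_value = 20 - raw;	// e.g. raw == 40 means nice -20
 */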
331  
332  /*
333   * Unprivileged users may change the real gid to the effective gid
334   * or vice versa.  (BSD-style)
335   *
336   * If you set the real gid at all, or set the effective gid to a value not
337   * equal to the real gid, then the saved gid is set to the new effective gid.
338   *
339   * This makes it possible for a setgid program to completely drop its
340   * privileges, which is often a useful assertion to make when you are doing
341   * a security audit of a program.
342   *
343   * The general idea is that a program which uses just setregid() will be
344   * 100% compatible with BSD.  A program which uses just setgid() will be
345   * 100% compatible with POSIX with saved IDs.
346   *
347   * SMP: There are no races, the GIDs are checked only by filesystem
348   *      operations (as far as semantic preservation is concerned).
349   */
350  #ifdef CONFIG_MULTIUSER
351  long __sys_setregid(gid_t rgid, gid_t egid)
352  {
353  	struct user_namespace *ns = current_user_ns();
354  	const struct cred *old;
355  	struct cred *new;
356  	int retval;
357  	kgid_t krgid, kegid;
358  
359  	krgid = make_kgid(ns, rgid);
360  	kegid = make_kgid(ns, egid);
361  
362  	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
363  		return -EINVAL;
364  	if ((egid != (gid_t) -1) && !gid_valid(kegid))
365  		return -EINVAL;
366  
367  	new = prepare_creds();
368  	if (!new)
369  		return -ENOMEM;
370  	old = current_cred();
371  
372  	retval = -EPERM;
373  	if (rgid != (gid_t) -1) {
374  		if (gid_eq(old->gid, krgid) ||
375  		    gid_eq(old->egid, krgid) ||
376  		    ns_capable_setid(old->user_ns, CAP_SETGID))
377  			new->gid = krgid;
378  		else
379  			goto error;
380  	}
381  	if (egid != (gid_t) -1) {
382  		if (gid_eq(old->gid, kegid) ||
383  		    gid_eq(old->egid, kegid) ||
384  		    gid_eq(old->sgid, kegid) ||
385  		    ns_capable_setid(old->user_ns, CAP_SETGID))
386  			new->egid = kegid;
387  		else
388  			goto error;
389  	}
390  
391  	if (rgid != (gid_t) -1 ||
392  	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
393  		new->sgid = new->egid;
394  	new->fsgid = new->egid;
395  
396  	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
397  	if (retval < 0)
398  		goto error;
399  
400  	return commit_creds(new);
401  
402  error:
403  	abort_creds(new);
404  	return retval;
405  }
406  
407  SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
408  {
409  	return __sys_setregid(rgid, egid);
410  }
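
/*
 * Illustrative userspace sketch (not part of this file): per the comment
 * above, a setgid program can drop its privilege permanently by setting
 * the real gid, which also resets the saved gid:
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	if (setregid(getgid(), getgid()) != 0)
 *		abort();	// still holding the setgid privilege
 */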
411  
412  /*
413   * setgid() is implemented like SysV w/ SAVED_IDS
414   *
415   * SMP: Same implicit races as above.
416   */
417  long __sys_setgid(gid_t gid)
418  {
419  	struct user_namespace *ns = current_user_ns();
420  	const struct cred *old;
421  	struct cred *new;
422  	int retval;
423  	kgid_t kgid;
424  
425  	kgid = make_kgid(ns, gid);
426  	if (!gid_valid(kgid))
427  		return -EINVAL;
428  
429  	new = prepare_creds();
430  	if (!new)
431  		return -ENOMEM;
432  	old = current_cred();
433  
434  	retval = -EPERM;
435  	if (ns_capable_setid(old->user_ns, CAP_SETGID))
436  		new->gid = new->egid = new->sgid = new->fsgid = kgid;
437  	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
438  		new->egid = new->fsgid = kgid;
439  	else
440  		goto error;
441  
442  	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
443  	if (retval < 0)
444  		goto error;
445  
446  	return commit_creds(new);
447  
448  error:
449  	abort_creds(new);
450  	return retval;
451  }
452  
453  SYSCALL_DEFINE1(setgid, gid_t, gid)
454  {
455  	return __sys_setgid(gid);
456  }
457  
458  /*
459   * change the user struct in a credentials set to match the new UID
460   */
461  static int set_user(struct cred *new)
462  {
463  	struct user_struct *new_user;
464  
465  	new_user = alloc_uid(new->uid);
466  	if (!new_user)
467  		return -EAGAIN;
468  
469  	/*
470  	 * We don't fail in case of NPROC limit excess here because too many
471  	 * poorly written programs don't check set*uid() return code, assuming
472  	 * it never fails if called by root.  We may still enforce NPROC limit
473  	 * for programs doing set*uid()+execve() by harmlessly deferring the
474  	 * failure to the execve() stage.
475  	 */
476  	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
477  			new_user != INIT_USER)
478  		current->flags |= PF_NPROC_EXCEEDED;
479  	else
480  		current->flags &= ~PF_NPROC_EXCEEDED;
481  
482  	free_uid(new->user);
483  	new->user = new_user;
484  	return 0;
485  }
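
/*
 * The deferred failure above matters for userspace like this illustrative
 * sketch (not part of this file; uid, path, argv and envp are
 * placeholders), which wrongly ignores the set*uid() return value; the
 * kernel flags the task instead and makes the following execve() fail
 * with -EAGAIN:
 *
 *	setuid(uid);			// return value wrongly ignored
 *	execve(path, argv, envp);	// NPROC is enforced here instead
 */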
486  
487  /*
488   * Unprivileged users may change the real uid to the effective uid
489   * or vice versa.  (BSD-style)
490   *
491   * If you set the real uid at all, or set the effective uid to a value not
492   * equal to the real uid, then the saved uid is set to the new effective uid.
493   *
494   * This makes it possible for a setuid program to completely drop its
495   * privileges, which is often a useful assertion to make when you are doing
496   * a security audit of a program.
497   *
498   * The general idea is that a program which uses just setreuid() will be
499   * 100% compatible with BSD.  A program which uses just setuid() will be
500   * 100% compatible with POSIX with saved IDs.
501   */
502  long __sys_setreuid(uid_t ruid, uid_t euid)
503  {
504  	struct user_namespace *ns = current_user_ns();
505  	const struct cred *old;
506  	struct cred *new;
507  	int retval;
508  	kuid_t kruid, keuid;
509  
510  	kruid = make_kuid(ns, ruid);
511  	keuid = make_kuid(ns, euid);
512  
513  	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
514  		return -EINVAL;
515  	if ((euid != (uid_t) -1) && !uid_valid(keuid))
516  		return -EINVAL;
517  
518  	new = prepare_creds();
519  	if (!new)
520  		return -ENOMEM;
521  	old = current_cred();
522  
523  	retval = -EPERM;
524  	if (ruid != (uid_t) -1) {
525  		new->uid = kruid;
526  		if (!uid_eq(old->uid, kruid) &&
527  		    !uid_eq(old->euid, kruid) &&
528  		    !ns_capable_setid(old->user_ns, CAP_SETUID))
529  			goto error;
530  	}
531  
532  	if (euid != (uid_t) -1) {
533  		new->euid = keuid;
534  		if (!uid_eq(old->uid, keuid) &&
535  		    !uid_eq(old->euid, keuid) &&
536  		    !uid_eq(old->suid, keuid) &&
537  		    !ns_capable_setid(old->user_ns, CAP_SETUID))
538  			goto error;
539  	}
540  
541  	if (!uid_eq(new->uid, old->uid)) {
542  		retval = set_user(new);
543  		if (retval < 0)
544  			goto error;
545  	}
546  	if (ruid != (uid_t) -1 ||
547  	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
548  		new->suid = new->euid;
549  	new->fsuid = new->euid;
550  
551  	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
552  	if (retval < 0)
553  		goto error;
554  
555  	return commit_creds(new);
556  
557  error:
558  	abort_creds(new);
559  	return retval;
560  }
561  
562  SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
563  {
564  	return __sys_setreuid(ruid, euid);
565  }
566  
567  /*
568   * setuid() is implemented like SysV with SAVED_IDS
569   *
570   * Note that SAVED_IDS is deficient in that a setuid root program
571   * like sendmail, for example, cannot set its uid to be a normal
572   * user and then switch back, because if you're root, setuid() sets
573   * the saved uid too.  If you don't like this, blame the bright people
574   * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
575   * will allow a root program to temporarily drop privileges and be able to
576   * regain them by swapping the real and effective uid.
577   */
578  long __sys_setuid(uid_t uid)
579  {
580  	struct user_namespace *ns = current_user_ns();
581  	const struct cred *old;
582  	struct cred *new;
583  	int retval;
584  	kuid_t kuid;
585  
586  	kuid = make_kuid(ns, uid);
587  	if (!uid_valid(kuid))
588  		return -EINVAL;
589  
590  	new = prepare_creds();
591  	if (!new)
592  		return -ENOMEM;
593  	old = current_cred();
594  
595  	retval = -EPERM;
596  	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
597  		new->suid = new->uid = kuid;
598  		if (!uid_eq(kuid, old->uid)) {
599  			retval = set_user(new);
600  			if (retval < 0)
601  				goto error;
602  		}
603  	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
604  		goto error;
605  	}
606  
607  	new->fsuid = new->euid = kuid;
608  
609  	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
610  	if (retval < 0)
611  		goto error;
612  
613  	return commit_creds(new);
614  
615  error:
616  	abort_creds(new);
617  	return retval;
618  }
619  
620  SYSCALL_DEFINE1(setuid, uid_t, uid)
621  {
622  	return __sys_setuid(uid);
623  }
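
/*
 * Illustrative userspace sketch (not part of this file) of the BSD-style
 * escape hatch described above: a setuid-root program can temporarily drop
 * privileges with setreuid() and regain them later, which setuid() alone
 * cannot do because it would also overwrite the saved uid:
 *
 *	#include <unistd.h>
 *
 *	setreuid(geteuid(), getuid());	// swap: effective becomes the user
 *	// ... unprivileged work ...
 *	setreuid(geteuid(), getuid());	// swap back: effective root again
 */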
624  
625  
626  /*
627   * This function implements a generic ability to update ruid, euid,
628   * and suid.  This allows you to implement the 4.4 compatible seteuid().
629   */
630  long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
631  {
632  	struct user_namespace *ns = current_user_ns();
633  	const struct cred *old;
634  	struct cred *new;
635  	int retval;
636  	kuid_t kruid, keuid, ksuid;
637  
638  	kruid = make_kuid(ns, ruid);
639  	keuid = make_kuid(ns, euid);
640  	ksuid = make_kuid(ns, suid);
641  
642  	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
643  		return -EINVAL;
644  
645  	if ((euid != (uid_t) -1) && !uid_valid(keuid))
646  		return -EINVAL;
647  
648  	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
649  		return -EINVAL;
650  
651  	new = prepare_creds();
652  	if (!new)
653  		return -ENOMEM;
654  
655  	old = current_cred();
656  
657  	retval = -EPERM;
658  	if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
659  		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
660  		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
661  			goto error;
662  		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
663  		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
664  			goto error;
665  		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
666  		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
667  			goto error;
668  	}
669  
670  	if (ruid != (uid_t) -1) {
671  		new->uid = kruid;
672  		if (!uid_eq(kruid, old->uid)) {
673  			retval = set_user(new);
674  			if (retval < 0)
675  				goto error;
676  		}
677  	}
678  	if (euid != (uid_t) -1)
679  		new->euid = keuid;
680  	if (suid != (uid_t) -1)
681  		new->suid = ksuid;
682  	new->fsuid = new->euid;
683  
684  	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
685  	if (retval < 0)
686  		goto error;
687  
688  	return commit_creds(new);
689  
690  error:
691  	abort_creds(new);
692  	return retval;
693  }
694  
695  SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
696  {
697  	return __sys_setresuid(ruid, euid, suid);
698  }
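
/*
 * Illustrative userspace sketch (not part of this file): the "generic
 * ability" above is what makes a seteuid()-style call trivial - pass -1
 * for the ids that should stay untouched (new_euid is a placeholder):
 *
 *	#include <unistd.h>
 *
 *	setresuid(-1, new_euid, -1);	// change only the effective uid
 */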
699  
700  SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
701  {
702  	const struct cred *cred = current_cred();
703  	int retval;
704  	uid_t ruid, euid, suid;
705  
706  	ruid = from_kuid_munged(cred->user_ns, cred->uid);
707  	euid = from_kuid_munged(cred->user_ns, cred->euid);
708  	suid = from_kuid_munged(cred->user_ns, cred->suid);
709  
710  	retval = put_user(ruid, ruidp);
711  	if (!retval) {
712  		retval = put_user(euid, euidp);
713  		if (!retval)
714  			return put_user(suid, suidp);
715  	}
716  	return retval;
717  }
718  
719  /*
720   * Same as above, but for rgid, egid, sgid.
721   */
722  long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
723  {
724  	struct user_namespace *ns = current_user_ns();
725  	const struct cred *old;
726  	struct cred *new;
727  	int retval;
728  	kgid_t krgid, kegid, ksgid;
729  
730  	krgid = make_kgid(ns, rgid);
731  	kegid = make_kgid(ns, egid);
732  	ksgid = make_kgid(ns, sgid);
733  
734  	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
735  		return -EINVAL;
736  	if ((egid != (gid_t) -1) && !gid_valid(kegid))
737  		return -EINVAL;
738  	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
739  		return -EINVAL;
740  
741  	new = prepare_creds();
742  	if (!new)
743  		return -ENOMEM;
744  	old = current_cred();
745  
746  	retval = -EPERM;
747  	if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
748  		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
749  		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
750  			goto error;
751  		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
752  		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
753  			goto error;
754  		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
755  		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
756  			goto error;
757  	}
758  
759  	if (rgid != (gid_t) -1)
760  		new->gid = krgid;
761  	if (egid != (gid_t) -1)
762  		new->egid = kegid;
763  	if (sgid != (gid_t) -1)
764  		new->sgid = ksgid;
765  	new->fsgid = new->egid;
766  
767  	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
768  	if (retval < 0)
769  		goto error;
770  
771  	return commit_creds(new);
772  
773  error:
774  	abort_creds(new);
775  	return retval;
776  }
777  
778  SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
779  {
780  	return __sys_setresgid(rgid, egid, sgid);
781  }
782  
783  SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
784  {
785  	const struct cred *cred = current_cred();
786  	int retval;
787  	gid_t rgid, egid, sgid;
788  
789  	rgid = from_kgid_munged(cred->user_ns, cred->gid);
790  	egid = from_kgid_munged(cred->user_ns, cred->egid);
791  	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
792  
793  	retval = put_user(rgid, rgidp);
794  	if (!retval) {
795  		retval = put_user(egid, egidp);
796  		if (!retval)
797  			retval = put_user(sgid, sgidp);
798  	}
799  
800  	return retval;
801  }
802  
803  
804  /*
805   * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
806   * is used for "access()" and for the NFS daemon (letting nfsd stay at
807   * whatever uid it wants to). It normally shadows "euid", except when
808   * explicitly set by setfsuid() or for access().
809   */
810  long __sys_setfsuid(uid_t uid)
811  {
812  	const struct cred *old;
813  	struct cred *new;
814  	uid_t old_fsuid;
815  	kuid_t kuid;
816  
817  	old = current_cred();
818  	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
819  
820  	kuid = make_kuid(old->user_ns, uid);
821  	if (!uid_valid(kuid))
822  		return old_fsuid;
823  
824  	new = prepare_creds();
825  	if (!new)
826  		return old_fsuid;
827  
828  	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
829  	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
830  	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
831  		if (!uid_eq(kuid, old->fsuid)) {
832  			new->fsuid = kuid;
833  			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
834  				goto change_okay;
835  		}
836  	}
837  
838  	abort_creds(new);
839  	return old_fsuid;
840  
841  change_okay:
842  	commit_creds(new);
843  	return old_fsuid;
844  }
845  
846  SYSCALL_DEFINE1(setfsuid, uid_t, uid)
847  {
848  	return __sys_setfsuid(uid);
849  }
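
/*
 * Note that setfsuid() returns the previous fsuid even when the change is
 * refused, so userspace detects failure with a second, no-op call, as in
 * this illustrative sketch (not part of this file; uid and handle_error
 * are placeholders):
 *
 *	#include <sys/fsuid.h>
 *
 *	setfsuid(uid);
 *	if ((uid_t)setfsuid(-1) != uid)	// -1 is invalid, so this only reads
 *		handle_error();		// hypothetical error handler
 */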
850  
851  /*
852   * Samma på svenska.. (Swedish: "the same, in Swedish" - setfsgid() mirrors setfsuid())
853   */
854  long __sys_setfsgid(gid_t gid)
855  {
856  	const struct cred *old;
857  	struct cred *new;
858  	gid_t old_fsgid;
859  	kgid_t kgid;
860  
861  	old = current_cred();
862  	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
863  
864  	kgid = make_kgid(old->user_ns, gid);
865  	if (!gid_valid(kgid))
866  		return old_fsgid;
867  
868  	new = prepare_creds();
869  	if (!new)
870  		return old_fsgid;
871  
872  	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
873  	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
874  	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
875  		if (!gid_eq(kgid, old->fsgid)) {
876  			new->fsgid = kgid;
877  			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
878  				goto change_okay;
879  		}
880  	}
881  
882  	abort_creds(new);
883  	return old_fsgid;
884  
885  change_okay:
886  	commit_creds(new);
887  	return old_fsgid;
888  }
889  
890  SYSCALL_DEFINE1(setfsgid, gid_t, gid)
891  {
892  	return __sys_setfsgid(gid);
893  }
894  #endif /* CONFIG_MULTIUSER */
895  
896  /**
897   * sys_getpid - return the thread group id of the current process
898   *
899   * Note, despite the name, this returns the tgid, not the pid.  The tgid and
900   * the pid are identical unless CLONE_THREAD was specified on clone() in
901   * which case the tgid is the same in all threads of the same group.
902   *
903   * This is SMP safe as current->tgid does not change.
904   */
905  SYSCALL_DEFINE0(getpid)
906  {
907  	return task_tgid_vnr(current);
908  }
909  
910  /* Thread ID - the internal kernel "pid" */
911  SYSCALL_DEFINE0(gettid)
912  {
913  	return task_pid_vnr(current);
914  }
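
/*
 * Illustrative userspace sketch (not part of this file): in a
 * multithreaded process getpid() is identical in every thread, while
 * gettid() is per-thread; the two match only in the main thread:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(SYS_gettid);	// == tgid only in main thread
 */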
915  
916  /*
917   * Accessing ->real_parent is not SMP-safe, it could
918   * change from under us. However, we can use a stale
919   * value of ->real_parent under rcu_read_lock(), see
920   * release_task()->call_rcu(delayed_put_task_struct).
921   */
922  SYSCALL_DEFINE0(getppid)
923  {
924  	int pid;
925  
926  	rcu_read_lock();
927  	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
928  	rcu_read_unlock();
929  
930  	return pid;
931  }
932  
933  SYSCALL_DEFINE0(getuid)
934  {
935  	/* Only we change this so SMP safe */
936  	return from_kuid_munged(current_user_ns(), current_uid());
937  }
938  
939  SYSCALL_DEFINE0(geteuid)
940  {
941  	/* Only we change this so SMP safe */
942  	return from_kuid_munged(current_user_ns(), current_euid());
943  }
944  
945  SYSCALL_DEFINE0(getgid)
946  {
947  	/* Only we change this so SMP safe */
948  	return from_kgid_munged(current_user_ns(), current_gid());
949  }
950  
951  SYSCALL_DEFINE0(getegid)
952  {
953  	/* Only we change this so SMP safe */
954  	return from_kgid_munged(current_user_ns(), current_egid());
955  }
956  
957  static void do_sys_times(struct tms *tms)
958  {
959  	u64 tgutime, tgstime, cutime, cstime;
960  
961  	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
962  	cutime = current->signal->cutime;
963  	cstime = current->signal->cstime;
964  	tms->tms_utime = nsec_to_clock_t(tgutime);
965  	tms->tms_stime = nsec_to_clock_t(tgstime);
966  	tms->tms_cutime = nsec_to_clock_t(cutime);
967  	tms->tms_cstime = nsec_to_clock_t(cstime);
968  }
969  
970  SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
971  {
972  	if (tbuf) {
973  		struct tms tmp;
974  
975  		do_sys_times(&tmp);
976  		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
977  			return -EFAULT;
978  	}
979  	force_successful_syscall_return();
980  	return (long) jiffies_64_to_clock_t(get_jiffies_64());
981  }
982  
983  #ifdef CONFIG_COMPAT
984  static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
985  {
986  	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
987  }
988  
989  COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
990  {
991  	if (tbuf) {
992  		struct tms tms;
993  		struct compat_tms tmp;
994  
995  		do_sys_times(&tms);
996  		/* Convert our struct tms to the compat version. */
997  		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
998  		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
999  		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
1000  		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
1001  		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
1002  			return -EFAULT;
1003  	}
1004  	force_successful_syscall_return();
1005  	return compat_jiffies_to_clock_t(jiffies);
1006  }
1007  #endif
1008  
1009  /*
1010   * This needs some heavy checking ...
1011   * I just haven't the stomach for it. I also don't fully
1012   * understand sessions/pgrp etc. Let somebody who does explain it.
1013   *
1014   * OK, I think I have the protection semantics right.... this is really
1015   * only important on a multi-user system anyway, to make sure one user
1016   * can't send a signal to a process owned by another.  -TYT, 12/12/91
1017   *
1018   * The !PF_FORKNOEXEC check is needed to conform completely to POSIX.
1019   */
1020  SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1021  {
1022  	struct task_struct *p;
1023  	struct task_struct *group_leader = current->group_leader;
1024  	struct pid *pgrp;
1025  	int err;
1026  
1027  	if (!pid)
1028  		pid = task_pid_vnr(group_leader);
1029  	if (!pgid)
1030  		pgid = pid;
1031  	if (pgid < 0)
1032  		return -EINVAL;
1033  	rcu_read_lock();
1034  
1035  	/* From this point forward we keep holding onto the tasklist lock
1036  	 * so that our parent does not change from under us. -DaveM
1037  	 */
1038  	write_lock_irq(&tasklist_lock);
1039  
1040  	err = -ESRCH;
1041  	p = find_task_by_vpid(pid);
1042  	if (!p)
1043  		goto out;
1044  
1045  	err = -EINVAL;
1046  	if (!thread_group_leader(p))
1047  		goto out;
1048  
1049  	if (same_thread_group(p->real_parent, group_leader)) {
1050  		err = -EPERM;
1051  		if (task_session(p) != task_session(group_leader))
1052  			goto out;
1053  		err = -EACCES;
1054  		if (!(p->flags & PF_FORKNOEXEC))
1055  			goto out;
1056  	} else {
1057  		err = -ESRCH;
1058  		if (p != group_leader)
1059  			goto out;
1060  	}
1061  
1062  	err = -EPERM;
1063  	if (p->signal->leader)
1064  		goto out;
1065  
1066  	pgrp = task_pid(p);
1067  	if (pgid != pid) {
1068  		struct task_struct *g;
1069  
1070  		pgrp = find_vpid(pgid);
1071  		g = pid_task(pgrp, PIDTYPE_PGID);
1072  		if (!g || task_session(g) != task_session(group_leader))
1073  			goto out;
1074  	}
1075  
1076  	err = security_task_setpgid(p, pgid);
1077  	if (err)
1078  		goto out;
1079  
1080  	if (task_pgrp(p) != pgrp)
1081  		change_pid(p, PIDTYPE_PGID, pgrp);
1082  
1083  	err = 0;
1084  out:
1085  	/* All paths lead to here, thus we are safe. -DaveM */
1086  	write_unlock_irq(&tasklist_lock);
1087  	rcu_read_unlock();
1088  	return err;
1089  }
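
/*
 * Illustrative userspace sketch (not part of this file): job-control
 * shells typically call setpgid() in both parent and child, so the new
 * group exists no matter which side runs first; the parent's call may fail
 * with -EACCES if the child already exec'ed (the PF_FORKNOEXEC check):
 *
 *	#include <unistd.h>
 *
 *	pid_t pid = fork();
 *	if (pid == 0)
 *		setpgid(0, 0);		// child: own group, pgid == pid
 *	else if (pid > 0)
 *		setpgid(pid, pid);	// parent: same group, ignore EACCES
 */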
1090  
1091  static int do_getpgid(pid_t pid)
1092  {
1093  	struct task_struct *p;
1094  	struct pid *grp;
1095  	int retval;
1096  
1097  	rcu_read_lock();
1098  	if (!pid)
1099  		grp = task_pgrp(current);
1100  	else {
1101  		retval = -ESRCH;
1102  		p = find_task_by_vpid(pid);
1103  		if (!p)
1104  			goto out;
1105  		grp = task_pgrp(p);
1106  		if (!grp)
1107  			goto out;
1108  
1109  		retval = security_task_getpgid(p);
1110  		if (retval)
1111  			goto out;
1112  	}
1113  	retval = pid_vnr(grp);
1114  out:
1115  	rcu_read_unlock();
1116  	return retval;
1117  }
1118  
1119  SYSCALL_DEFINE1(getpgid, pid_t, pid)
1120  {
1121  	return do_getpgid(pid);
1122  }
1123  
1124  #ifdef __ARCH_WANT_SYS_GETPGRP
1125  
1126  SYSCALL_DEFINE0(getpgrp)
1127  {
1128  	return do_getpgid(0);
1129  }
1130  
1131  #endif
1132  
1133  SYSCALL_DEFINE1(getsid, pid_t, pid)
1134  {
1135  	struct task_struct *p;
1136  	struct pid *sid;
1137  	int retval;
1138  
1139  	rcu_read_lock();
1140  	if (!pid)
1141  		sid = task_session(current);
1142  	else {
1143  		retval = -ESRCH;
1144  		p = find_task_by_vpid(pid);
1145  		if (!p)
1146  			goto out;
1147  		sid = task_session(p);
1148  		if (!sid)
1149  			goto out;
1150  
1151  		retval = security_task_getsid(p);
1152  		if (retval)
1153  			goto out;
1154  	}
1155  	retval = pid_vnr(sid);
1156  out:
1157  	rcu_read_unlock();
1158  	return retval;
1159  }
1160  
1161  static void set_special_pids(struct pid *pid)
1162  {
1163  	struct task_struct *curr = current->group_leader;
1164  
1165  	if (task_session(curr) != pid)
1166  		change_pid(curr, PIDTYPE_SID, pid);
1167  
1168  	if (task_pgrp(curr) != pid)
1169  		change_pid(curr, PIDTYPE_PGID, pid);
1170  }
1171  
1172  int ksys_setsid(void)
1173  {
1174  	struct task_struct *group_leader = current->group_leader;
1175  	struct pid *sid = task_pid(group_leader);
1176  	pid_t session = pid_vnr(sid);
1177  	int err = -EPERM;
1178  
1179  	write_lock_irq(&tasklist_lock);
1180  	/* Fail if I am already a session leader */
1181  	if (group_leader->signal->leader)
1182  		goto out;
1183  
1184  	/* Fail if a process group id already exists that equals the
1185  	 * proposed session id.
1186  	 */
1187  	if (pid_task(sid, PIDTYPE_PGID))
1188  		goto out;
1189  
1190  	group_leader->signal->leader = 1;
1191  	set_special_pids(sid);
1192  
1193  	proc_clear_tty(group_leader);
1194  
1195  	err = session;
1196  out:
1197  	write_unlock_irq(&tasklist_lock);
1198  	if (err > 0) {
1199  		proc_sid_connector(group_leader);
1200  		sched_autogroup_create_attach(group_leader);
1201  	}
1202  	return err;
1203  }
1204  
1205  SYSCALL_DEFINE0(setsid)
1206  {
1207  	return ksys_setsid();
1208  }
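
/*
 * Illustrative userspace sketch (not part of this file): because the
 * checks above reject session and process-group leaders, the classic
 * daemonize sequence forks first so the child is guaranteed not to be a
 * group leader:
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	if (fork() > 0)
 *		exit(0);	// parent exits
 *	setsid();		// child: new session, no controlling tty
 */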
1209  
1210  DECLARE_RWSEM(uts_sem);
1211  
1212  #ifdef COMPAT_UTS_MACHINE
1213  #define override_architecture(name) \
1214  	(personality(current->personality) == PER_LINUX32 && \
1215  	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1216  		      sizeof(COMPAT_UTS_MACHINE)))
1217  #else
1218  #define override_architecture(name)	0
1219  #endif
1220  
1221  /*
1222   * Work around broken programs that cannot handle "Linux 3.0".
1223   * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40,
1224   * and we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1225   * 2.6.60.
1226   */
1227  static int override_release(char __user *release, size_t len)
1228  {
1229  	int ret = 0;
1230  
1231  	if (current->personality & UNAME26) {
1232  		const char *rest = UTS_RELEASE;
1233  		char buf[65] = { 0 };
1234  		int ndots = 0;
1235  		unsigned v;
1236  		size_t copy;
1237  
1238  		while (*rest) {
1239  			if (*rest == '.' && ++ndots >= 3)
1240  				break;
1241  			if (!isdigit(*rest) && *rest != '.')
1242  				break;
1243  			rest++;
1244  		}
1245  		v = LINUX_VERSION_PATCHLEVEL + 60;
1246  		copy = clamp_t(size_t, len, 1, sizeof(buf));
1247  		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1248  		ret = copy_to_user(release, buf, copy + 1);
1249  	}
1250  	return ret;
1251  }
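
/*
 * Illustrative userspace sketch (not part of this file): this is the
 * mapping behind util-linux's "setarch --uname-2.6"; a legacy program can
 * also opt in itself, after which uname(2) reports a 2.6.x release:
 *
 *	#include <sys/personality.h>
 *
 *	personality(PER_LINUX | UNAME26);	// e.g. "5.10.0" -> "2.6.70"
 */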
1252  
1253  SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1254  {
1255  	struct new_utsname tmp;
1256  
1257  	down_read(&uts_sem);
1258  	memcpy(&tmp, utsname(), sizeof(tmp));
1259  	up_read(&uts_sem);
1260  	if (copy_to_user(name, &tmp, sizeof(tmp)))
1261  		return -EFAULT;
1262  
1263  	if (override_release(name->release, sizeof(name->release)))
1264  		return -EFAULT;
1265  	if (override_architecture(name))
1266  		return -EFAULT;
1267  	return 0;
1268  }
1269  
1270  #ifdef __ARCH_WANT_SYS_OLD_UNAME
1271  /*
1272   * Old cruft
1273   */
1274  SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1275  {
1276  	struct old_utsname tmp;
1277  
1278  	if (!name)
1279  		return -EFAULT;
1280  
1281  	down_read(&uts_sem);
1282  	memcpy(&tmp, utsname(), sizeof(tmp));
1283  	up_read(&uts_sem);
1284  	if (copy_to_user(name, &tmp, sizeof(tmp)))
1285  		return -EFAULT;
1286  
1287  	if (override_release(name->release, sizeof(name->release)))
1288  		return -EFAULT;
1289  	if (override_architecture(name))
1290  		return -EFAULT;
1291  	return 0;
1292  }
1293  
1294  SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1295  {
1296  	struct oldold_utsname tmp;
1297  
1298  	if (!name)
1299  		return -EFAULT;
1300  
1301  	memset(&tmp, 0, sizeof(tmp));
1302  
1303  	down_read(&uts_sem);
1304  	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1305  	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1306  	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1307  	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1308  	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1309  	up_read(&uts_sem);
1310  	if (copy_to_user(name, &tmp, sizeof(tmp)))
1311  		return -EFAULT;
1312  
1313  	if (override_architecture(name))
1314  		return -EFAULT;
1315  	if (override_release(name->release, sizeof(name->release)))
1316  		return -EFAULT;
1317  	return 0;
1318  }
1319  #endif
1320  
1321  SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1322  {
1323  	int errno;
1324  	char tmp[__NEW_UTS_LEN];
1325  
1326  	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1327  		return -EPERM;
1328  
1329  	if (len < 0 || len > __NEW_UTS_LEN)
1330  		return -EINVAL;
1331  	errno = -EFAULT;
1332  	if (!copy_from_user(tmp, name, len)) {
1333  		struct new_utsname *u;
1334  
1335  		down_write(&uts_sem);
1336  		u = utsname();
1337  		memcpy(u->nodename, tmp, len);
1338  		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1339  		errno = 0;
1340  		uts_proc_notify(UTS_PROC_HOSTNAME);
1341  		up_write(&uts_sem);
1342  	}
1343  	return errno;
1344  }
1345  
1346  #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1347  
1348  SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1349  {
1350  	int i;
1351  	struct new_utsname *u;
1352  	char tmp[__NEW_UTS_LEN + 1];
1353  
1354  	if (len < 0)
1355  		return -EINVAL;
1356  	down_read(&uts_sem);
1357  	u = utsname();
1358  	i = 1 + strlen(u->nodename);
1359  	if (i > len)
1360  		i = len;
1361  	memcpy(tmp, u->nodename, i);
1362  	up_read(&uts_sem);
1363  	if (copy_to_user(name, tmp, i))
1364  		return -EFAULT;
1365  	return 0;
1366  }
1367  
1368  #endif
1369  
1370  /*
1371   * Only setdomainname; getdomainname can be implemented by calling
1372   * uname()
1373   */
1374  SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1375  {
1376  	int errno;
1377  	char tmp[__NEW_UTS_LEN];
1378  
1379  	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1380  		return -EPERM;
1381  	if (len < 0 || len > __NEW_UTS_LEN)
1382  		return -EINVAL;
1383  
1384  	errno = -EFAULT;
1385  	if (!copy_from_user(tmp, name, len)) {
1386  		struct new_utsname *u;
1387  
1388  		down_write(&uts_sem);
1389  		u = utsname();
1390  		memcpy(u->domainname, tmp, len);
1391  		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1392  		errno = 0;
1393  		uts_proc_notify(UTS_PROC_DOMAINNAME);
1394  		up_write(&uts_sem);
1395  	}
1396  	return errno;
1397  }
1398  
1399  SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1400  {
1401  	struct rlimit value;
1402  	int ret;
1403  
1404  	ret = do_prlimit(current, resource, NULL, &value);
1405  	if (!ret)
1406  		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1407  
1408  	return ret;
1409  }
1410  
1411  #ifdef CONFIG_COMPAT
1412  
1413  COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1414  		       struct compat_rlimit __user *, rlim)
1415  {
1416  	struct rlimit r;
1417  	struct compat_rlimit r32;
1418  
1419  	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1420  		return -EFAULT;
1421  
1422  	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1423  		r.rlim_cur = RLIM_INFINITY;
1424  	else
1425  		r.rlim_cur = r32.rlim_cur;
1426  	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1427  		r.rlim_max = RLIM_INFINITY;
1428  	else
1429  		r.rlim_max = r32.rlim_max;
1430  	return do_prlimit(current, resource, &r, NULL);
1431  }
1432  
1433  COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1434  		       struct compat_rlimit __user *, rlim)
1435  {
1436  	struct rlimit r;
1437  	int ret;
1438  
1439  	ret = do_prlimit(current, resource, NULL, &r);
1440  	if (!ret) {
1441  		struct compat_rlimit r32;
1442  		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1443  			r32.rlim_cur = COMPAT_RLIM_INFINITY;
1444  		else
1445  			r32.rlim_cur = r.rlim_cur;
1446  		if (r.rlim_max > COMPAT_RLIM_INFINITY)
1447  			r32.rlim_max = COMPAT_RLIM_INFINITY;
1448  		else
1449  			r32.rlim_max = r.rlim_max;
1450  
1451  		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1452  			return -EFAULT;
1453  	}
1454  	return ret;
1455  }
1456  
1457  #endif
1458  
1459  #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1460  
1461  /*
1462   *	Backwards compatibility for getrlimit. Needed for some apps.
1463   */
1464  SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1465  		struct rlimit __user *, rlim)
1466  {
1467  	struct rlimit x;
1468  	if (resource >= RLIM_NLIMITS)
1469  		return -EINVAL;
1470  
1471  	resource = array_index_nospec(resource, RLIM_NLIMITS);
1472  	task_lock(current->group_leader);
1473  	x = current->signal->rlim[resource];
1474  	task_unlock(current->group_leader);
1475  	if (x.rlim_cur > 0x7FFFFFFF)
1476  		x.rlim_cur = 0x7FFFFFFF;
1477  	if (x.rlim_max > 0x7FFFFFFF)
1478  		x.rlim_max = 0x7FFFFFFF;
1479  	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1480  }
1481  
1482  #ifdef CONFIG_COMPAT
1483  COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1484  		       struct compat_rlimit __user *, rlim)
1485  {
1486  	struct rlimit r;
1487  
1488  	if (resource >= RLIM_NLIMITS)
1489  		return -EINVAL;
1490  
1491  	resource = array_index_nospec(resource, RLIM_NLIMITS);
1492  	task_lock(current->group_leader);
1493  	r = current->signal->rlim[resource];
1494  	task_unlock(current->group_leader);
1495  	if (r.rlim_cur > 0x7FFFFFFF)
1496  		r.rlim_cur = 0x7FFFFFFF;
1497  	if (r.rlim_max > 0x7FFFFFFF)
1498  		r.rlim_max = 0x7FFFFFFF;
1499  
1500  	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1501  	    put_user(r.rlim_max, &rlim->rlim_max))
1502  		return -EFAULT;
1503  	return 0;
1504  }
1505  #endif
1506  
1507  #endif
1508  
1509  static inline bool rlim64_is_infinity(__u64 rlim64)
1510  {
1511  #if BITS_PER_LONG < 64
1512  	return rlim64 >= ULONG_MAX;
1513  #else
1514  	return rlim64 == RLIM64_INFINITY;
1515  #endif
1516  }
1517  
1518  static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1519  {
1520  	if (rlim->rlim_cur == RLIM_INFINITY)
1521  		rlim64->rlim_cur = RLIM64_INFINITY;
1522  	else
1523  		rlim64->rlim_cur = rlim->rlim_cur;
1524  	if (rlim->rlim_max == RLIM_INFINITY)
1525  		rlim64->rlim_max = RLIM64_INFINITY;
1526  	else
1527  		rlim64->rlim_max = rlim->rlim_max;
1528  }
1529  
1530  static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1531  {
1532  	if (rlim64_is_infinity(rlim64->rlim_cur))
1533  		rlim->rlim_cur = RLIM_INFINITY;
1534  	else
1535  		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1536  	if (rlim64_is_infinity(rlim64->rlim_max))
1537  		rlim->rlim_max = RLIM_INFINITY;
1538  	else
1539  		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1540  }
1541  
1542  /* make sure you are allowed to change @tsk limits before calling this */
1543  int do_prlimit(struct task_struct *tsk, unsigned int resource,
1544  		struct rlimit *new_rlim, struct rlimit *old_rlim)
1545  {
1546  	struct rlimit *rlim;
1547  	int retval = 0;
1548  
1549  	if (resource >= RLIM_NLIMITS)
1550  		return -EINVAL;
1551  	if (new_rlim) {
1552  		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1553  			return -EINVAL;
1554  		if (resource == RLIMIT_NOFILE &&
1555  				new_rlim->rlim_max > sysctl_nr_open)
1556  			return -EPERM;
1557  	}
1558  
1559  	/* protect tsk->signal and tsk->sighand from disappearing */
1560  	read_lock(&tasklist_lock);
1561  	if (!tsk->sighand) {
1562  		retval = -ESRCH;
1563  		goto out;
1564  	}
1565  
1566  	rlim = tsk->signal->rlim + resource;
1567  	task_lock(tsk->group_leader);
1568  	if (new_rlim) {
1569  		/* Keep the capable check against init_user_ns until
1570  		   cgroups can contain all limits */
1571  		if (new_rlim->rlim_max > rlim->rlim_max &&
1572  				!capable(CAP_SYS_RESOURCE))
1573  			retval = -EPERM;
1574  		if (!retval)
1575  			retval = security_task_setrlimit(tsk, resource, new_rlim);
1576  	}
1577  	if (!retval) {
1578  		if (old_rlim)
1579  			*old_rlim = *rlim;
1580  		if (new_rlim)
1581  			*rlim = *new_rlim;
1582  	}
1583  	task_unlock(tsk->group_leader);
1584  
1585  	/*
1586  	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
1587  	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
1588  	 * ignores the rlimit.
1589  	 */
1590  	if (!retval && new_rlim && resource == RLIMIT_CPU &&
1591  	    new_rlim->rlim_cur != RLIM_INFINITY &&
1592  	    IS_ENABLED(CONFIG_POSIX_TIMERS))
1593  		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1594  out:
1595  	read_unlock(&tasklist_lock);
1596  	return retval;
1597  }
1598  
1599  /* rcu lock must be held */
1600  static int check_prlimit_permission(struct task_struct *task,
1601  				    unsigned int flags)
1602  {
1603  	const struct cred *cred = current_cred(), *tcred;
1604  	bool id_match;
1605  
1606  	if (current == task)
1607  		return 0;
1608  
1609  	tcred = __task_cred(task);
1610  	id_match = (uid_eq(cred->uid, tcred->euid) &&
1611  		    uid_eq(cred->uid, tcred->suid) &&
1612  		    uid_eq(cred->uid, tcred->uid)  &&
1613  		    gid_eq(cred->gid, tcred->egid) &&
1614  		    gid_eq(cred->gid, tcred->sgid) &&
1615  		    gid_eq(cred->gid, tcred->gid));
1616  	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1617  		return -EPERM;
1618  
1619  	return security_task_prlimit(cred, tcred, flags);
1620  }
1621  
1622  SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1623  		const struct rlimit64 __user *, new_rlim,
1624  		struct rlimit64 __user *, old_rlim)
1625  {
1626  	struct rlimit64 old64, new64;
1627  	struct rlimit old, new;
1628  	struct task_struct *tsk;
1629  	unsigned int checkflags = 0;
1630  	int ret;
1631  
1632  	if (old_rlim)
1633  		checkflags |= LSM_PRLIMIT_READ;
1634  
1635  	if (new_rlim) {
1636  		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1637  			return -EFAULT;
1638  		rlim64_to_rlim(&new64, &new);
1639  		checkflags |= LSM_PRLIMIT_WRITE;
1640  	}
1641  
1642  	rcu_read_lock();
1643  	tsk = pid ? find_task_by_vpid(pid) : current;
1644  	if (!tsk) {
1645  		rcu_read_unlock();
1646  		return -ESRCH;
1647  	}
1648  	ret = check_prlimit_permission(tsk, checkflags);
1649  	if (ret) {
1650  		rcu_read_unlock();
1651  		return ret;
1652  	}
1653  	get_task_struct(tsk);
1654  	rcu_read_unlock();
1655  
1656  	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1657  			old_rlim ? &old : NULL);
1658  
1659  	if (!ret && old_rlim) {
1660  		rlim_to_rlim64(&old, &old64);
1661  		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1662  			ret = -EFAULT;
1663  	}
1664  
1665  	put_task_struct(tsk);
1666  	return ret;
1667  }
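
/*
 * Illustrative userspace sketch (not part of this file; pid is a
 * placeholder): prlimit64 can read and write another task's limits in one
 * call; glibc exposes it as prlimit() (_GNU_SOURCE), here raising
 * RLIMIT_NOFILE of a target pid:
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	struct rlimit new = { 4096, 4096 }, old;
 *
 *	if (prlimit(pid, RLIMIT_NOFILE, &new, &old) != 0)
 *		perror("prlimit");	// e.g. EPERM from the id checks above
 */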
1668  
1669  SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1670  {
1671  	struct rlimit new_rlim;
1672  
1673  	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1674  		return -EFAULT;
1675  	return do_prlimit(current, resource, &new_rlim, NULL);
1676  }
1677  
1678  /*
1679   * It would make sense to put struct rusage in the task_struct,
1680   * except that would make the task_struct be *really big*.  After
1681   * task_struct gets moved into malloc'ed memory, it would
1682   * make sense to do this.  It will make moving the rest of the information
1683   * a lot simpler!  (Which we're not doing right now because we're not
1684   * measuring them yet).
1685   *
1686   * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1687   * races with threads incrementing their own counters.  But since word
1688   * reads are atomic, we either get new values or old values and we don't
1689   * care which for the sums.  We always take the siglock to protect reading
1690   * the c* fields from p->signal from races with exit.c updating those
1691   * fields when reaping, so a sample either gets all the additions of a
1692   * given child after it's reaped, or none so this sample is before reaping.
1693   *
1694   * Locking:
1695   * We need to take the siglock for CHILDREN, SELF and BOTH
1696   * for the cases of current multithreaded, non-current single-threaded and
1697   * non-current multithreaded.  Thread traversal is now safe with
1698   * the siglock held.
1699   * Strictly speaking, we do not need to take the siglock if we are current and
1700   * single-threaded, as no one else can take our signal_struct away, no one
1701   * else can reap the children to update signal->c* counters, and no one else
1702   * can race with the signal-> fields. If we do not take any lock, the
1703   * signal-> fields could be read out of order while another thread was just
1704   * exiting. So we should place a read memory barrier when we avoid the lock.
1705   * On the writer side, a write memory barrier is implied in __exit_signal,
1706   * as __exit_signal releases the siglock spinlock after updating the signal->
1707   * fields. But we don't do this yet to keep things simple.
1708   *
1709   */
1710  
1711  static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1712  {
1713  	r->ru_nvcsw += t->nvcsw;
1714  	r->ru_nivcsw += t->nivcsw;
1715  	r->ru_minflt += t->min_flt;
1716  	r->ru_majflt += t->maj_flt;
1717  	r->ru_inblock += task_io_get_inblock(t);
1718  	r->ru_oublock += task_io_get_oublock(t);
1719  }
1720  
1721  void getrusage(struct task_struct *p, int who, struct rusage *r)
1722  {
1723  	struct task_struct *t;
1724  	unsigned long flags;
1725  	u64 tgutime, tgstime, utime, stime;
1726  	unsigned long maxrss = 0;
1727  
1728  	memset((char *)r, 0, sizeof (*r));
1729  	utime = stime = 0;
1730  
1731  	if (who == RUSAGE_THREAD) {
1732  		task_cputime_adjusted(current, &utime, &stime);
1733  		accumulate_thread_rusage(p, r);
1734  		maxrss = p->signal->maxrss;
1735  		goto out;
1736  	}
1737  
1738  	if (!lock_task_sighand(p, &flags))
1739  		return;
1740  
1741  	switch (who) {
1742  	case RUSAGE_BOTH:
1743  	case RUSAGE_CHILDREN:
1744  		utime = p->signal->cutime;
1745  		stime = p->signal->cstime;
1746  		r->ru_nvcsw = p->signal->cnvcsw;
1747  		r->ru_nivcsw = p->signal->cnivcsw;
1748  		r->ru_minflt = p->signal->cmin_flt;
1749  		r->ru_majflt = p->signal->cmaj_flt;
1750  		r->ru_inblock = p->signal->cinblock;
1751  		r->ru_oublock = p->signal->coublock;
1752  		maxrss = p->signal->cmaxrss;
1753  
1754  		if (who == RUSAGE_CHILDREN)
1755  			break;
1756  		fallthrough;
1757  
1758  	case RUSAGE_SELF:
1759  		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1760  		utime += tgutime;
1761  		stime += tgstime;
1762  		r->ru_nvcsw += p->signal->nvcsw;
1763  		r->ru_nivcsw += p->signal->nivcsw;
1764  		r->ru_minflt += p->signal->min_flt;
1765  		r->ru_majflt += p->signal->maj_flt;
1766  		r->ru_inblock += p->signal->inblock;
1767  		r->ru_oublock += p->signal->oublock;
1768  		if (maxrss < p->signal->maxrss)
1769  			maxrss = p->signal->maxrss;
1770  		t = p;
1771  		do {
1772  			accumulate_thread_rusage(t, r);
1773  		} while_each_thread(p, t);
1774  		break;
1775  
1776  	default:
1777  		BUG();
1778  	}
1779  	unlock_task_sighand(p, &flags);
1780  
1781  out:
1782  	r->ru_utime = ns_to_kernel_old_timeval(utime);
1783  	r->ru_stime = ns_to_kernel_old_timeval(stime);
1784  
1785  	if (who != RUSAGE_CHILDREN) {
1786  		struct mm_struct *mm = get_task_mm(p);
1787  
1788  		if (mm) {
1789  			setmax_mm_hiwater_rss(&maxrss, mm);
1790  			mmput(mm);
1791  		}
1792  	}
1793  	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1794  }
1795  
1796  SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1797  {
1798  	struct rusage r;
1799  
1800  	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1801  	    who != RUSAGE_THREAD)
1802  		return -EINVAL;
1803  
1804  	getrusage(current, who, &r);
1805  	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1806  }
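
/*
 * Illustrative userspace sketch (not part of this file): note that
 * ru_maxrss arrives in kilobytes thanks to the PAGE_SIZE conversion above:
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	struct rusage ru;
 *
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("peak RSS: %ld kB\n", ru.ru_maxrss);
 */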
1807  
1808  #ifdef CONFIG_COMPAT
1809  COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1810  {
1811  	struct rusage r;
1812  
1813  	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1814  	    who != RUSAGE_THREAD)
1815  		return -EINVAL;
1816  
1817  	getrusage(current, who, &r);
1818  	return put_compat_rusage(&r, ru);
1819  }
1820  #endif
1821  
1822  SYSCALL_DEFINE1(umask, int, mask)
1823  {
1824  	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1825  	return mask;
1826  }
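
/*
 * Illustrative userspace sketch (not part of this file): umask() cannot
 * fail and always returns the previous mask, so reading the current value
 * without changing it takes two calls:
 *
 *	#include <sys/stat.h>
 *
 *	mode_t old = umask(0);	// returns the previous mask
 *	umask(old);		// put it back
 */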
1827  
1828  static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1829  {
1830  	struct fd exe;
1831  	struct file *old_exe, *exe_file;
1832  	struct inode *inode;
1833  	int err;
1834  
1835  	exe = fdget(fd);
1836  	if (!exe.file)
1837  		return -EBADF;
1838  
1839  	inode = file_inode(exe.file);
1840  
1841  	/*
1842  	 * Because the original mm->exe_file points to an executable file, make
1843  	 * sure that this one is executable as well, to avoid breaking the
1844  	 * overall picture.
1845  	 */
1846  	err = -EACCES;
1847  	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1848  		goto exit;
1849  
1850  	err = file_permission(exe.file, MAY_EXEC);
1851  	if (err)
1852  		goto exit;
1853  
1854  	/*
1855  	 * Forbid mm->exe_file change if old file still mapped.
1856  	 */
1857  	exe_file = get_mm_exe_file(mm);
1858  	err = -EBUSY;
1859  	if (exe_file) {
1860  		struct vm_area_struct *vma;
1861  
1862  		mmap_read_lock(mm);
1863  		for (vma = mm->mmap; vma; vma = vma->vm_next) {
1864  			if (!vma->vm_file)
1865  				continue;
1866  			if (path_equal(&vma->vm_file->f_path,
1867  				       &exe_file->f_path))
1868  				goto exit_err;
1869  		}
1870  
1871  		mmap_read_unlock(mm);
1872  		fput(exe_file);
1873  	}
1874  
1875  	err = 0;
1876  	/* set the new file, lockless */
1877  	get_file(exe.file);
1878  	old_exe = xchg(&mm->exe_file, exe.file);
1879  	if (old_exe)
1880  		fput(old_exe);
1881  exit:
1882  	fdput(exe);
1883  	return err;
1884  exit_err:
1885  	mmap_read_unlock(mm);
1886  	fput(exe_file);
1887  	goto exit;
1888  }
1889  
1890  /*
1891   * Check arithmetic relations of passed addresses.
1892   *
1893   * WARNING: we don't require any capability here so be very careful
1894   * in what is allowed for modification from userspace.
1895   */
1896  static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
1897  {
1898  	unsigned long mmap_max_addr = TASK_SIZE;
1899  	int error = -EINVAL, i;
1900  
1901  	static const unsigned char offsets[] = {
1902  		offsetof(struct prctl_mm_map, start_code),
1903  		offsetof(struct prctl_mm_map, end_code),
1904  		offsetof(struct prctl_mm_map, start_data),
1905  		offsetof(struct prctl_mm_map, end_data),
1906  		offsetof(struct prctl_mm_map, start_brk),
1907  		offsetof(struct prctl_mm_map, brk),
1908  		offsetof(struct prctl_mm_map, start_stack),
1909  		offsetof(struct prctl_mm_map, arg_start),
1910  		offsetof(struct prctl_mm_map, arg_end),
1911  		offsetof(struct prctl_mm_map, env_start),
1912  		offsetof(struct prctl_mm_map, env_end),
1913  	};
1914  
1915  	/*
1916  	 * Make sure the members are not somewhere outside
1917  	 * of the allowed address space.
1918  	 */
1919  	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1920  		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1921  
1922  		if ((unsigned long)val >= mmap_max_addr ||
1923  		    (unsigned long)val < mmap_min_addr)
1924  			goto out;
1925  	}
1926  
1927  	/*
1928  	 * Make sure the pairs are ordered.
1929  	 */
1930  #define __prctl_check_order(__m1, __op, __m2)				\
1931  	((unsigned long)prctl_map->__m1 __op				\
1932  	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1933  	error  = __prctl_check_order(start_code, <, end_code);
1934  	error |= __prctl_check_order(start_data, <=, end_data);
1935  	error |= __prctl_check_order(start_brk, <=, brk);
1936  	error |= __prctl_check_order(arg_start, <=, arg_end);
1937  	error |= __prctl_check_order(env_start, <=, env_end);
1938  	if (error)
1939  		goto out;
1940  #undef __prctl_check_order
1941  
1942  	error = -EINVAL;
1943  
1944  	/*
1945  	 * @brk should be after @end_data in traditional maps.
1946  	 */
1947  	if (prctl_map->start_brk <= prctl_map->end_data ||
1948  	    prctl_map->brk <= prctl_map->end_data)
1949  		goto out;
1950  
1951  	/*
1952  	 * Nor should we allow overriding the limits if they are set.
1953  	 */
1954  	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1955  			      prctl_map->start_brk, prctl_map->end_data,
1956  			      prctl_map->start_data))
1957  		goto out;
1958  
1959  	error = 0;
1960  out:
1961  	return error;
1962  }
1963  
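/*
 * Illustrative example (a sketch, not kernel code): a prctl_mm_map
 * layout that would pass the checks above on a typical 64-bit process.
 * All addresses are made up for illustration.
 *
 *	.start_code = 0x400000,		.end_code = 0x401000,	// start_code <  end_code
 *	.start_data = 0x601000,		.end_data = 0x602000,	// start_data <= end_data
 *	.start_brk  = 0x603000,		.brk	  = 0x604000,	// end_data < start_brk <= brk
 *	.start_stack = 0x7ffffffd0000,
 *	.arg_start  = 0x7ffffffde000,	.arg_end  = 0x7ffffffdf000,	// arg_start <= arg_end
 *	.env_start  = 0x7ffffffdf000,	.env_end  = 0x7ffffffe0000,	// env_start <= env_end
 *
 * Every member must also fall within [mmap_min_addr, TASK_SIZE), and
 * the brk range must respect RLIMIT_DATA (see check_data_rlimit()).
 */
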
1964  #ifdef CONFIG_CHECKPOINT_RESTORE
1965  static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1966  {
1967  	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1968  	unsigned long user_auxv[AT_VECTOR_SIZE];
1969  	struct mm_struct *mm = current->mm;
1970  	int error;
1971  
1972  	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1973  	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1974  
1975  	if (opt == PR_SET_MM_MAP_SIZE)
1976  		return put_user((unsigned int)sizeof(prctl_map),
1977  				(unsigned int __user *)addr);
1978  
1979  	if (data_size != sizeof(prctl_map))
1980  		return -EINVAL;
1981  
1982  	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1983  		return -EFAULT;
1984  
1985  	error = validate_prctl_map_addr(&prctl_map);
1986  	if (error)
1987  		return error;
1988  
1989  	if (prctl_map.auxv_size) {
1990  		/*
1991  		 * Someone is trying to cheat with the auxv vector.
1992  		 */
1993  		if (!prctl_map.auxv ||
1994  				prctl_map.auxv_size > sizeof(mm->saved_auxv))
1995  			return -EINVAL;
1996  
1997  		memset(user_auxv, 0, sizeof(user_auxv));
1998  		if (copy_from_user(user_auxv,
1999  				   (const void __user *)prctl_map.auxv,
2000  				   prctl_map.auxv_size))
2001  			return -EFAULT;
2002  
2003  		/* The last entry must be AT_NULL, as the specification requires */
2004  		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
2005  		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
2006  	}
2007  
2008  	if (prctl_map.exe_fd != (u32)-1) {
2009  		/*
2010  		 * Check if the current user is checkpoint/restore capable.
2011  		 * At the time of this writing, it checks for CAP_SYS_ADMIN
2012  		 * or CAP_CHECKPOINT_RESTORE.
2013  		 * Note that a user with ptrace access can make an arbitrary
2014  		 * program masquerade as any executable, even setuid ones.
2015  		 * This may have implications for the tomoyo subsystem.
2016  		 */
2017  		if (!checkpoint_restore_ns_capable(current_user_ns()))
2018  			return -EPERM;
2019  
2020  		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2021  		if (error)
2022  			return error;
2023  	}
2024  
2025  	/*
2026  	 * arg_lock protects concurrent updates, but we still need mmap_lock
2027  	 * for read to exclude races with sys_brk.
2028  	 */
2029  	mmap_read_lock(mm);
2030  
2031  	/*
2032  	 * We don't validate that these members point to real, present
2033  	 * VMAs because the application may have already unmapped the
2034  	 * corresponding VMAs, and the kernel mostly uses these members
2035  	 * for statistics output in procfs, except:
2036  	 *
2037  	 *  - @start_brk/@brk, which are used in do_brk_flags; the kernel
2038  	 *    looks up VMAs when updating these members, so a bogus value
2039  	 *    written here makes the kernel swear at the userspace program
2040  	 *    but won't lead to any problem in the kernel itself.
2041  	 */
2042  
2043  	spin_lock(&mm->arg_lock);
2044  	mm->start_code	= prctl_map.start_code;
2045  	mm->end_code	= prctl_map.end_code;
2046  	mm->start_data	= prctl_map.start_data;
2047  	mm->end_data	= prctl_map.end_data;
2048  	mm->start_brk	= prctl_map.start_brk;
2049  	mm->brk		= prctl_map.brk;
2050  	mm->start_stack	= prctl_map.start_stack;
2051  	mm->arg_start	= prctl_map.arg_start;
2052  	mm->arg_end	= prctl_map.arg_end;
2053  	mm->env_start	= prctl_map.env_start;
2054  	mm->env_end	= prctl_map.env_end;
2055  	spin_unlock(&mm->arg_lock);
2056  
2057  	/*
2058  	 * Note this update of @saved_auxv is lockless, thus
2059  	 * if someone reads this member in procfs while we're
2060  	 * updating it, they may see partly updated results.
2061  	 * This is a known and acceptable trade-off: we leave
2062  	 * it as is rather than introduce additional locks
2063  	 * here and make the kernel more complex.
2064  	 */
2065  	if (prctl_map.auxv_size)
2066  		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2067  
2068  	mmap_read_unlock(mm);
2069  	return 0;
2070  }
2071  #endif /* CONFIG_CHECKPOINT_RESTORE */
2072  
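/*
 * Illustrative userspace sketch (not part of the kernel sources): how a
 * checkpoint/restore tool might drive PR_SET_MM_MAP above.  Assumes a
 * CONFIG_CHECKPOINT_RESTORE kernel and a caller with CAP_SYS_ADMIN or
 * CAP_CHECKPOINT_RESTORE; restore_mm_layout() is a hypothetical helper
 * and "saved" holds values recorded at checkpoint time.
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	static int restore_mm_layout(const struct prctl_mm_map *saved)
 *	{
 *		unsigned int size;
 *
 *		// Ask the kernel for its struct size to detect ABI skew.
 *		if (prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, &size, 0, 0))
 *			return -1;
 *		if (size != sizeof(*saved))
 *			return -1;
 *
 *		// Validate and install the whole layout in one shot.
 *		return prctl(PR_SET_MM, PR_SET_MM_MAP, saved,
 *			     sizeof(*saved), 0);
 *	}
 */
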
2073  static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2074  			  unsigned long len)
2075  {
2076  	/*
2077  	 * This doesn't move the auxiliary vector itself since it's pinned to
2078  	 * mm_struct, but it permits filling the vector with new values.  It's
2079  	 * up to the caller to provide sane values here, otherwise userspace
2080  	 * tools that use this vector might be unhappy.
2081  	 */
2082  	unsigned long user_auxv[AT_VECTOR_SIZE] = {};
2083  
2084  	if (len > sizeof(user_auxv))
2085  		return -EINVAL;
2086  
2087  	if (copy_from_user(user_auxv, (const void __user *)addr, len))
2088  		return -EFAULT;
2089  
2090  	/* Make sure the last entry is always AT_NULL */
2091  	user_auxv[AT_VECTOR_SIZE - 2] = 0;
2092  	user_auxv[AT_VECTOR_SIZE - 1] = 0;
2093  
2094  	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2095  
2096  	task_lock(current);
2097  	memcpy(mm->saved_auxv, user_auxv, len);
2098  	task_unlock(current);
2099  
2100  	return 0;
2101  }
2102  
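/*
 * Illustrative userspace sketch (not part of the kernel sources):
 * replacing the recorded auxiliary vector via PR_SET_MM_AUXV, which is
 * routed to prctl_set_auxv() by prctl_set_mm() below and requires
 * CAP_SYS_RESOURCE.  set_saved_auxv() is a hypothetical helper.
 *
 *	#include <stddef.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	static int set_saved_auxv(const unsigned long *auxv, size_t len)
 *	{
 *		// The kernel rejects vectors longer than saved_auxv and
 *		// forces the trailing AT_NULL entry itself.
 *		return prctl(PR_SET_MM, PR_SET_MM_AUXV,
 *			     (unsigned long)auxv, len, 0);
 *	}
 */
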
2103  static int prctl_set_mm(int opt, unsigned long addr,
2104  			unsigned long arg4, unsigned long arg5)
2105  {
2106  	struct mm_struct *mm = current->mm;
2107  	struct prctl_mm_map prctl_map = {
2108  		.auxv = NULL,
2109  		.auxv_size = 0,
2110  		.exe_fd = -1,
2111  	};
2112  	struct vm_area_struct *vma;
2113  	int error;
2114  
2115  	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2116  			      opt != PR_SET_MM_MAP &&
2117  			      opt != PR_SET_MM_MAP_SIZE)))
2118  		return -EINVAL;
2119  
2120  #ifdef CONFIG_CHECKPOINT_RESTORE
2121  	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2122  		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2123  #endif
2124  
2125  	if (!capable(CAP_SYS_RESOURCE))
2126  		return -EPERM;
2127  
2128  	if (opt == PR_SET_MM_EXE_FILE)
2129  		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2130  
2131  	if (opt == PR_SET_MM_AUXV)
2132  		return prctl_set_auxv(mm, addr, arg4);
2133  
2134  	if (addr >= TASK_SIZE || addr < mmap_min_addr)
2135  		return -EINVAL;
2136  
2137  	error = -EINVAL;
2138  
2139  	/*
2140  	 * arg_lock protects concurrent updates of the arg boundaries; we
2141  	 * need mmap_lock for a) concurrent sys_brk and b) finding the VMA
2142  	 * for addr validation.
2143  	 */
2144  	mmap_read_lock(mm);
2145  	vma = find_vma(mm, addr);
2146  
2147  	spin_lock(&mm->arg_lock);
2148  	prctl_map.start_code	= mm->start_code;
2149  	prctl_map.end_code	= mm->end_code;
2150  	prctl_map.start_data	= mm->start_data;
2151  	prctl_map.end_data	= mm->end_data;
2152  	prctl_map.start_brk	= mm->start_brk;
2153  	prctl_map.brk		= mm->brk;
2154  	prctl_map.start_stack	= mm->start_stack;
2155  	prctl_map.arg_start	= mm->arg_start;
2156  	prctl_map.arg_end	= mm->arg_end;
2157  	prctl_map.env_start	= mm->env_start;
2158  	prctl_map.env_end	= mm->env_end;
2159  
2160  	switch (opt) {
2161  	case PR_SET_MM_START_CODE:
2162  		prctl_map.start_code = addr;
2163  		break;
2164  	case PR_SET_MM_END_CODE:
2165  		prctl_map.end_code = addr;
2166  		break;
2167  	case PR_SET_MM_START_DATA:
2168  		prctl_map.start_data = addr;
2169  		break;
2170  	case PR_SET_MM_END_DATA:
2171  		prctl_map.end_data = addr;
2172  		break;
2173  	case PR_SET_MM_START_STACK:
2174  		prctl_map.start_stack = addr;
2175  		break;
2176  	case PR_SET_MM_START_BRK:
2177  		prctl_map.start_brk = addr;
2178  		break;
2179  	case PR_SET_MM_BRK:
2180  		prctl_map.brk = addr;
2181  		break;
2182  	case PR_SET_MM_ARG_START:
2183  		prctl_map.arg_start = addr;
2184  		break;
2185  	case PR_SET_MM_ARG_END:
2186  		prctl_map.arg_end = addr;
2187  		break;
2188  	case PR_SET_MM_ENV_START:
2189  		prctl_map.env_start = addr;
2190  		break;
2191  	case PR_SET_MM_ENV_END:
2192  		prctl_map.env_end = addr;
2193  		break;
2194  	default:
2195  		goto out;
2196  	}
2197  
2198  	error = validate_prctl_map_addr(&prctl_map);
2199  	if (error)
2200  		goto out;
2201  
2202  	switch (opt) {
2203  	/*
2204  	 * If the command line arguments and environment
2205  	 * are placed somewhere else on the stack, we can
2206  	 * set them up here: ARG_START/END to set up the
2207  	 * command line arguments and ENV_START/END for
2208  	 * the environment.
2209  	 */
2210  	case PR_SET_MM_START_STACK:
2211  	case PR_SET_MM_ARG_START:
2212  	case PR_SET_MM_ARG_END:
2213  	case PR_SET_MM_ENV_START:
2214  	case PR_SET_MM_ENV_END:
2215  		if (!vma) {
2216  			error = -EFAULT;
2217  			goto out;
2218  		}
2219  	}
2220  
2221  	mm->start_code	= prctl_map.start_code;
2222  	mm->end_code	= prctl_map.end_code;
2223  	mm->start_data	= prctl_map.start_data;
2224  	mm->end_data	= prctl_map.end_data;
2225  	mm->start_brk	= prctl_map.start_brk;
2226  	mm->brk		= prctl_map.brk;
2227  	mm->start_stack	= prctl_map.start_stack;
2228  	mm->arg_start	= prctl_map.arg_start;
2229  	mm->arg_end	= prctl_map.arg_end;
2230  	mm->env_start	= prctl_map.env_start;
2231  	mm->env_end	= prctl_map.env_end;
2232  
2233  	error = 0;
2234  out:
2235  	spin_unlock(&mm->arg_lock);
2236  	mmap_read_unlock(mm);
2237  	return error;
2238  }
2239  
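/*
 * Illustrative userspace sketch (not part of the kernel sources):
 * moving a single field with the legacy one-value-at-a-time interface
 * handled above.  Requires CAP_SYS_RESOURCE; move_brk() is a
 * hypothetical helper and new_brk must keep the overall map consistent
 * per validate_prctl_map_addr().
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	static int move_brk(unsigned long new_brk)
 *	{
 *		return prctl(PR_SET_MM, PR_SET_MM_BRK, new_brk, 0, 0);
 *	}
 */
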
2240  #ifdef CONFIG_CHECKPOINT_RESTORE
2241  static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
2242  {
2243  	return put_user(me->clear_child_tid, tid_addr);
2244  }
2245  #else
2246  static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
2247  {
2248  	return -EINVAL;
2249  }
2250  #endif
2251  
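/*
 * Illustrative userspace sketch (not part of the kernel sources):
 * reading back the clear_child_tid pointer registered with
 * set_tid_address(2).  get_tid_address() is a hypothetical helper; the
 * call fails with EINVAL on kernels built without
 * CONFIG_CHECKPOINT_RESTORE.
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	static int get_tid_address(int **tid_addr)
 *	{
 *		return prctl(PR_GET_TID_ADDRESS, (unsigned long)tid_addr,
 *			     0, 0, 0);
 *	}
 */
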
2252  static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2253  {
2254  	/*
2255  	 * If the task has has_child_subreaper set, all its
2256  	 * descendants already have this flag too and new
2257  	 * descendants will inherit it on fork, so skip them.
2258  	 *
2259  	 * If we've found the child_reaper, skip descendants
2260  	 * in its subtree as they will never get out of that
2261  	 * pidns.
2262  	 */
2262  	if (p->signal->has_child_subreaper ||
2263  	    is_child_reaper(task_pid(p)))
2264  		return 0;
2265  
2266  	p->signal->has_child_subreaper = 1;
2267  	return 1;
2268  }
2269  
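/*
 * Illustrative userspace sketch (not part of the kernel sources): a
 * service manager marking itself as a child subreaper so that orphaned
 * descendants are reparented to it rather than to init.
 * become_subreaper() is a hypothetical helper.
 *
 *	#include <sys/prctl.h>
 *
 *	static int become_subreaper(void)
 *	{
 *		return prctl(PR_SET_CHILD_SUBREAPER, 1, 0, 0, 0);
 *	}
 */
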
2270  int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2271  {
2272  	return -EINVAL;
2273  }
2274  
2275  int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2276  				    unsigned long ctrl)
2277  {
2278  	return -EINVAL;
2279  }
2280  
2281  #define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
2282  
2283  SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2284  		unsigned long, arg4, unsigned long, arg5)
2285  {
2286  	struct task_struct *me = current;
2287  	unsigned char comm[sizeof(me->comm)];
2288  	long error;
2289  
2290  	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2291  	if (error != -ENOSYS)
2292  		return error;
2293  
2294  	error = 0;
2295  	switch (option) {
2296  	case PR_SET_PDEATHSIG:
2297  		if (!valid_signal(arg2)) {
2298  			error = -EINVAL;
2299  			break;
2300  		}
2301  		me->pdeath_signal = arg2;
2302  		break;
2303  	case PR_GET_PDEATHSIG:
2304  		error = put_user(me->pdeath_signal, (int __user *)arg2);
2305  		break;
2306  	case PR_GET_DUMPABLE:
2307  		error = get_dumpable(me->mm);
2308  		break;
2309  	case PR_SET_DUMPABLE:
2310  		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2311  			error = -EINVAL;
2312  			break;
2313  		}
2314  		set_dumpable(me->mm, arg2);
2315  		break;
2316  
2317  	case PR_SET_UNALIGN:
2318  		error = SET_UNALIGN_CTL(me, arg2);
2319  		break;
2320  	case PR_GET_UNALIGN:
2321  		error = GET_UNALIGN_CTL(me, arg2);
2322  		break;
2323  	case PR_SET_FPEMU:
2324  		error = SET_FPEMU_CTL(me, arg2);
2325  		break;
2326  	case PR_GET_FPEMU:
2327  		error = GET_FPEMU_CTL(me, arg2);
2328  		break;
2329  	case PR_SET_FPEXC:
2330  		error = SET_FPEXC_CTL(me, arg2);
2331  		break;
2332  	case PR_GET_FPEXC:
2333  		error = GET_FPEXC_CTL(me, arg2);
2334  		break;
2335  	case PR_GET_TIMING:
2336  		error = PR_TIMING_STATISTICAL;
2337  		break;
2338  	case PR_SET_TIMING:
2339  		if (arg2 != PR_TIMING_STATISTICAL)
2340  			error = -EINVAL;
2341  		break;
2342  	case PR_SET_NAME:
2343  		comm[sizeof(me->comm) - 1] = 0;
2344  		if (strncpy_from_user(comm, (char __user *)arg2,
2345  				      sizeof(me->comm) - 1) < 0)
2346  			return -EFAULT;
2347  		set_task_comm(me, comm);
2348  		proc_comm_connector(me);
2349  		break;
2350  	case PR_GET_NAME:
2351  		get_task_comm(comm, me);
2352  		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2353  			return -EFAULT;
2354  		break;
2355  	case PR_GET_ENDIAN:
2356  		error = GET_ENDIAN(me, arg2);
2357  		break;
2358  	case PR_SET_ENDIAN:
2359  		error = SET_ENDIAN(me, arg2);
2360  		break;
2361  	case PR_GET_SECCOMP:
2362  		error = prctl_get_seccomp();
2363  		break;
2364  	case PR_SET_SECCOMP:
2365  		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2366  		break;
2367  	case PR_GET_TSC:
2368  		error = GET_TSC_CTL(arg2);
2369  		break;
2370  	case PR_SET_TSC:
2371  		error = SET_TSC_CTL(arg2);
2372  		break;
2373  	case PR_TASK_PERF_EVENTS_DISABLE:
2374  		error = perf_event_task_disable();
2375  		break;
2376  	case PR_TASK_PERF_EVENTS_ENABLE:
2377  		error = perf_event_task_enable();
2378  		break;
2379  	case PR_GET_TIMERSLACK:
2380  		if (current->timer_slack_ns > ULONG_MAX)
2381  			error = ULONG_MAX;
2382  		else
2383  			error = current->timer_slack_ns;
2384  		break;
2385  	case PR_SET_TIMERSLACK:
2386  		if (arg2 <= 0)
2387  			current->timer_slack_ns =
2388  					current->default_timer_slack_ns;
2389  		else
2390  			current->timer_slack_ns = arg2;
2391  		break;
2392  	case PR_MCE_KILL:
2393  		if (arg4 | arg5)
2394  			return -EINVAL;
2395  		switch (arg2) {
2396  		case PR_MCE_KILL_CLEAR:
2397  			if (arg3 != 0)
2398  				return -EINVAL;
2399  			current->flags &= ~PF_MCE_PROCESS;
2400  			break;
2401  		case PR_MCE_KILL_SET:
2402  			current->flags |= PF_MCE_PROCESS;
2403  			if (arg3 == PR_MCE_KILL_EARLY)
2404  				current->flags |= PF_MCE_EARLY;
2405  			else if (arg3 == PR_MCE_KILL_LATE)
2406  				current->flags &= ~PF_MCE_EARLY;
2407  			else if (arg3 == PR_MCE_KILL_DEFAULT)
2408  				current->flags &=
2409  						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2410  			else
2411  				return -EINVAL;
2412  			break;
2413  		default:
2414  			return -EINVAL;
2415  		}
2416  		break;
2417  	case PR_MCE_KILL_GET:
2418  		if (arg2 | arg3 | arg4 | arg5)
2419  			return -EINVAL;
2420  		if (current->flags & PF_MCE_PROCESS)
2421  			error = (current->flags & PF_MCE_EARLY) ?
2422  				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2423  		else
2424  			error = PR_MCE_KILL_DEFAULT;
2425  		break;
2426  	case PR_SET_MM:
2427  		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2428  		break;
2429  	case PR_GET_TID_ADDRESS:
2430  		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
2431  		break;
2432  	case PR_SET_CHILD_SUBREAPER:
2433  		me->signal->is_child_subreaper = !!arg2;
2434  		if (!arg2)
2435  			break;
2436  
2437  		walk_process_tree(me, propagate_has_child_subreaper, NULL);
2438  		break;
2439  	case PR_GET_CHILD_SUBREAPER:
2440  		error = put_user(me->signal->is_child_subreaper,
2441  				 (int __user *)arg2);
2442  		break;
2443  	case PR_SET_NO_NEW_PRIVS:
2444  		if (arg2 != 1 || arg3 || arg4 || arg5)
2445  			return -EINVAL;
2446  
2447  		task_set_no_new_privs(current);
2448  		break;
2449  	case PR_GET_NO_NEW_PRIVS:
2450  		if (arg2 || arg3 || arg4 || arg5)
2451  			return -EINVAL;
2452  		return task_no_new_privs(current) ? 1 : 0;
2453  	case PR_GET_THP_DISABLE:
2454  		if (arg2 || arg3 || arg4 || arg5)
2455  			return -EINVAL;
2456  		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2457  		break;
2458  	case PR_SET_THP_DISABLE:
2459  		if (arg3 || arg4 || arg5)
2460  			return -EINVAL;
2461  		if (mmap_write_lock_killable(me->mm))
2462  			return -EINTR;
2463  		if (arg2)
2464  			set_bit(MMF_DISABLE_THP, &me->mm->flags);
2465  		else
2466  			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2467  		mmap_write_unlock(me->mm);
2468  		break;
2469  	case PR_MPX_ENABLE_MANAGEMENT:
2470  	case PR_MPX_DISABLE_MANAGEMENT:
2471  		/* No longer implemented: */
2472  		return -EINVAL;
2473  	case PR_SET_FP_MODE:
2474  		error = SET_FP_MODE(me, arg2);
2475  		break;
2476  	case PR_GET_FP_MODE:
2477  		error = GET_FP_MODE(me);
2478  		break;
2479  	case PR_SVE_SET_VL:
2480  		error = SVE_SET_VL(arg2);
2481  		break;
2482  	case PR_SVE_GET_VL:
2483  		error = SVE_GET_VL();
2484  		break;
2485  	case PR_GET_SPECULATION_CTRL:
2486  		if (arg3 || arg4 || arg5)
2487  			return -EINVAL;
2488  		error = arch_prctl_spec_ctrl_get(me, arg2);
2489  		break;
2490  	case PR_SET_SPECULATION_CTRL:
2491  		if (arg4 || arg5)
2492  			return -EINVAL;
2493  		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2494  		break;
2495  	case PR_PAC_RESET_KEYS:
2496  		if (arg3 || arg4 || arg5)
2497  			return -EINVAL;
2498  		error = PAC_RESET_KEYS(me, arg2);
2499  		break;
2500  	case PR_SET_TAGGED_ADDR_CTRL:
2501  		if (arg3 || arg4 || arg5)
2502  			return -EINVAL;
2503  		error = SET_TAGGED_ADDR_CTRL(arg2);
2504  		break;
2505  	case PR_GET_TAGGED_ADDR_CTRL:
2506  		if (arg2 || arg3 || arg4 || arg5)
2507  			return -EINVAL;
2508  		error = GET_TAGGED_ADDR_CTRL();
2509  		break;
2510  	case PR_SET_IO_FLUSHER:
2511  		if (!capable(CAP_SYS_RESOURCE))
2512  			return -EPERM;
2513  
2514  		if (arg3 || arg4 || arg5)
2515  			return -EINVAL;
2516  
2517  		if (arg2 == 1)
2518  			current->flags |= PR_IO_FLUSHER;
2519  		else if (!arg2)
2520  			current->flags &= ~PR_IO_FLUSHER;
2521  		else
2522  			return -EINVAL;
2523  		break;
2524  	case PR_GET_IO_FLUSHER:
2525  		if (!capable(CAP_SYS_RESOURCE))
2526  			return -EPERM;
2527  
2528  		if (arg2 || arg3 || arg4 || arg5)
2529  			return -EINVAL;
2530  
2531  		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
2532  		break;
2533  	case PR_SET_SYSCALL_USER_DISPATCH:
2534  		error = set_syscall_user_dispatch(arg2, arg3, arg4,
2535  						  (char __user *) arg5);
2536  		break;
2537  	default:
2538  		error = -EINVAL;
2539  		break;
2540  	}
2541  	return error;
2542  }
2543  
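/*
 * Illustrative userspace sketch (not part of the kernel sources): the
 * PR_SET_NAME/PR_GET_NAME round trip handled above.  The buffer must
 * hold at least 16 bytes (TASK_COMM_LEN); longer names are silently
 * truncated by the kernel.
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	int main(void)
 *	{
 *		char comm[16];
 *
 *		if (prctl(PR_SET_NAME, "worker-0", 0, 0, 0))
 *			return 1;
 *		if (prctl(PR_GET_NAME, comm, 0, 0, 0))
 *			return 1;
 *		printf("%s\n", comm);	// prints "worker-0"
 *		return 0;
 *	}
 */
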
2544  SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2545  		struct getcpu_cache __user *, unused)
2546  {
2547  	int err = 0;
2548  	int cpu = raw_smp_processor_id();
2549  
2550  	if (cpup)
2551  		err |= put_user(cpu, cpup);
2552  	if (nodep)
2553  		err |= put_user(cpu_to_node(cpu), nodep);
2554  	return err ? -EFAULT : 0;
2555  }
2556  
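/*
 * Illustrative userspace sketch (not part of the kernel sources): glibc
 * wraps this syscall as getcpu(2); the third "cache" argument has been
 * unused since Linux 2.6.24, as the parameter name above suggests.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int cpu, node;
 *
 *		if (getcpu(&cpu, &node))
 *			return 1;
 *		printf("cpu %u node %u\n", cpu, node);
 *		return 0;
 *	}
 */
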
2557  /**
2558   * do_sysinfo - fill in sysinfo struct
2559   * @info: pointer to buffer to fill
2560   */
2561  static int do_sysinfo(struct sysinfo *info)
2562  {
2563  	unsigned long mem_total, sav_total;
2564  	unsigned int mem_unit, bitcount;
2565  	struct timespec64 tp;
2566  
2567  	memset(info, 0, sizeof(struct sysinfo));
2568  
2569  	ktime_get_boottime_ts64(&tp);
2570  	timens_add_boottime(&tp);
2571  	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2572  
2573  	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2574  
2575  	info->procs = nr_threads;
2576  
2577  	si_meminfo(info);
2578  	si_swapinfo(info);
2579  
2580  	/*
2581  	 * If the sum of all the available memory (i.e. ram + swap)
2582  	 * is less than can be stored in a 32 bit unsigned long then
2583  	 * we can be binary compatible with 2.2.x kernels.  If not,
2584  	 * well, in that case 2.2.x was broken anyways...
2585  	 *
2586  	 *  -Erik Andersen <andersee@debian.org>
2587  	 */
2588  
2589  	mem_total = info->totalram + info->totalswap;
2590  	if (mem_total < info->totalram || mem_total < info->totalswap)
2591  		goto out;
2592  	bitcount = 0;
2593  	mem_unit = info->mem_unit;
2594  	while (mem_unit > 1) {
2595  		bitcount++;
2596  		mem_unit >>= 1;
2597  		sav_total = mem_total;
2598  		mem_total <<= 1;
2599  		if (mem_total < sav_total)
2600  			goto out;
2601  	}
2602  
2603  	/*
2604  	 * If mem_total did not overflow, multiply all memory values by
2605  	 * info->mem_unit and set it to 1.  This leaves things compatible
2606  	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2607  	 * kernels...
2608  	 */
2609  
2610  	info->mem_unit = 1;
2611  	info->totalram <<= bitcount;
2612  	info->freeram <<= bitcount;
2613  	info->sharedram <<= bitcount;
2614  	info->bufferram <<= bitcount;
2615  	info->totalswap <<= bitcount;
2616  	info->freeswap <<= bitcount;
2617  	info->totalhigh <<= bitcount;
2618  	info->freehigh <<= bitcount;
2619  
2620  out:
2621  	return 0;
2622  }
2623  
2624  SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2625  {
2626  	struct sysinfo val;
2627  
2628  	do_sysinfo(&val);
2629  
2630  	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2631  		return -EFAULT;
2632  
2633  	return 0;
2634  }
2635  
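/*
 * Illustrative userspace sketch (not part of the kernel sources):
 * consumers must scale the ram/swap fields by mem_unit, which may be
 * larger than 1 on large-memory or compat configurations (see below).
 *
 *	#include <stdio.h>
 *	#include <sys/sysinfo.h>
 *
 *	int main(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si))
 *			return 1;
 *		printf("total ram: %llu bytes\n",
 *		       (unsigned long long)si.totalram * si.mem_unit);
 *		return 0;
 *	}
 */
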
2636  #ifdef CONFIG_COMPAT
2637  struct compat_sysinfo {
2638  	s32 uptime;
2639  	u32 loads[3];
2640  	u32 totalram;
2641  	u32 freeram;
2642  	u32 sharedram;
2643  	u32 bufferram;
2644  	u32 totalswap;
2645  	u32 freeswap;
2646  	u16 procs;
2647  	u16 pad;
2648  	u32 totalhigh;
2649  	u32 freehigh;
2650  	u32 mem_unit;
2651  	char _f[20-2*sizeof(u32)-sizeof(int)];
2652  };
2653  
2654  COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2655  {
2656  	struct sysinfo s;
2657  	struct compat_sysinfo s_32;
2658  
2659  	do_sysinfo(&s);
2660  
2661  	/* Check to see if any memory value is too large for 32 bits and
2662  	 * scale down if needed.
2663  	 */
2664  	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2665  		int bitcount = 0;
2666  
2667  		while (s.mem_unit < PAGE_SIZE) {
2668  			s.mem_unit <<= 1;
2669  			bitcount++;
2670  		}
2671  
2672  		s.totalram >>= bitcount;
2673  		s.freeram >>= bitcount;
2674  		s.sharedram >>= bitcount;
2675  		s.bufferram >>= bitcount;
2676  		s.totalswap >>= bitcount;
2677  		s.freeswap >>= bitcount;
2678  		s.totalhigh >>= bitcount;
2679  		s.freehigh >>= bitcount;
2680  	}
2681  
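	/*
	 * Worked example of the scaling above (illustrative numbers): with
	 * 8 GiB of RAM, PAGE_SIZE 4096 and mem_unit 1, totalram
	 * (0x200000000) does not fit in 32 bits.  The loop raises mem_unit
	 * to 4096 (bitcount = 12) and shifts totalram down to 0x200000,
	 * which fits; userspace recovers the original value as
	 * totalram * mem_unit.
	 */
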
2682  	memset(&s_32, 0, sizeof(s_32));
2683  	s_32.uptime = s.uptime;
2684  	s_32.loads[0] = s.loads[0];
2685  	s_32.loads[1] = s.loads[1];
2686  	s_32.loads[2] = s.loads[2];
2687  	s_32.totalram = s.totalram;
2688  	s_32.freeram = s.freeram;
2689  	s_32.sharedram = s.sharedram;
2690  	s_32.bufferram = s.bufferram;
2691  	s_32.totalswap = s.totalswap;
2692  	s_32.freeswap = s.freeswap;
2693  	s_32.procs = s.procs;
2694  	s_32.totalhigh = s.totalhigh;
2695  	s_32.freehigh = s.freehigh;
2696  	s_32.mem_unit = s.mem_unit;
2697  	if (copy_to_user(info, &s_32, sizeof(s_32)))
2698  		return -EFAULT;
2699  	return 0;
2700  }
2701  #endif /* CONFIG_COMPAT */
2702