1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   *  linux/kernel/sys.c
4   *
5   *  Copyright (C) 1991, 1992  Linus Torvalds
6   */
7  
8  #include <linux/export.h>
9  #include <linux/mm.h>
10  #include <linux/mm_inline.h>
11  #include <linux/utsname.h>
12  #include <linux/mman.h>
13  #include <linux/reboot.h>
14  #include <linux/prctl.h>
15  #include <linux/highuid.h>
16  #include <linux/fs.h>
17  #include <linux/kmod.h>
18  #include <linux/ksm.h>
19  #include <linux/perf_event.h>
20  #include <linux/resource.h>
21  #include <linux/kernel.h>
22  #include <linux/workqueue.h>
23  #include <linux/capability.h>
24  #include <linux/device.h>
25  #include <linux/key.h>
26  #include <linux/times.h>
27  #include <linux/posix-timers.h>
28  #include <linux/security.h>
29  #include <linux/random.h>
30  #include <linux/suspend.h>
31  #include <linux/tty.h>
32  #include <linux/signal.h>
33  #include <linux/cn_proc.h>
34  #include <linux/getcpu.h>
35  #include <linux/task_io_accounting_ops.h>
36  #include <linux/seccomp.h>
37  #include <linux/cpu.h>
38  #include <linux/personality.h>
39  #include <linux/ptrace.h>
40  #include <linux/fs_struct.h>
41  #include <linux/file.h>
42  #include <linux/mount.h>
43  #include <linux/gfp.h>
44  #include <linux/syscore_ops.h>
45  #include <linux/version.h>
46  #include <linux/ctype.h>
47  #include <linux/syscall_user_dispatch.h>
48  
49  #include <linux/compat.h>
50  #include <linux/syscalls.h>
51  #include <linux/kprobes.h>
52  #include <linux/user_namespace.h>
53  #include <linux/time_namespace.h>
54  #include <linux/binfmts.h>
55  
56  #include <linux/sched.h>
57  #include <linux/sched/autogroup.h>
58  #include <linux/sched/loadavg.h>
59  #include <linux/sched/stat.h>
60  #include <linux/sched/mm.h>
61  #include <linux/sched/coredump.h>
62  #include <linux/sched/task.h>
63  #include <linux/sched/cputime.h>
64  #include <linux/rcupdate.h>
65  #include <linux/uidgid.h>
66  #include <linux/cred.h>
67  
68  #include <linux/nospec.h>
69  
70  #include <linux/kmsg_dump.h>
71  /* Move somewhere else to avoid recompiling? */
72  #include <generated/utsrelease.h>
73  
74  #include <linux/uaccess.h>
75  #include <asm/io.h>
76  #include <asm/unistd.h>
77  
78  #include "uid16.h"
79  
80  #ifndef SET_UNALIGN_CTL
81  # define SET_UNALIGN_CTL(a, b)	(-EINVAL)
82  #endif
83  #ifndef GET_UNALIGN_CTL
84  # define GET_UNALIGN_CTL(a, b)	(-EINVAL)
85  #endif
86  #ifndef SET_FPEMU_CTL
87  # define SET_FPEMU_CTL(a, b)	(-EINVAL)
88  #endif
89  #ifndef GET_FPEMU_CTL
90  # define GET_FPEMU_CTL(a, b)	(-EINVAL)
91  #endif
92  #ifndef SET_FPEXC_CTL
93  # define SET_FPEXC_CTL(a, b)	(-EINVAL)
94  #endif
95  #ifndef GET_FPEXC_CTL
96  # define GET_FPEXC_CTL(a, b)	(-EINVAL)
97  #endif
98  #ifndef GET_ENDIAN
99  # define GET_ENDIAN(a, b)	(-EINVAL)
100  #endif
101  #ifndef SET_ENDIAN
102  # define SET_ENDIAN(a, b)	(-EINVAL)
103  #endif
104  #ifndef GET_TSC_CTL
105  # define GET_TSC_CTL(a)		(-EINVAL)
106  #endif
107  #ifndef SET_TSC_CTL
108  # define SET_TSC_CTL(a)		(-EINVAL)
109  #endif
110  #ifndef GET_FP_MODE
111  # define GET_FP_MODE(a)		(-EINVAL)
112  #endif
113  #ifndef SET_FP_MODE
114  # define SET_FP_MODE(a, b)	(-EINVAL)
115  #endif
116  #ifndef SVE_SET_VL
117  # define SVE_SET_VL(a)		(-EINVAL)
118  #endif
119  #ifndef SVE_GET_VL
120  # define SVE_GET_VL()		(-EINVAL)
121  #endif
122  #ifndef SME_SET_VL
123  # define SME_SET_VL(a)		(-EINVAL)
124  #endif
125  #ifndef SME_GET_VL
126  # define SME_GET_VL()		(-EINVAL)
127  #endif
128  #ifndef PAC_RESET_KEYS
129  # define PAC_RESET_KEYS(a, b)	(-EINVAL)
130  #endif
131  #ifndef PAC_SET_ENABLED_KEYS
132  # define PAC_SET_ENABLED_KEYS(a, b, c)	(-EINVAL)
133  #endif
134  #ifndef PAC_GET_ENABLED_KEYS
135  # define PAC_GET_ENABLED_KEYS(a)	(-EINVAL)
136  #endif
137  #ifndef SET_TAGGED_ADDR_CTRL
138  # define SET_TAGGED_ADDR_CTRL(a)	(-EINVAL)
139  #endif
140  #ifndef GET_TAGGED_ADDR_CTRL
141  # define GET_TAGGED_ADDR_CTRL()		(-EINVAL)
142  #endif
143  
144  /*
145   * this is where the system-wide overflow UID and GID are defined, for
146   * architectures that now have 32-bit UID/GID but didn't in the past
147   */
148  
149  int overflowuid = DEFAULT_OVERFLOWUID;
150  int overflowgid = DEFAULT_OVERFLOWGID;
151  
152  EXPORT_SYMBOL(overflowuid);
153  EXPORT_SYMBOL(overflowgid);
154  
155  /*
156   * the same as above, but for filesystems which can only store a 16-bit
157   * UID and GID. As such, this is needed on all architectures.
158   */
159  
160  int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
161  int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
162  
163  EXPORT_SYMBOL(fs_overflowuid);
164  EXPORT_SYMBOL(fs_overflowgid);
165  
166  /*
167   * Returns true if current's euid is the same as p's uid or euid,
168   * or if current has CAP_SYS_NICE in p's user_ns.
169   *
170   * Called with rcu_read_lock held, so the creds are safe.
171   */
172  static bool set_one_prio_perm(struct task_struct *p)
173  {
174  	const struct cred *cred = current_cred(), *pcred = __task_cred(p);
175  
176  	if (uid_eq(pcred->uid,  cred->euid) ||
177  	    uid_eq(pcred->euid, cred->euid))
178  		return true;
179  	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
180  		return true;
181  	return false;
182  }
183  
184  /*
185   * set the priority of a task
186   * - the caller must hold the RCU read lock
187   */
188  static int set_one_prio(struct task_struct *p, int niceval, int error)
189  {
190  	int no_nice;
191  
192  	if (!set_one_prio_perm(p)) {
193  		error = -EPERM;
194  		goto out;
195  	}
196  	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
197  		error = -EACCES;
198  		goto out;
199  	}
200  	no_nice = security_task_setnice(p, niceval);
201  	if (no_nice) {
202  		error = no_nice;
203  		goto out;
204  	}
205  	if (error == -ESRCH)
206  		error = 0;
207  	set_user_nice(p, niceval);
208  out:
209  	return error;
210  }
211  
212  SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
213  {
214  	struct task_struct *g, *p;
215  	struct user_struct *user;
216  	const struct cred *cred = current_cred();
217  	int error = -EINVAL;
218  	struct pid *pgrp;
219  	kuid_t uid;
220  
221  	if (which > PRIO_USER || which < PRIO_PROCESS)
222  		goto out;
223  
224  	/* normalize: avoid signed division (rounding problems) */
225  	error = -ESRCH;
226  	if (niceval < MIN_NICE)
227  		niceval = MIN_NICE;
228  	if (niceval > MAX_NICE)
229  		niceval = MAX_NICE;
230  
231  	rcu_read_lock();
232  	switch (which) {
233  	case PRIO_PROCESS:
234  		if (who)
235  			p = find_task_by_vpid(who);
236  		else
237  			p = current;
238  		if (p)
239  			error = set_one_prio(p, niceval, error);
240  		break;
241  	case PRIO_PGRP:
242  		if (who)
243  			pgrp = find_vpid(who);
244  		else
245  			pgrp = task_pgrp(current);
246  		read_lock(&tasklist_lock);
247  		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
248  			error = set_one_prio(p, niceval, error);
249  		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
250  		read_unlock(&tasklist_lock);
251  		break;
252  	case PRIO_USER:
253  		uid = make_kuid(cred->user_ns, who);
254  		user = cred->user;
255  		if (!who)
256  			uid = cred->uid;
257  		else if (!uid_eq(uid, cred->uid)) {
258  			user = find_user(uid);
259  			if (!user)
260  				goto out_unlock;	/* No processes for this user */
261  		}
262  		for_each_process_thread(g, p) {
263  			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
264  				error = set_one_prio(p, niceval, error);
265  		}
266  		if (!uid_eq(uid, cred->uid))
267  			free_uid(user);		/* For find_user() */
268  		break;
269  	}
270  out_unlock:
271  	rcu_read_unlock();
272  out:
273  	return error;
274  }
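
/*
 * Illustrative userspace call (editor's sketch, not part of the original
 * file): renice every process in a process group.  The kernel clamps
 * niceval to [MIN_NICE, MAX_NICE], and a permission failure on any task
 * in the group is reported even though the remaining tasks are still
 * reniced.
 *
 *	#include <sys/types.h>
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int renice_pgrp(pid_t pgid, int niceval)
 *	{
 *		if (setpriority(PRIO_PGRP, pgid, niceval) == -1) {
 *			perror("setpriority");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */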
275  
276  /*
277   * Ugh. To avoid negative return values, "getpriority()" will
278   * not return the normal nice-value, but a negated value that
279   * has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
280   * to stay compatible.
281   */
282  SYSCALL_DEFINE2(getpriority, int, which, int, who)
283  {
284  	struct task_struct *g, *p;
285  	struct user_struct *user;
286  	const struct cred *cred = current_cred();
287  	long niceval, retval = -ESRCH;
288  	struct pid *pgrp;
289  	kuid_t uid;
290  
291  	if (which > PRIO_USER || which < PRIO_PROCESS)
292  		return -EINVAL;
293  
294  	rcu_read_lock();
295  	switch (which) {
296  	case PRIO_PROCESS:
297  		if (who)
298  			p = find_task_by_vpid(who);
299  		else
300  			p = current;
301  		if (p) {
302  			niceval = nice_to_rlimit(task_nice(p));
303  			if (niceval > retval)
304  				retval = niceval;
305  		}
306  		break;
307  	case PRIO_PGRP:
308  		if (who)
309  			pgrp = find_vpid(who);
310  		else
311  			pgrp = task_pgrp(current);
312  		read_lock(&tasklist_lock);
313  		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
314  			niceval = nice_to_rlimit(task_nice(p));
315  			if (niceval > retval)
316  				retval = niceval;
317  		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
318  		read_unlock(&tasklist_lock);
319  		break;
320  	case PRIO_USER:
321  		uid = make_kuid(cred->user_ns, who);
322  		user = cred->user;
323  		if (!who)
324  			uid = cred->uid;
325  		else if (!uid_eq(uid, cred->uid)) {
326  			user = find_user(uid);
327  			if (!user)
328  				goto out_unlock;	/* No processes for this user */
329  		}
330  		for_each_process_thread(g, p) {
331  			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
332  				niceval = nice_to_rlimit(task_nice(p));
333  				if (niceval > retval)
334  					retval = niceval;
335  			}
336  		}
337  		if (!uid_eq(uid, cred->uid))
338  			free_uid(user);		/* for find_user() */
339  		break;
340  	}
341  out_unlock:
342  	rcu_read_unlock();
343  
344  	return retval;
345  }
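
/*
 * Illustrative decoding of the raw return value (editor's sketch, not
 * part of the original file): the syscall returns 20 - nice (40..1); a
 * C library wrapper such as glibc's getpriority() converts this back to
 * the usual -20..19 range before returning to the caller.
 *
 *	#include <sys/resource.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice_value = 20 - (int)raw;		raw is 40..1, nice is -20..19
 */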
346  
347  /*
348   * Unprivileged users may change the real gid to the effective gid
349   * or vice versa.  (BSD-style)
350   *
351   * If you set the real gid at all, or set the effective gid to a value not
352   * equal to the real gid, then the saved gid is set to the new effective gid.
353   *
354   * This makes it possible for a setgid program to completely drop its
355   * privileges, which is often a useful assertion to make when you are doing
356   * a security audit over a program.
357   *
358   * The general idea is that a program which uses just setregid() will be
359   * 100% compatible with BSD.  A program which uses just setgid() will be
360   * 100% compatible with POSIX with saved IDs.
361   *
362   * SMP: There are no races; the GIDs are checked only by filesystem
363   *      operations (as far as semantic preservation is concerned).
364   */
365  #ifdef CONFIG_MULTIUSER
366  long __sys_setregid(gid_t rgid, gid_t egid)
367  {
368  	struct user_namespace *ns = current_user_ns();
369  	const struct cred *old;
370  	struct cred *new;
371  	int retval;
372  	kgid_t krgid, kegid;
373  
374  	krgid = make_kgid(ns, rgid);
375  	kegid = make_kgid(ns, egid);
376  
377  	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
378  		return -EINVAL;
379  	if ((egid != (gid_t) -1) && !gid_valid(kegid))
380  		return -EINVAL;
381  
382  	new = prepare_creds();
383  	if (!new)
384  		return -ENOMEM;
385  	old = current_cred();
386  
387  	retval = -EPERM;
388  	if (rgid != (gid_t) -1) {
389  		if (gid_eq(old->gid, krgid) ||
390  		    gid_eq(old->egid, krgid) ||
391  		    ns_capable_setid(old->user_ns, CAP_SETGID))
392  			new->gid = krgid;
393  		else
394  			goto error;
395  	}
396  	if (egid != (gid_t) -1) {
397  		if (gid_eq(old->gid, kegid) ||
398  		    gid_eq(old->egid, kegid) ||
399  		    gid_eq(old->sgid, kegid) ||
400  		    ns_capable_setid(old->user_ns, CAP_SETGID))
401  			new->egid = kegid;
402  		else
403  			goto error;
404  	}
405  
406  	if (rgid != (gid_t) -1 ||
407  	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
408  		new->sgid = new->egid;
409  	new->fsgid = new->egid;
410  
411  	retval = security_task_fix_setgid(new, old, LSM_SETID_RE);
412  	if (retval < 0)
413  		goto error;
414  
415  	return commit_creds(new);
416  
417  error:
418  	abort_creds(new);
419  	return retval;
420  }
421  
422  SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
423  {
424  	return __sys_setregid(rgid, egid);
425  }
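
/*
 * Illustrative privilege drop (editor's sketch, not part of the original
 * file): per the comment above, a setgid program can discard its elevated
 * group for good by setting the real gid, which drags the saved gid along
 * to the new effective gid.
 *
 *	#include <unistd.h>
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) == -1)
 *		_exit(1);
 */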
426  
427  /*
428   * setgid() is implemented like SysV w/ SAVED_IDS
429   *
430   * SMP: Same implicit races as above.
431   */
432  long __sys_setgid(gid_t gid)
433  {
434  	struct user_namespace *ns = current_user_ns();
435  	const struct cred *old;
436  	struct cred *new;
437  	int retval;
438  	kgid_t kgid;
439  
440  	kgid = make_kgid(ns, gid);
441  	if (!gid_valid(kgid))
442  		return -EINVAL;
443  
444  	new = prepare_creds();
445  	if (!new)
446  		return -ENOMEM;
447  	old = current_cred();
448  
449  	retval = -EPERM;
450  	if (ns_capable_setid(old->user_ns, CAP_SETGID))
451  		new->gid = new->egid = new->sgid = new->fsgid = kgid;
452  	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
453  		new->egid = new->fsgid = kgid;
454  	else
455  		goto error;
456  
457  	retval = security_task_fix_setgid(new, old, LSM_SETID_ID);
458  	if (retval < 0)
459  		goto error;
460  
461  	return commit_creds(new);
462  
463  error:
464  	abort_creds(new);
465  	return retval;
466  }
467  
468  SYSCALL_DEFINE1(setgid, gid_t, gid)
469  {
470  	return __sys_setgid(gid);
471  }
472  
473  /*
474   * change the user struct in a credentials set to match the new UID
475   */
476  static int set_user(struct cred *new)
477  {
478  	struct user_struct *new_user;
479  
480  	new_user = alloc_uid(new->uid);
481  	if (!new_user)
482  		return -EAGAIN;
483  
484  	free_uid(new->user);
485  	new->user = new_user;
486  	return 0;
487  }
488  
489  static void flag_nproc_exceeded(struct cred *new)
490  {
491  	if (new->ucounts == current_ucounts())
492  		return;
493  
494  	/*
495  	 * We don't fail in case of NPROC limit excess here because too many
496  	 * poorly written programs don't check set*uid() return code, assuming
497  	 * it never fails if called by root.  We may still enforce NPROC limit
498  	 * for programs doing set*uid()+execve() by harmlessly deferring the
499  	 * failure to the execve() stage.
500  	 */
501  	if (is_rlimit_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
502  			new->user != INIT_USER)
503  		current->flags |= PF_NPROC_EXCEEDED;
504  	else
505  		current->flags &= ~PF_NPROC_EXCEEDED;
506  }
507  
508  /*
509   * Unprivileged users may change the real uid to the effective uid
510   * or vice versa.  (BSD-style)
511   *
512   * If you set the real uid at all, or set the effective uid to a value not
513   * equal to the real uid, then the saved uid is set to the new effective uid.
514   *
515   * This makes it possible for a setuid program to completely drop its
516   * privileges, which is often a useful assertion to make when you are doing
517   * a security audit over a program.
518   *
519   * The general idea is that a program which uses just setreuid() will be
520   * 100% compatible with BSD.  A program which uses just setuid() will be
521   * 100% compatible with POSIX with saved IDs.
522   */
523  long __sys_setreuid(uid_t ruid, uid_t euid)
524  {
525  	struct user_namespace *ns = current_user_ns();
526  	const struct cred *old;
527  	struct cred *new;
528  	int retval;
529  	kuid_t kruid, keuid;
530  
531  	kruid = make_kuid(ns, ruid);
532  	keuid = make_kuid(ns, euid);
533  
534  	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
535  		return -EINVAL;
536  	if ((euid != (uid_t) -1) && !uid_valid(keuid))
537  		return -EINVAL;
538  
539  	new = prepare_creds();
540  	if (!new)
541  		return -ENOMEM;
542  	old = current_cred();
543  
544  	retval = -EPERM;
545  	if (ruid != (uid_t) -1) {
546  		new->uid = kruid;
547  		if (!uid_eq(old->uid, kruid) &&
548  		    !uid_eq(old->euid, kruid) &&
549  		    !ns_capable_setid(old->user_ns, CAP_SETUID))
550  			goto error;
551  	}
552  
553  	if (euid != (uid_t) -1) {
554  		new->euid = keuid;
555  		if (!uid_eq(old->uid, keuid) &&
556  		    !uid_eq(old->euid, keuid) &&
557  		    !uid_eq(old->suid, keuid) &&
558  		    !ns_capable_setid(old->user_ns, CAP_SETUID))
559  			goto error;
560  	}
561  
562  	if (!uid_eq(new->uid, old->uid)) {
563  		retval = set_user(new);
564  		if (retval < 0)
565  			goto error;
566  	}
567  	if (ruid != (uid_t) -1 ||
568  	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
569  		new->suid = new->euid;
570  	new->fsuid = new->euid;
571  
572  	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
573  	if (retval < 0)
574  		goto error;
575  
576  	retval = set_cred_ucounts(new);
577  	if (retval < 0)
578  		goto error;
579  
580  	flag_nproc_exceeded(new);
581  	return commit_creds(new);
582  
583  error:
584  	abort_creds(new);
585  	return retval;
586  }
587  
588  SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
589  {
590  	return __sys_setreuid(ruid, euid);
591  }
592  
593  /*
594   * setuid() is implemented like SysV with SAVED_IDS
595   *
596   * Note that SAVED_ID's is deficient in that a setuid root program
597   * like sendmail, for example, cannot set its uid to be a normal
598   * user and then switch back, because if you're root, setuid() sets
599   * the saved uid too.  If you don't like this, blame the bright people
600   * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
601   * will allow a root program to temporarily drop privileges and be able to
602   * regain them by swapping the real and effective uid.
603   */
604  long __sys_setuid(uid_t uid)
605  {
606  	struct user_namespace *ns = current_user_ns();
607  	const struct cred *old;
608  	struct cred *new;
609  	int retval;
610  	kuid_t kuid;
611  
612  	kuid = make_kuid(ns, uid);
613  	if (!uid_valid(kuid))
614  		return -EINVAL;
615  
616  	new = prepare_creds();
617  	if (!new)
618  		return -ENOMEM;
619  	old = current_cred();
620  
621  	retval = -EPERM;
622  	if (ns_capable_setid(old->user_ns, CAP_SETUID)) {
623  		new->suid = new->uid = kuid;
624  		if (!uid_eq(kuid, old->uid)) {
625  			retval = set_user(new);
626  			if (retval < 0)
627  				goto error;
628  		}
629  	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
630  		goto error;
631  	}
632  
633  	new->fsuid = new->euid = kuid;
634  
635  	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
636  	if (retval < 0)
637  		goto error;
638  
639  	retval = set_cred_ucounts(new);
640  	if (retval < 0)
641  		goto error;
642  
643  	flag_nproc_exceeded(new);
644  	return commit_creds(new);
645  
646  error:
647  	abort_creds(new);
648  	return retval;
649  }
650  
651  SYSCALL_DEFINE1(setuid, uid_t, uid)
652  {
653  	return __sys_setuid(uid);
654  }
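
/*
 * Illustrative consequence of the note above (editor's sketch, not part
 * of the original file): a setuid-root program that must drop privileges
 * only temporarily uses the BSD-style swap rather than setuid(), since
 * setuid() would overwrite the saved uid as well (error handling omitted).
 *
 *	#include <unistd.h>
 *
 *	setreuid(geteuid(), getuid());	swap: euid is now the real user
 *	... do less-privileged work ...
 *	setreuid(geteuid(), getuid());	swap back: euid is root again
 */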
655  
656  
657  /*
658   * This function implements a generic ability to update ruid, euid,
659   * and suid.  This allows you to implement the 4.4 compatible seteuid().
660   */
661  long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
662  {
663  	struct user_namespace *ns = current_user_ns();
664  	const struct cred *old;
665  	struct cred *new;
666  	int retval;
667  	kuid_t kruid, keuid, ksuid;
668  	bool ruid_new, euid_new, suid_new;
669  
670  	kruid = make_kuid(ns, ruid);
671  	keuid = make_kuid(ns, euid);
672  	ksuid = make_kuid(ns, suid);
673  
674  	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
675  		return -EINVAL;
676  
677  	if ((euid != (uid_t) -1) && !uid_valid(keuid))
678  		return -EINVAL;
679  
680  	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
681  		return -EINVAL;
682  
683  	old = current_cred();
684  
685  	/* check for no-op */
686  	if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
687  	    (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
688  				    uid_eq(keuid, old->fsuid))) &&
689  	    (suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
690  		return 0;
691  
692  	ruid_new = ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
693  		   !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
694  	euid_new = euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
695  		   !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
696  	suid_new = suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
697  		   !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
698  	if ((ruid_new || euid_new || suid_new) &&
699  	    !ns_capable_setid(old->user_ns, CAP_SETUID))
700  		return -EPERM;
701  
702  	new = prepare_creds();
703  	if (!new)
704  		return -ENOMEM;
705  
706  	if (ruid != (uid_t) -1) {
707  		new->uid = kruid;
708  		if (!uid_eq(kruid, old->uid)) {
709  			retval = set_user(new);
710  			if (retval < 0)
711  				goto error;
712  		}
713  	}
714  	if (euid != (uid_t) -1)
715  		new->euid = keuid;
716  	if (suid != (uid_t) -1)
717  		new->suid = ksuid;
718  	new->fsuid = new->euid;
719  
720  	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
721  	if (retval < 0)
722  		goto error;
723  
724  	retval = set_cred_ucounts(new);
725  	if (retval < 0)
726  		goto error;
727  
728  	flag_nproc_exceeded(new);
729  	return commit_creds(new);
730  
731  error:
732  	abort_creds(new);
733  	return retval;
734  }
735  
736  SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
737  {
738  	return __sys_setresuid(ruid, euid, suid);
739  }
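
/*
 * Illustrative permanent drop (editor's sketch, not part of the original
 * file): setting all three uids at once leaves no saved uid to climb back
 * to, which is why setresuid() is a common way to give up root for good.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	uid_t uid = getuid();
 *	if (setresuid(uid, uid, uid) == -1)
 *		_exit(1);
 */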
740  
741  SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
742  {
743  	const struct cred *cred = current_cred();
744  	int retval;
745  	uid_t ruid, euid, suid;
746  
747  	ruid = from_kuid_munged(cred->user_ns, cred->uid);
748  	euid = from_kuid_munged(cred->user_ns, cred->euid);
749  	suid = from_kuid_munged(cred->user_ns, cred->suid);
750  
751  	retval = put_user(ruid, ruidp);
752  	if (!retval) {
753  		retval = put_user(euid, euidp);
754  		if (!retval)
755  			return put_user(suid, suidp);
756  	}
757  	return retval;
758  }
759  
760  /*
761   * Same as above, but for rgid, egid, sgid.
762   */
763  long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
764  {
765  	struct user_namespace *ns = current_user_ns();
766  	const struct cred *old;
767  	struct cred *new;
768  	int retval;
769  	kgid_t krgid, kegid, ksgid;
770  	bool rgid_new, egid_new, sgid_new;
771  
772  	krgid = make_kgid(ns, rgid);
773  	kegid = make_kgid(ns, egid);
774  	ksgid = make_kgid(ns, sgid);
775  
776  	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
777  		return -EINVAL;
778  	if ((egid != (gid_t) -1) && !gid_valid(kegid))
779  		return -EINVAL;
780  	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
781  		return -EINVAL;
782  
783  	old = current_cred();
784  
785  	/* check for no-op */
786  	if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
787  	    (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
788  				    gid_eq(kegid, old->fsgid))) &&
789  	    (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
790  		return 0;
791  
792  	rgid_new = rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
793  		   !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
794  	egid_new = egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
795  		   !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
796  	sgid_new = sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
797  		   !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
798  	if ((rgid_new || egid_new || sgid_new) &&
799  	    !ns_capable_setid(old->user_ns, CAP_SETGID))
800  		return -EPERM;
801  
802  	new = prepare_creds();
803  	if (!new)
804  		return -ENOMEM;
805  
806  	if (rgid != (gid_t) -1)
807  		new->gid = krgid;
808  	if (egid != (gid_t) -1)
809  		new->egid = kegid;
810  	if (sgid != (gid_t) -1)
811  		new->sgid = ksgid;
812  	new->fsgid = new->egid;
813  
814  	retval = security_task_fix_setgid(new, old, LSM_SETID_RES);
815  	if (retval < 0)
816  		goto error;
817  
818  	return commit_creds(new);
819  
820  error:
821  	abort_creds(new);
822  	return retval;
823  }
824  
825  SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
826  {
827  	return __sys_setresgid(rgid, egid, sgid);
828  }
829  
830  SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
831  {
832  	const struct cred *cred = current_cred();
833  	int retval;
834  	gid_t rgid, egid, sgid;
835  
836  	rgid = from_kgid_munged(cred->user_ns, cred->gid);
837  	egid = from_kgid_munged(cred->user_ns, cred->egid);
838  	sgid = from_kgid_munged(cred->user_ns, cred->sgid);
839  
840  	retval = put_user(rgid, rgidp);
841  	if (!retval) {
842  		retval = put_user(egid, egidp);
843  		if (!retval)
844  			retval = put_user(sgid, sgidp);
845  	}
846  
847  	return retval;
848  }
849  
850  
851  /*
852   * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
853   * is used for "access()" and for the NFS daemon (letting nfsd stay at
854   * whatever uid it wants to). It normally shadows "euid", except when
855   * explicitly set by setfsuid() or for access..
856   */
857  long __sys_setfsuid(uid_t uid)
858  {
859  	const struct cred *old;
860  	struct cred *new;
861  	uid_t old_fsuid;
862  	kuid_t kuid;
863  
864  	old = current_cred();
865  	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
866  
867  	kuid = make_kuid(old->user_ns, uid);
868  	if (!uid_valid(kuid))
869  		return old_fsuid;
870  
871  	new = prepare_creds();
872  	if (!new)
873  		return old_fsuid;
874  
875  	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
876  	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
877  	    ns_capable_setid(old->user_ns, CAP_SETUID)) {
878  		if (!uid_eq(kuid, old->fsuid)) {
879  			new->fsuid = kuid;
880  			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
881  				goto change_okay;
882  		}
883  	}
884  
885  	abort_creds(new);
886  	return old_fsuid;
887  
888  change_okay:
889  	commit_creds(new);
890  	return old_fsuid;
891  }
892  
893  SYSCALL_DEFINE1(setfsuid, uid_t, uid)
894  {
895  	return __sys_setfsuid(uid);
896  }
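
/*
 * Illustrative failure check (editor's sketch, not part of the original
 * file): setfsuid() always returns the previous fsuid and never an error,
 * so the conventional probe is a second call with an invalid uid, which
 * changes nothing but reports the current fsuid.
 *
 *	#include <sys/fsuid.h>
 *
 *	setfsuid(uid);
 *	if ((uid_t)setfsuid(-1) != uid)
 *		handle_error();		hypothetical error handler
 */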
897  
898  /*
899   * Samma på svenska.. ("the same, in Swedish": setfsgid() mirrors setfsuid())
900   */
901  long __sys_setfsgid(gid_t gid)
902  {
903  	const struct cred *old;
904  	struct cred *new;
905  	gid_t old_fsgid;
906  	kgid_t kgid;
907  
908  	old = current_cred();
909  	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
910  
911  	kgid = make_kgid(old->user_ns, gid);
912  	if (!gid_valid(kgid))
913  		return old_fsgid;
914  
915  	new = prepare_creds();
916  	if (!new)
917  		return old_fsgid;
918  
919  	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
920  	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
921  	    ns_capable_setid(old->user_ns, CAP_SETGID)) {
922  		if (!gid_eq(kgid, old->fsgid)) {
923  			new->fsgid = kgid;
924  			if (security_task_fix_setgid(new, old, LSM_SETID_FS) == 0)
925  				goto change_okay;
926  		}
927  	}
928  
929  	abort_creds(new);
930  	return old_fsgid;
931  
932  change_okay:
933  	commit_creds(new);
934  	return old_fsgid;
935  }
936  
937  SYSCALL_DEFINE1(setfsgid, gid_t, gid)
938  {
939  	return __sys_setfsgid(gid);
940  }
941  #endif /* CONFIG_MULTIUSER */
942  
943  /**
944   * sys_getpid - return the thread group id of the current process
945   *
946   * Note, despite the name, this returns the tgid not the pid.  The tgid and
947   * the pid are identical unless CLONE_THREAD was specified on clone(), in
948   * which case the tgid is the same in all threads of the same group.
949   *
950   * This is SMP safe as current->tgid does not change.
951   */
952  SYSCALL_DEFINE0(getpid)
953  {
954  	return task_tgid_vnr(current);
955  }
956  
957  /* Thread ID - the internal kernel "pid" */
958  SYSCALL_DEFINE0(gettid)
959  {
960  	return task_pid_vnr(current);
961  }
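
/*
 * Illustrative distinction (editor's sketch, not part of the original
 * file): in the main thread the two values match; threads created with
 * CLONE_THREAD (as pthreads does) share the tgid but each get their own
 * tid.
 *
 *	#define _GNU_SOURCE		gettid() wrapper needs glibc >= 2.30
 *	#include <unistd.h>
 *
 *	pid_t tgid = getpid();		shared by every thread
 *	pid_t tid  = gettid();		unique per thread
 */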
962  
963  /*
964   * Accessing ->real_parent is not SMP-safe, it could
965   * change from under us. However, we can use a stale
966   * value of ->real_parent under rcu_read_lock(), see
967   * release_task()->call_rcu(delayed_put_task_struct).
968   */
969  SYSCALL_DEFINE0(getppid)
970  {
971  	int pid;
972  
973  	rcu_read_lock();
974  	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
975  	rcu_read_unlock();
976  
977  	return pid;
978  }
979  
980  SYSCALL_DEFINE0(getuid)
981  {
982  	/* Only we change this so SMP safe */
983  	return from_kuid_munged(current_user_ns(), current_uid());
984  }
985  
986  SYSCALL_DEFINE0(geteuid)
987  {
988  	/* Only we change this so SMP safe */
989  	return from_kuid_munged(current_user_ns(), current_euid());
990  }
991  
992  SYSCALL_DEFINE0(getgid)
993  {
994  	/* Only we change this so SMP safe */
995  	return from_kgid_munged(current_user_ns(), current_gid());
996  }
997  
998  SYSCALL_DEFINE0(getegid)
999  {
1000  	/* Only we change this so SMP safe */
1001  	return from_kgid_munged(current_user_ns(), current_egid());
1002  }
1003  
1004  static void do_sys_times(struct tms *tms)
1005  {
1006  	u64 tgutime, tgstime, cutime, cstime;
1007  
1008  	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
1009  	cutime = current->signal->cutime;
1010  	cstime = current->signal->cstime;
1011  	tms->tms_utime = nsec_to_clock_t(tgutime);
1012  	tms->tms_stime = nsec_to_clock_t(tgstime);
1013  	tms->tms_cutime = nsec_to_clock_t(cutime);
1014  	tms->tms_cstime = nsec_to_clock_t(cstime);
1015  }
1016  
1017  SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
1018  {
1019  	if (tbuf) {
1020  		struct tms tmp;
1021  
1022  		do_sys_times(&tmp);
1023  		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
1024  			return -EFAULT;
1025  	}
1026  	force_successful_syscall_return();
1027  	return (long) jiffies_64_to_clock_t(get_jiffies_64());
1028  }
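
/*
 * Illustrative unit handling (editor's sketch, not part of the original
 * file): both the return value and the struct tms fields are in clock
 * ticks, so userspace divides by sysconf(_SC_CLK_TCK) to get seconds.
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	struct tms t;
 *	times(&t);
 *	double cpu_sec = (double)(t.tms_utime + t.tms_stime)
 *			 / sysconf(_SC_CLK_TCK);
 */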
1029  
1030  #ifdef CONFIG_COMPAT
1031  static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
1032  {
1033  	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
1034  }
1035  
1036  COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
1037  {
1038  	if (tbuf) {
1039  		struct tms tms;
1040  		struct compat_tms tmp;
1041  
1042  		do_sys_times(&tms);
1043  		/* Convert our struct tms to the compat version. */
1044  		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
1045  		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
1046  		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
1047  		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
1048  		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
1049  			return -EFAULT;
1050  	}
1051  	force_successful_syscall_return();
1052  	return compat_jiffies_to_clock_t(jiffies);
1053  }
1054  #endif
1055  
1056  /*
1057   * This needs some heavy checking ...
1058   * I just haven't the stomach for it. I also don't fully
1059   * understand sessions/pgrp etc. Let somebody who does explain it.
1060   *
1061   * OK, I think I have the protection semantics right.... this is really
1062   * only important on a multi-user system anyway, to make sure one user
1063   * can't send a signal to a process owned by another.  -TYT, 12/12/91
1064   *
1065   * The !PF_FORKNOEXEC check is there to conform completely to POSIX.
1066   */
1067  SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1068  {
1069  	struct task_struct *p;
1070  	struct task_struct *group_leader = current->group_leader;
1071  	struct pid *pgrp;
1072  	int err;
1073  
1074  	if (!pid)
1075  		pid = task_pid_vnr(group_leader);
1076  	if (!pgid)
1077  		pgid = pid;
1078  	if (pgid < 0)
1079  		return -EINVAL;
1080  	rcu_read_lock();
1081  
1082  	/* From this point forward we keep holding onto the tasklist lock
1083  	 * so that our parent does not change from under us. -DaveM
1084  	 */
1085  	write_lock_irq(&tasklist_lock);
1086  
1087  	err = -ESRCH;
1088  	p = find_task_by_vpid(pid);
1089  	if (!p)
1090  		goto out;
1091  
1092  	err = -EINVAL;
1093  	if (!thread_group_leader(p))
1094  		goto out;
1095  
1096  	if (same_thread_group(p->real_parent, group_leader)) {
1097  		err = -EPERM;
1098  		if (task_session(p) != task_session(group_leader))
1099  			goto out;
1100  		err = -EACCES;
1101  		if (!(p->flags & PF_FORKNOEXEC))
1102  			goto out;
1103  	} else {
1104  		err = -ESRCH;
1105  		if (p != group_leader)
1106  			goto out;
1107  	}
1108  
1109  	err = -EPERM;
1110  	if (p->signal->leader)
1111  		goto out;
1112  
1113  	pgrp = task_pid(p);
1114  	if (pgid != pid) {
1115  		struct task_struct *g;
1116  
1117  		pgrp = find_vpid(pgid);
1118  		g = pid_task(pgrp, PIDTYPE_PGID);
1119  		if (!g || task_session(g) != task_session(group_leader))
1120  			goto out;
1121  	}
1122  
1123  	err = security_task_setpgid(p, pgid);
1124  	if (err)
1125  		goto out;
1126  
1127  	if (task_pgrp(p) != pgrp)
1128  		change_pid(p, PIDTYPE_PGID, pgrp);
1129  
1130  	err = 0;
1131  out:
1132  	/* All paths lead to here, thus we are safe. -DaveM */
1133  	write_unlock_irq(&tasklist_lock);
1134  	rcu_read_unlock();
1135  	return err;
1136  }
1137  
1138  static int do_getpgid(pid_t pid)
1139  {
1140  	struct task_struct *p;
1141  	struct pid *grp;
1142  	int retval;
1143  
1144  	rcu_read_lock();
1145  	if (!pid)
1146  		grp = task_pgrp(current);
1147  	else {
1148  		retval = -ESRCH;
1149  		p = find_task_by_vpid(pid);
1150  		if (!p)
1151  			goto out;
1152  		grp = task_pgrp(p);
1153  		if (!grp)
1154  			goto out;
1155  
1156  		retval = security_task_getpgid(p);
1157  		if (retval)
1158  			goto out;
1159  	}
1160  	retval = pid_vnr(grp);
1161  out:
1162  	rcu_read_unlock();
1163  	return retval;
1164  }
1165  
1166  SYSCALL_DEFINE1(getpgid, pid_t, pid)
1167  {
1168  	return do_getpgid(pid);
1169  }
1170  
1171  #ifdef __ARCH_WANT_SYS_GETPGRP
1172  
1173  SYSCALL_DEFINE0(getpgrp)
1174  {
1175  	return do_getpgid(0);
1176  }
1177  
1178  #endif
1179  
1180  SYSCALL_DEFINE1(getsid, pid_t, pid)
1181  {
1182  	struct task_struct *p;
1183  	struct pid *sid;
1184  	int retval;
1185  
1186  	rcu_read_lock();
1187  	if (!pid)
1188  		sid = task_session(current);
1189  	else {
1190  		retval = -ESRCH;
1191  		p = find_task_by_vpid(pid);
1192  		if (!p)
1193  			goto out;
1194  		sid = task_session(p);
1195  		if (!sid)
1196  			goto out;
1197  
1198  		retval = security_task_getsid(p);
1199  		if (retval)
1200  			goto out;
1201  	}
1202  	retval = pid_vnr(sid);
1203  out:
1204  	rcu_read_unlock();
1205  	return retval;
1206  }
1207  
1208  static void set_special_pids(struct pid *pid)
1209  {
1210  	struct task_struct *curr = current->group_leader;
1211  
1212  	if (task_session(curr) != pid)
1213  		change_pid(curr, PIDTYPE_SID, pid);
1214  
1215  	if (task_pgrp(curr) != pid)
1216  		change_pid(curr, PIDTYPE_PGID, pid);
1217  }
1218  
1219  int ksys_setsid(void)
1220  {
1221  	struct task_struct *group_leader = current->group_leader;
1222  	struct pid *sid = task_pid(group_leader);
1223  	pid_t session = pid_vnr(sid);
1224  	int err = -EPERM;
1225  
1226  	write_lock_irq(&tasklist_lock);
1227  	/* Fail if I am already a session leader */
1228  	if (group_leader->signal->leader)
1229  		goto out;
1230  
1231  	/* Fail if a process group id already exists that equals the
1232  	 * proposed session id.
1233  	 */
1234  	if (pid_task(sid, PIDTYPE_PGID))
1235  		goto out;
1236  
1237  	group_leader->signal->leader = 1;
1238  	set_special_pids(sid);
1239  
1240  	proc_clear_tty(group_leader);
1241  
1242  	err = session;
1243  out:
1244  	write_unlock_irq(&tasklist_lock);
1245  	if (err > 0) {
1246  		proc_sid_connector(group_leader);
1247  		sched_autogroup_create_attach(group_leader);
1248  	}
1249  	return err;
1250  }
1251  
1252  SYSCALL_DEFINE0(setsid)
1253  {
1254  	return ksys_setsid();
1255  }
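
/*
 * Illustrative daemon setup (editor's sketch, not part of the original
 * file): setsid() fails with -EPERM for a process group leader, so the
 * classic sequence forks first; the child is then guaranteed not to lead
 * a group and becomes the leader of a fresh session.
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	if (fork() > 0)
 *		exit(0);		parent leaves
 *	if (setsid() == -1)
 *		exit(1);		child now leads a new session
 */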
1256  
1257  DECLARE_RWSEM(uts_sem);
1258  
1259  #ifdef COMPAT_UTS_MACHINE
1260  #define override_architecture(name) \
1261  	(personality(current->personality) == PER_LINUX32 && \
1262  	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1263  		      sizeof(COMPAT_UTS_MACHINE)))
1264  #else
1265  #define override_architecture(name)	0
1266  #endif
1267  
1268  /*
1269   * Work around broken programs that cannot handle "Linux 3.0".
1270   * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
1271   * And we map 4.x and later versions to 2.6.60+x, so 4.0/5.0/6.0/... would be
1272   * 2.6.60.
1273   */
1274  static int override_release(char __user *release, size_t len)
1275  {
1276  	int ret = 0;
1277  
1278  	if (current->personality & UNAME26) {
1279  		const char *rest = UTS_RELEASE;
1280  		char buf[65] = { 0 };
1281  		int ndots = 0;
1282  		unsigned v;
1283  		size_t copy;
1284  
1285  		while (*rest) {
1286  			if (*rest == '.' && ++ndots >= 3)
1287  				break;
1288  			if (!isdigit(*rest) && *rest != '.')
1289  				break;
1290  			rest++;
1291  		}
1292  		v = LINUX_VERSION_PATCHLEVEL + 60;
1293  		copy = clamp_t(size_t, len, 1, sizeof(buf));
1294  		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1295  		ret = copy_to_user(release, buf, copy + 1);
1296  	}
1297  	return ret;
1298  }
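
/*
 * Illustrative mapping (editor's sketch; assumes this tree is a v6.5
 * kernel): under the UNAME26 personality, a "6.5" release is reported as
 * "2.6.65", since LINUX_VERSION_PATCHLEVEL (5) is added to 60 and the
 * remainder of the release string is appended unchanged.
 *
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	struct utsname u;
 *	personality(PER_LINUX | UNAME26);
 *	uname(&u);			u.release is now e.g. "2.6.65"
 */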
1299  
1300  SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1301  {
1302  	struct new_utsname tmp;
1303  
1304  	down_read(&uts_sem);
1305  	memcpy(&tmp, utsname(), sizeof(tmp));
1306  	up_read(&uts_sem);
1307  	if (copy_to_user(name, &tmp, sizeof(tmp)))
1308  		return -EFAULT;
1309  
1310  	if (override_release(name->release, sizeof(name->release)))
1311  		return -EFAULT;
1312  	if (override_architecture(name))
1313  		return -EFAULT;
1314  	return 0;
1315  }
1316  
1317  #ifdef __ARCH_WANT_SYS_OLD_UNAME
1318  /*
1319   * Old cruft
1320   */
1321  SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1322  {
1323  	struct old_utsname tmp;
1324  
1325  	if (!name)
1326  		return -EFAULT;
1327  
1328  	down_read(&uts_sem);
1329  	memcpy(&tmp, utsname(), sizeof(tmp));
1330  	up_read(&uts_sem);
1331  	if (copy_to_user(name, &tmp, sizeof(tmp)))
1332  		return -EFAULT;
1333  
1334  	if (override_release(name->release, sizeof(name->release)))
1335  		return -EFAULT;
1336  	if (override_architecture(name))
1337  		return -EFAULT;
1338  	return 0;
1339  }
1340  
1341  SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1342  {
1343  	struct oldold_utsname tmp;
1344  
1345  	if (!name)
1346  		return -EFAULT;
1347  
1348  	memset(&tmp, 0, sizeof(tmp));
1349  
1350  	down_read(&uts_sem);
1351  	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1352  	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1353  	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1354  	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1355  	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1356  	up_read(&uts_sem);
1357  	if (copy_to_user(name, &tmp, sizeof(tmp)))
1358  		return -EFAULT;
1359  
1360  	if (override_architecture(name))
1361  		return -EFAULT;
1362  	if (override_release(name->release, sizeof(name->release)))
1363  		return -EFAULT;
1364  	return 0;
1365  }
1366  #endif
1367  
1368  SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1369  {
1370  	int errno;
1371  	char tmp[__NEW_UTS_LEN];
1372  
1373  	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1374  		return -EPERM;
1375  
1376  	if (len < 0 || len > __NEW_UTS_LEN)
1377  		return -EINVAL;
1378  	errno = -EFAULT;
1379  	if (!copy_from_user(tmp, name, len)) {
1380  		struct new_utsname *u;
1381  
1382  		add_device_randomness(tmp, len);
1383  		down_write(&uts_sem);
1384  		u = utsname();
1385  		memcpy(u->nodename, tmp, len);
1386  		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1387  		errno = 0;
1388  		uts_proc_notify(UTS_PROC_HOSTNAME);
1389  		up_write(&uts_sem);
1390  	}
1391  	return errno;
1392  }
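
/*
 * Illustrative caller (editor's sketch, not part of the original file):
 * this needs CAP_SYS_ADMIN in the user namespace owning the UTS
 * namespace, so an unprivileged process can still set a hostname inside
 * its own unshare(CLONE_NEWUSER | CLONE_NEWUTS) sandbox.
 *
 *	#include <unistd.h>
 *	#include <string.h>
 *
 *	const char *host = "buildbox";
 *	if (sethostname(host, strlen(host)) == -1)
 *		_exit(1);
 */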
1393  
1394  #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1395  
1396  SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1397  {
1398  	int i;
1399  	struct new_utsname *u;
1400  	char tmp[__NEW_UTS_LEN + 1];
1401  
1402  	if (len < 0)
1403  		return -EINVAL;
1404  	down_read(&uts_sem);
1405  	u = utsname();
1406  	i = 1 + strlen(u->nodename);
1407  	if (i > len)
1408  		i = len;
1409  	memcpy(tmp, u->nodename, i);
1410  	up_read(&uts_sem);
1411  	if (copy_to_user(name, tmp, i))
1412  		return -EFAULT;
1413  	return 0;
1414  }
1415  
1416  #endif
1417  
1418  /*
1419   * Only setdomainname; getdomainname can be implemented by calling
1420   * uname()
1421   */
1422  SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1423  {
1424  	int errno;
1425  	char tmp[__NEW_UTS_LEN];
1426  
1427  	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1428  		return -EPERM;
1429  	if (len < 0 || len > __NEW_UTS_LEN)
1430  		return -EINVAL;
1431  
1432  	errno = -EFAULT;
1433  	if (!copy_from_user(tmp, name, len)) {
1434  		struct new_utsname *u;
1435  
1436  		add_device_randomness(tmp, len);
1437  		down_write(&uts_sem);
1438  		u = utsname();
1439  		memcpy(u->domainname, tmp, len);
1440  		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1441  		errno = 0;
1442  		uts_proc_notify(UTS_PROC_DOMAINNAME);
1443  		up_write(&uts_sem);
1444  	}
1445  	return errno;
1446  }
1447  
1448  /* make sure you are allowed to change @tsk limits before calling this */
1449  static int do_prlimit(struct task_struct *tsk, unsigned int resource,
1450  		      struct rlimit *new_rlim, struct rlimit *old_rlim)
1451  {
1452  	struct rlimit *rlim;
1453  	int retval = 0;
1454  
1455  	if (resource >= RLIM_NLIMITS)
1456  		return -EINVAL;
1457  	resource = array_index_nospec(resource, RLIM_NLIMITS);
1458  
1459  	if (new_rlim) {
1460  		if (new_rlim->rlim_cur > new_rlim->rlim_max)
1461  			return -EINVAL;
1462  		if (resource == RLIMIT_NOFILE &&
1463  				new_rlim->rlim_max > sysctl_nr_open)
1464  			return -EPERM;
1465  	}
1466  
1467  	/* Holding a refcount on tsk protects tsk->signal from disappearing. */
1468  	rlim = tsk->signal->rlim + resource;
1469  	task_lock(tsk->group_leader);
1470  	if (new_rlim) {
1471  		/*
1472  		 * Keep the capable check against init_user_ns until cgroups can
1473  		 * contain all limits.
1474  		 */
1475  		if (new_rlim->rlim_max > rlim->rlim_max &&
1476  				!capable(CAP_SYS_RESOURCE))
1477  			retval = -EPERM;
1478  		if (!retval)
1479  			retval = security_task_setrlimit(tsk, resource, new_rlim);
1480  	}
1481  	if (!retval) {
1482  		if (old_rlim)
1483  			*old_rlim = *rlim;
1484  		if (new_rlim)
1485  			*rlim = *new_rlim;
1486  	}
1487  	task_unlock(tsk->group_leader);
1488  
1489  	/*
1490  	 * RLIMIT_CPU handling. Arm the posix CPU timer if the limit is not
1491  	 * infinite. In case of RLIM_INFINITY the posix CPU timer code
1492  	 * ignores the rlimit.
1493  	 */
1494  	if (!retval && new_rlim && resource == RLIMIT_CPU &&
1495  	    new_rlim->rlim_cur != RLIM_INFINITY &&
1496  	    IS_ENABLED(CONFIG_POSIX_TIMERS)) {
1497  		/*
1498  		 * update_rlimit_cpu can fail if the task is exiting, but there
1499  		 * may be other tasks in the thread group that are not exiting,
1500  		 * and they need their cpu timers adjusted.
1501  		 *
1502  		 * The group_leader is the last task to be released, so if we
1503  		 * cannot update_rlimit_cpu on it, then the entire process is
1504  		 * exiting and we do not need to update at all.
1505  		 */
1506  		update_rlimit_cpu(tsk->group_leader, new_rlim->rlim_cur);
1507  	}
1508  
1509  	return retval;
1510  }
1511  
1512  SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1513  {
1514  	struct rlimit value;
1515  	int ret;
1516  
1517  	ret = do_prlimit(current, resource, NULL, &value);
1518  	if (!ret)
1519  		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1520  
1521  	return ret;
1522  }
1523  
1524  #ifdef CONFIG_COMPAT
1525  
1526  COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1527  		       struct compat_rlimit __user *, rlim)
1528  {
1529  	struct rlimit r;
1530  	struct compat_rlimit r32;
1531  
1532  	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1533  		return -EFAULT;
1534  
1535  	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1536  		r.rlim_cur = RLIM_INFINITY;
1537  	else
1538  		r.rlim_cur = r32.rlim_cur;
1539  	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1540  		r.rlim_max = RLIM_INFINITY;
1541  	else
1542  		r.rlim_max = r32.rlim_max;
1543  	return do_prlimit(current, resource, &r, NULL);
1544  }
1545  
1546  COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1547  		       struct compat_rlimit __user *, rlim)
1548  {
1549  	struct rlimit r;
1550  	int ret;
1551  
1552  	ret = do_prlimit(current, resource, NULL, &r);
1553  	if (!ret) {
1554  		struct compat_rlimit r32;
1555  		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1556  			r32.rlim_cur = COMPAT_RLIM_INFINITY;
1557  		else
1558  			r32.rlim_cur = r.rlim_cur;
1559  		if (r.rlim_max > COMPAT_RLIM_INFINITY)
1560  			r32.rlim_max = COMPAT_RLIM_INFINITY;
1561  		else
1562  			r32.rlim_max = r.rlim_max;
1563  
1564  		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1565  			return -EFAULT;
1566  	}
1567  	return ret;
1568  }
1569  
1570  #endif
1571  
1572  #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1573  
1574  /*
1575   *	Back compatibility for getrlimit. Needed for some apps.
1576   */
1577  SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1578  		struct rlimit __user *, rlim)
1579  {
1580  	struct rlimit x;
1581  	if (resource >= RLIM_NLIMITS)
1582  		return -EINVAL;
1583  
1584  	resource = array_index_nospec(resource, RLIM_NLIMITS);
1585  	task_lock(current->group_leader);
1586  	x = current->signal->rlim[resource];
1587  	task_unlock(current->group_leader);
1588  	if (x.rlim_cur > 0x7FFFFFFF)
1589  		x.rlim_cur = 0x7FFFFFFF;
1590  	if (x.rlim_max > 0x7FFFFFFF)
1591  		x.rlim_max = 0x7FFFFFFF;
1592  	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1593  }
1594  
1595  #ifdef CONFIG_COMPAT
1596  COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1597  		       struct compat_rlimit __user *, rlim)
1598  {
1599  	struct rlimit r;
1600  
1601  	if (resource >= RLIM_NLIMITS)
1602  		return -EINVAL;
1603  
1604  	resource = array_index_nospec(resource, RLIM_NLIMITS);
1605  	task_lock(current->group_leader);
1606  	r = current->signal->rlim[resource];
1607  	task_unlock(current->group_leader);
1608  	if (r.rlim_cur > 0x7FFFFFFF)
1609  		r.rlim_cur = 0x7FFFFFFF;
1610  	if (r.rlim_max > 0x7FFFFFFF)
1611  		r.rlim_max = 0x7FFFFFFF;
1612  
1613  	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1614  	    put_user(r.rlim_max, &rlim->rlim_max))
1615  		return -EFAULT;
1616  	return 0;
1617  }
1618  #endif
1619  
1620  #endif
1621  
1622  static inline bool rlim64_is_infinity(__u64 rlim64)
1623  {
1624  #if BITS_PER_LONG < 64
1625  	return rlim64 >= ULONG_MAX;
1626  #else
1627  	return rlim64 == RLIM64_INFINITY;
1628  #endif
1629  }
1630  
1631  static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1632  {
1633  	if (rlim->rlim_cur == RLIM_INFINITY)
1634  		rlim64->rlim_cur = RLIM64_INFINITY;
1635  	else
1636  		rlim64->rlim_cur = rlim->rlim_cur;
1637  	if (rlim->rlim_max == RLIM_INFINITY)
1638  		rlim64->rlim_max = RLIM64_INFINITY;
1639  	else
1640  		rlim64->rlim_max = rlim->rlim_max;
1641  }
1642  
1643  static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1644  {
1645  	if (rlim64_is_infinity(rlim64->rlim_cur))
1646  		rlim->rlim_cur = RLIM_INFINITY;
1647  	else
1648  		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1649  	if (rlim64_is_infinity(rlim64->rlim_max))
1650  		rlim->rlim_max = RLIM_INFINITY;
1651  	else
1652  		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1653  }
1654  
1655  /* rcu lock must be held */
1656  static int check_prlimit_permission(struct task_struct *task,
1657  				    unsigned int flags)
1658  {
1659  	const struct cred *cred = current_cred(), *tcred;
1660  	bool id_match;
1661  
1662  	if (current == task)
1663  		return 0;
1664  
1665  	tcred = __task_cred(task);
1666  	id_match = (uid_eq(cred->uid, tcred->euid) &&
1667  		    uid_eq(cred->uid, tcred->suid) &&
1668  		    uid_eq(cred->uid, tcred->uid)  &&
1669  		    gid_eq(cred->gid, tcred->egid) &&
1670  		    gid_eq(cred->gid, tcred->sgid) &&
1671  		    gid_eq(cred->gid, tcred->gid));
1672  	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1673  		return -EPERM;
1674  
1675  	return security_task_prlimit(cred, tcred, flags);
1676  }
1677  
1678  SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1679  		const struct rlimit64 __user *, new_rlim,
1680  		struct rlimit64 __user *, old_rlim)
1681  {
1682  	struct rlimit64 old64, new64;
1683  	struct rlimit old, new;
1684  	struct task_struct *tsk;
1685  	unsigned int checkflags = 0;
1686  	int ret;
1687  
1688  	if (old_rlim)
1689  		checkflags |= LSM_PRLIMIT_READ;
1690  
1691  	if (new_rlim) {
1692  		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1693  			return -EFAULT;
1694  		rlim64_to_rlim(&new64, &new);
1695  		checkflags |= LSM_PRLIMIT_WRITE;
1696  	}
1697  
1698  	rcu_read_lock();
1699  	tsk = pid ? find_task_by_vpid(pid) : current;
1700  	if (!tsk) {
1701  		rcu_read_unlock();
1702  		return -ESRCH;
1703  	}
1704  	ret = check_prlimit_permission(tsk, checkflags);
1705  	if (ret) {
1706  		rcu_read_unlock();
1707  		return ret;
1708  	}
1709  	get_task_struct(tsk);
1710  	rcu_read_unlock();
1711  
1712  	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1713  			old_rlim ? &old : NULL);
1714  
1715  	if (!ret && old_rlim) {
1716  		rlim_to_rlim64(&old, &old64);
1717  		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1718  			ret = -EFAULT;
1719  	}
1720  
1721  	put_task_struct(tsk);
1722  	return ret;
1723  }
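
/*
 * Illustrative userspace use (editor's sketch, not part of the original
 * file): glibc exposes this syscall as prlimit(), which can read the old
 * limit and install a new one for another process in a single call.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/types.h>
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	pid_t pid = 1234;		hypothetical target process
 *	struct rlimit lim = { .rlim_cur = 4096, .rlim_max = 4096 }, old;
 *	if (prlimit(pid, RLIMIT_NOFILE, &lim, &old) == -1)
 *		perror("prlimit");
 */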
1724  
1725  SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1726  {
1727  	struct rlimit new_rlim;
1728  
1729  	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1730  		return -EFAULT;
1731  	return do_prlimit(current, resource, &new_rlim, NULL);
1732  }
1733  
1734  /*
1735   * It would make sense to put struct rusage in the task_struct,
1736   * except that would make the task_struct be *really big*.  After
1737   * task_struct gets moved into malloc'ed memory, it would
1738   * make sense to do this.  It will make moving the rest of the information
1739   * a lot simpler!  (Which we're not doing right now because we're not
1740   * measuring them yet).
1741   *
1742   * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1743   * races with threads incrementing their own counters.  But since word
1744   * reads are atomic, we either get new values or old values and we don't
1745   * care which for the sums.  We always take the siglock to protect reading
1746   * the c* fields from p->signal from races with exit.c updating those
1747   * fields when reaping, so a sample either gets all the additions of a
1748   * given child after it's reaped, or none, in which case the sample predates
1748   * the reaping.
1749   *
1750   * Locking:
1751   * We need to take the siglock for CHILDREN, SELF and BOTH
1752   * for the cases of current multithreaded, non-current single threaded
1753   * and non-current multithreaded.  Thread traversal is now safe with
1754   * the siglock held.
1755   * Strictly speaking, we do not need to take the siglock if we are current and
1756   * single threaded, as no one else can take our signal_struct away, no one
1757   * else can reap the children to update signal->c* counters, and no one else
1758   * can race with the signal-> fields. If we do not take any lock, the
1759   * signal-> fields could be read out of order while another thread was just
1760   * exiting. So we should place a read memory barrier when we avoid the lock.
1761   * On the writer side, a write memory barrier is implied in __exit_signal,
1762   * as __exit_signal releases the siglock spinlock after updating the signal->
1763   * fields. But we don't do this yet to keep things simple.
1764   *
1765   */
1766  
1767  static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1768  {
1769  	r->ru_nvcsw += t->nvcsw;
1770  	r->ru_nivcsw += t->nivcsw;
1771  	r->ru_minflt += t->min_flt;
1772  	r->ru_majflt += t->maj_flt;
1773  	r->ru_inblock += task_io_get_inblock(t);
1774  	r->ru_oublock += task_io_get_oublock(t);
1775  }
1776  
1777  void getrusage(struct task_struct *p, int who, struct rusage *r)
1778  {
1779  	struct task_struct *t;
1780  	unsigned long flags;
1781  	u64 tgutime, tgstime, utime, stime;
1782  	unsigned long maxrss = 0;
1783  
1784  	memset((char *)r, 0, sizeof(*r));
1785  	utime = stime = 0;
1786  
1787  	if (who == RUSAGE_THREAD) {
1788  		task_cputime_adjusted(current, &utime, &stime);
1789  		accumulate_thread_rusage(p, r);
1790  		maxrss = p->signal->maxrss;
1791  		goto out;
1792  	}
1793  
1794  	if (!lock_task_sighand(p, &flags))
1795  		return;
1796  
1797  	switch (who) {
1798  	case RUSAGE_BOTH:
1799  	case RUSAGE_CHILDREN:
1800  		utime = p->signal->cutime;
1801  		stime = p->signal->cstime;
1802  		r->ru_nvcsw = p->signal->cnvcsw;
1803  		r->ru_nivcsw = p->signal->cnivcsw;
1804  		r->ru_minflt = p->signal->cmin_flt;
1805  		r->ru_majflt = p->signal->cmaj_flt;
1806  		r->ru_inblock = p->signal->cinblock;
1807  		r->ru_oublock = p->signal->coublock;
1808  		maxrss = p->signal->cmaxrss;
1809  
1810  		if (who == RUSAGE_CHILDREN)
1811  			break;
1812  		fallthrough;
1813  
1814  	case RUSAGE_SELF:
1815  		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1816  		utime += tgutime;
1817  		stime += tgstime;
1818  		r->ru_nvcsw += p->signal->nvcsw;
1819  		r->ru_nivcsw += p->signal->nivcsw;
1820  		r->ru_minflt += p->signal->min_flt;
1821  		r->ru_majflt += p->signal->maj_flt;
1822  		r->ru_inblock += p->signal->inblock;
1823  		r->ru_oublock += p->signal->oublock;
1824  		if (maxrss < p->signal->maxrss)
1825  			maxrss = p->signal->maxrss;
1826  		t = p;
1827  		do {
1828  			accumulate_thread_rusage(t, r);
1829  		} while_each_thread(p, t);
1830  		break;
1831  
1832  	default:
1833  		BUG();
1834  	}
1835  	unlock_task_sighand(p, &flags);
1836  
1837  out:
1838  	r->ru_utime = ns_to_kernel_old_timeval(utime);
1839  	r->ru_stime = ns_to_kernel_old_timeval(stime);
1840  
1841  	if (who != RUSAGE_CHILDREN) {
1842  		struct mm_struct *mm = get_task_mm(p);
1843  
1844  		if (mm) {
1845  			setmax_mm_hiwater_rss(&maxrss, mm);
1846  			mmput(mm);
1847  		}
1848  	}
1849  	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1850  }
1851  
1852  SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1853  {
1854  	struct rusage r;
1855  
1856  	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1857  	    who != RUSAGE_THREAD)
1858  		return -EINVAL;
1859  
1860  	getrusage(current, who, &r);
1861  	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1862  }
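
/*
 * Illustrative peak-RSS query (editor's sketch, not part of the original
 * file): ru_maxrss arrives in kilobytes thanks to the PAGE_SIZE / 1024
 * conversion in getrusage() above.
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	struct rusage ru;
 *	if (getrusage(RUSAGE_SELF, &ru) == 0)
 *		printf("peak RSS: %ld KiB\n", ru.ru_maxrss);
 */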
1863  
1864  #ifdef CONFIG_COMPAT
1865  COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1866  {
1867  	struct rusage r;
1868  
1869  	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1870  	    who != RUSAGE_THREAD)
1871  		return -EINVAL;
1872  
1873  	getrusage(current, who, &r);
1874  	return put_compat_rusage(&r, ru);
1875  }
1876  #endif
1877  
1878  SYSCALL_DEFINE1(umask, int, mask)
1879  {
1880  	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1881  	return mask;
1882  }
1883  
1884  static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1885  {
1886  	struct fd exe;
1887  	struct inode *inode;
1888  	int err;
1889  
1890  	exe = fdget(fd);
1891  	if (!exe.file)
1892  		return -EBADF;
1893  
1894  	inode = file_inode(exe.file);
1895  
1896  	/*
1897  	 * Because the original mm->exe_file points to an executable file, make
1898  	 * sure that this one is executable as well, so that the overall
1899  	 * picture stays consistent.
1900  	 */
1901  	err = -EACCES;
1902  	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1903  		goto exit;
1904  
1905  	err = file_permission(exe.file, MAY_EXEC);
1906  	if (err)
1907  		goto exit;
1908  
1909  	err = replace_mm_exe_file(mm, exe.file);
1910  exit:
1911  	fdput(exe);
1912  	return err;
1913  }
1914  
1915  /*
1916   * Check arithmetic relations of passed addresses.
1917   *
1918   * WARNING: we don't require any capability here so be very careful
1919   * in what is allowed for modification from userspace.
1920   */
1921  static int validate_prctl_map_addr(struct prctl_mm_map *prctl_map)
1922  {
1923  	unsigned long mmap_max_addr = TASK_SIZE;
1924  	int error = -EINVAL, i;
1925  
1926  	static const unsigned char offsets[] = {
1927  		offsetof(struct prctl_mm_map, start_code),
1928  		offsetof(struct prctl_mm_map, end_code),
1929  		offsetof(struct prctl_mm_map, start_data),
1930  		offsetof(struct prctl_mm_map, end_data),
1931  		offsetof(struct prctl_mm_map, start_brk),
1932  		offsetof(struct prctl_mm_map, brk),
1933  		offsetof(struct prctl_mm_map, start_stack),
1934  		offsetof(struct prctl_mm_map, arg_start),
1935  		offsetof(struct prctl_mm_map, arg_end),
1936  		offsetof(struct prctl_mm_map, env_start),
1937  		offsetof(struct prctl_mm_map, env_end),
1938  	};
1939  
1940  	/*
1941  	 * Make sure the members are not somewhere outside
1942  	 * the allowed address space.
1943  	 */
1944  	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1945  		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1946  
1947  		if ((unsigned long)val >= mmap_max_addr ||
1948  		    (unsigned long)val < mmap_min_addr)
1949  			goto out;
1950  	}
1951  
1952  	/*
1953  	 * Make sure the pairs are ordered.
1954  	 */
1955  #define __prctl_check_order(__m1, __op, __m2)				\
1956  	((unsigned long)prctl_map->__m1 __op				\
1957  	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1958  	error  = __prctl_check_order(start_code, <, end_code);
1959  	error |= __prctl_check_order(start_data, <=, end_data);
1960  	error |= __prctl_check_order(start_brk, <=, brk);
1961  	error |= __prctl_check_order(arg_start, <=, arg_end);
1962  	error |= __prctl_check_order(env_start, <=, env_end);
1963  	if (error)
1964  		goto out;
1965  #undef __prctl_check_order
1966  
1967  	error = -EINVAL;
1968  
1969  	/*
1970  	 * Nor should we allow overriding the limits if they are set.
1971  	 */
1972  	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1973  			      prctl_map->start_brk, prctl_map->end_data,
1974  			      prctl_map->start_data))
1975  		goto out;
1976  
1977  	error = 0;
1978  out:
1979  	return error;
1980  }
1981  
1982  #ifdef CONFIG_CHECKPOINT_RESTORE
1983  static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1984  {
1985  	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1986  	unsigned long user_auxv[AT_VECTOR_SIZE];
1987  	struct mm_struct *mm = current->mm;
1988  	int error;
1989  
1990  	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1991  	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1992  
1993  	if (opt == PR_SET_MM_MAP_SIZE)
1994  		return put_user((unsigned int)sizeof(prctl_map),
1995  				(unsigned int __user *)addr);
1996  
1997  	if (data_size != sizeof(prctl_map))
1998  		return -EINVAL;
1999  
2000  	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
2001  		return -EFAULT;
2002  
2003  	error = validate_prctl_map_addr(&prctl_map);
2004  	if (error)
2005  		return error;
2006  
2007  	if (prctl_map.auxv_size) {
2008  		/*
2009  		 * Someone is trying to cheat the auxv vector.
2010  		 */
2011  		if (!prctl_map.auxv ||
2012  				prctl_map.auxv_size > sizeof(mm->saved_auxv))
2013  			return -EINVAL;
2014  
2015  		memset(user_auxv, 0, sizeof(user_auxv));
2016  		if (copy_from_user(user_auxv,
2017  				   (const void __user *)prctl_map.auxv,
2018  				   prctl_map.auxv_size))
2019  			return -EFAULT;
2020  
2021  		/* The last entry must be AT_NULL, as the specification requires */
2022  		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
2023  		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
2024  	}
2025  
2026  	if (prctl_map.exe_fd != (u32)-1) {
2027  		/*
2028  		 * Check if the current user is checkpoint/restore capable.
2029  		 * At the time of this writing, it checks for CAP_SYS_ADMIN
2030  		 * or CAP_CHECKPOINT_RESTORE.
2031  		 * Note that a user with access to ptrace can make an arbitrary
2032  		 * program masquerade as any executable, even setuid ones.
2033  		 * This may have implications for the tomoyo subsystem.
2034  		 */
2035  		if (!checkpoint_restore_ns_capable(current_user_ns()))
2036  			return -EPERM;
2037  
2038  		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2039  		if (error)
2040  			return error;
2041  	}
2042  
2043  	/*
2044  	 * arg_lock protects concurrent updates but we still need mmap_lock for
2045  	 * read to exclude races with sys_brk.
2046  	 */
2047  	mmap_read_lock(mm);
2048  
2049  	/*
2050  	 * We don't validate that these members point to real, present
2051  	 * VMAs: the application may already have unmapped the corresponding
2052  	 * VMAs, and the kernel mostly uses these members for statistics
2053  	 * output in procfs, except
2054  	 *
2055  	 *  - @start_brk/@brk, which are used in do_brk_flags; the kernel
2056  	 *    looks up VMAs when updating them, so anything wrong written
2057  	 *    here makes the kernel swear at the userspace program but
2058  	 *    won't lead to any problem in the kernel itself.
2059  	 */
2060  
2061  	spin_lock(&mm->arg_lock);
2062  	mm->start_code	= prctl_map.start_code;
2063  	mm->end_code	= prctl_map.end_code;
2064  	mm->start_data	= prctl_map.start_data;
2065  	mm->end_data	= prctl_map.end_data;
2066  	mm->start_brk	= prctl_map.start_brk;
2067  	mm->brk		= prctl_map.brk;
2068  	mm->start_stack	= prctl_map.start_stack;
2069  	mm->arg_start	= prctl_map.arg_start;
2070  	mm->arg_end	= prctl_map.arg_end;
2071  	mm->env_start	= prctl_map.env_start;
2072  	mm->env_end	= prctl_map.env_end;
2073  	spin_unlock(&mm->arg_lock);
2074  
2075  	/*
2076  	 * Note this update of @saved_auxv is lockless, so if
2077  	 * someone reads this member in procfs while we're
2078  	 * updating it, they may see partly updated results.
2079  	 * That is a known and acceptable trade-off: we leave it
2080  	 * as is rather than introduce additional locks and make
2081  	 * the kernel more complex.
2082  	 */
2083  	if (prctl_map.auxv_size)
2084  		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2085  
2086  	mmap_read_unlock(mm);
2087  	return 0;
2088  }
2089  #endif /* CONFIG_CHECKPOINT_RESTORE */
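
/*
 * Example (userspace, illustrative only): a restorer first asks the
 * kernel for the expected structure size, then installs a fully
 * populated struct prctl_mm_map in one shot; the field values are
 * placeholders taken from the target process image.
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	struct prctl_mm_map map;
 *	unsigned int size;
 *
 *	prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, (unsigned long)&size, 0, 0);
 *	... fill in every field of map, with exe_fd = -1 to keep exe ...
 *	prctl(PR_SET_MM, PR_SET_MM_MAP, (unsigned long)&map, size, 0);
 */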
2090  
2091  static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2092  			  unsigned long len)
2093  {
2094  	/*
2095  	 * This doesn't move the auxiliary vector itself since it's pinned to
2096  	 * mm_struct, but it permits filling the vector with new values.  It's
2097  	 * up to the caller to provide sane values here, otherwise userspace
2098  	 * tools which use this vector might be unhappy.
2099  	 */
2100  	unsigned long user_auxv[AT_VECTOR_SIZE] = {};
2101  
2102  	if (len > sizeof(user_auxv))
2103  		return -EINVAL;
2104  
2105  	if (copy_from_user(user_auxv, (const void __user *)addr, len))
2106  		return -EFAULT;
2107  
2108  	/* Make sure the last entry is always AT_NULL */
2109  	user_auxv[AT_VECTOR_SIZE - 2] = 0;
2110  	user_auxv[AT_VECTOR_SIZE - 1] = 0;
2111  
2112  	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2113  
2114  	task_lock(current);
2115  	memcpy(mm->saved_auxv, user_auxv, len);
2116  	task_unlock(current);
2117  
2118  	return 0;
2119  }
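
/*
 * Example (userspace, illustrative only): replacing the recorded auxv
 * through the PR_SET_MM_AUXV interface dispatched by prctl_set_mm()
 * below, which requires CAP_SYS_RESOURCE.  The minimal valid vector is
 * a single AT_NULL terminator pair.
 *
 *	#include <sys/prctl.h>
 *	#include <elf.h>
 *
 *	unsigned long auxv[2] = { AT_NULL, 0 };
 *
 *	prctl(PR_SET_MM, PR_SET_MM_AUXV, (unsigned long)auxv, sizeof(auxv), 0);
 */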
2120  
2121  static int prctl_set_mm(int opt, unsigned long addr,
2122  			unsigned long arg4, unsigned long arg5)
2123  {
2124  	struct mm_struct *mm = current->mm;
2125  	struct prctl_mm_map prctl_map = {
2126  		.auxv = NULL,
2127  		.auxv_size = 0,
2128  		.exe_fd = -1,
2129  	};
2130  	struct vm_area_struct *vma;
2131  	int error;
2132  
2133  	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2134  			      opt != PR_SET_MM_MAP &&
2135  			      opt != PR_SET_MM_MAP_SIZE)))
2136  		return -EINVAL;
2137  
2138  #ifdef CONFIG_CHECKPOINT_RESTORE
2139  	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2140  		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2141  #endif
2142  
2143  	if (!capable(CAP_SYS_RESOURCE))
2144  		return -EPERM;
2145  
2146  	if (opt == PR_SET_MM_EXE_FILE)
2147  		return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2148  
2149  	if (opt == PR_SET_MM_AUXV)
2150  		return prctl_set_auxv(mm, addr, arg4);
2151  
2152  	if (addr >= TASK_SIZE || addr < mmap_min_addr)
2153  		return -EINVAL;
2154  
2155  	error = -EINVAL;
2156  
2157  	/*
2158  	 * arg_lock protects concurrent updates of arg boundaries, we need
2159  	 * mmap_lock for a) concurrent sys_brk, b) finding VMA for addr
2160  	 * validation.
2161  	 */
2162  	mmap_read_lock(mm);
2163  	vma = find_vma(mm, addr);
2164  
2165  	spin_lock(&mm->arg_lock);
2166  	prctl_map.start_code	= mm->start_code;
2167  	prctl_map.end_code	= mm->end_code;
2168  	prctl_map.start_data	= mm->start_data;
2169  	prctl_map.end_data	= mm->end_data;
2170  	prctl_map.start_brk	= mm->start_brk;
2171  	prctl_map.brk		= mm->brk;
2172  	prctl_map.start_stack	= mm->start_stack;
2173  	prctl_map.arg_start	= mm->arg_start;
2174  	prctl_map.arg_end	= mm->arg_end;
2175  	prctl_map.env_start	= mm->env_start;
2176  	prctl_map.env_end	= mm->env_end;
2177  
2178  	switch (opt) {
2179  	case PR_SET_MM_START_CODE:
2180  		prctl_map.start_code = addr;
2181  		break;
2182  	case PR_SET_MM_END_CODE:
2183  		prctl_map.end_code = addr;
2184  		break;
2185  	case PR_SET_MM_START_DATA:
2186  		prctl_map.start_data = addr;
2187  		break;
2188  	case PR_SET_MM_END_DATA:
2189  		prctl_map.end_data = addr;
2190  		break;
2191  	case PR_SET_MM_START_STACK:
2192  		prctl_map.start_stack = addr;
2193  		break;
2194  	case PR_SET_MM_START_BRK:
2195  		prctl_map.start_brk = addr;
2196  		break;
2197  	case PR_SET_MM_BRK:
2198  		prctl_map.brk = addr;
2199  		break;
2200  	case PR_SET_MM_ARG_START:
2201  		prctl_map.arg_start = addr;
2202  		break;
2203  	case PR_SET_MM_ARG_END:
2204  		prctl_map.arg_end = addr;
2205  		break;
2206  	case PR_SET_MM_ENV_START:
2207  		prctl_map.env_start = addr;
2208  		break;
2209  	case PR_SET_MM_ENV_END:
2210  		prctl_map.env_end = addr;
2211  		break;
2212  	default:
2213  		goto out;
2214  	}
2215  
2216  	error = validate_prctl_map_addr(&prctl_map);
2217  	if (error)
2218  		goto out;
2219  
2220  	switch (opt) {
2221  	/*
2222  	 * If the command line arguments and environment
2223  	 * are placed somewhere else on the stack, we can
2224  	 * set them up here: ARG_START/END to set up the
2225  	 * command line arguments and ENV_START/END for
2226  	 * the environment.
2227  	 */
2228  	case PR_SET_MM_START_STACK:
2229  	case PR_SET_MM_ARG_START:
2230  	case PR_SET_MM_ARG_END:
2231  	case PR_SET_MM_ENV_START:
2232  	case PR_SET_MM_ENV_END:
2233  		if (!vma) {
2234  			error = -EFAULT;
2235  			goto out;
2236  		}
2237  	}
2238  
2239  	mm->start_code	= prctl_map.start_code;
2240  	mm->end_code	= prctl_map.end_code;
2241  	mm->start_data	= prctl_map.start_data;
2242  	mm->end_data	= prctl_map.end_data;
2243  	mm->start_brk	= prctl_map.start_brk;
2244  	mm->brk		= prctl_map.brk;
2245  	mm->start_stack	= prctl_map.start_stack;
2246  	mm->arg_start	= prctl_map.arg_start;
2247  	mm->arg_end	= prctl_map.arg_end;
2248  	mm->env_start	= prctl_map.env_start;
2249  	mm->env_end	= prctl_map.env_end;
2250  
2251  	error = 0;
2252  out:
2253  	spin_unlock(&mm->arg_lock);
2254  	mmap_read_unlock(mm);
2255  	return error;
2256  }
2257  
2258  #ifdef CONFIG_CHECKPOINT_RESTORE
2259  static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
2260  {
2261  	return put_user(me->clear_child_tid, tid_addr);
2262  }
2263  #else
2264  static int prctl_get_tid_address(struct task_struct *me, int __user * __user *tid_addr)
2265  {
2266  	return -EINVAL;
2267  }
2268  #endif
2269  
2270  static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2271  {
2272  	/*
2273  	 * If the task has has_child_subreaper set, all its descendants
2274  	 * already have the flag too and new descendants will
2275  	 * inherit it on fork, so skip them.
2276  	 *
2277  	 * If we've found the child_reaper, skip the descendants in
2278  	 * its subtree as they will never leave their pidns.
2279  	 */
2280  	if (p->signal->has_child_subreaper ||
2281  	    is_child_reaper(task_pid(p)))
2282  		return 0;
2283  
2284  	p->signal->has_child_subreaper = 1;
2285  	return 1;
2286  }
2287  
2288  int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2289  {
2290  	return -EINVAL;
2291  }
2292  
2293  int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2294  				    unsigned long ctrl)
2295  {
2296  	return -EINVAL;
2297  }
2298  
2299  #define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
2300  
2301  #ifdef CONFIG_ANON_VMA_NAME
2302  
2303  #define ANON_VMA_NAME_MAX_LEN		80
2304  #define ANON_VMA_NAME_INVALID_CHARS	"\\`$[]"
2305  
2306  static inline bool is_valid_name_char(char ch)
2307  {
2308  	/* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */
2309  	return ch > 0x1f && ch < 0x7f &&
2310  		!strchr(ANON_VMA_NAME_INVALID_CHARS, ch);
2311  }
2312  
2313  static int prctl_set_vma(unsigned long opt, unsigned long addr,
2314  			 unsigned long size, unsigned long arg)
2315  {
2316  	struct mm_struct *mm = current->mm;
2317  	const char __user *uname;
2318  	struct anon_vma_name *anon_name = NULL;
2319  	int error;
2320  
2321  	switch (opt) {
2322  	case PR_SET_VMA_ANON_NAME:
2323  		uname = (const char __user *)arg;
2324  		if (uname) {
2325  			char *name, *pch;
2326  
2327  			name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
2328  			if (IS_ERR(name))
2329  				return PTR_ERR(name);
2330  
2331  			for (pch = name; *pch != '\0'; pch++) {
2332  				if (!is_valid_name_char(*pch)) {
2333  					kfree(name);
2334  					return -EINVAL;
2335  				}
2336  			}
2337  			/* anon_vma has its own copy */
2338  			anon_name = anon_vma_name_alloc(name);
2339  			kfree(name);
2340  			if (!anon_name)
2341  				return -ENOMEM;
2342  
2343  		}
2344  
2345  		mmap_write_lock(mm);
2346  		error = madvise_set_anon_name(mm, addr, size, anon_name);
2347  		mmap_write_unlock(mm);
2348  		anon_vma_name_put(anon_name);
2349  		break;
2350  	default:
2351  		error = -EINVAL;
2352  	}
2353  
2354  	return error;
2355  }
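
/*
 * Example (userspace, illustrative only): naming an anonymous mapping
 * so it appears as "[anon:scratch]" in /proc/self/maps; "scratch" is
 * an arbitrary label.
 *
 *	#include <sys/mman.h>
 *	#include <sys/prctl.h>
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (p != MAP_FAILED)
 *		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *		      (unsigned long)p, 4096, (unsigned long)"scratch");
 */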
2356  
2357  #else /* CONFIG_ANON_VMA_NAME */
2358  static int prctl_set_vma(unsigned long opt, unsigned long start,
2359  			 unsigned long size, unsigned long arg)
2360  {
2361  	return -EINVAL;
2362  }
2363  #endif /* CONFIG_ANON_VMA_NAME */
2364  
2365  static inline int prctl_set_mdwe(unsigned long bits, unsigned long arg3,
2366  				 unsigned long arg4, unsigned long arg5)
2367  {
2368  	if (arg3 || arg4 || arg5)
2369  		return -EINVAL;
2370  
2371  	if (bits & ~(PR_MDWE_REFUSE_EXEC_GAIN))
2372  		return -EINVAL;
2373  
2374  	if (bits & PR_MDWE_REFUSE_EXEC_GAIN)
2375  		set_bit(MMF_HAS_MDWE, &current->mm->flags);
2376  	else if (test_bit(MMF_HAS_MDWE, &current->mm->flags))
2377  		return -EPERM; /* Cannot unset the flag */
2378  
2379  	return 0;
2380  }
2381  
2382  static inline int prctl_get_mdwe(unsigned long arg2, unsigned long arg3,
2383  				 unsigned long arg4, unsigned long arg5)
2384  {
2385  	if (arg2 || arg3 || arg4 || arg5)
2386  		return -EINVAL;
2387  
2388  	return test_bit(MMF_HAS_MDWE, &current->mm->flags) ?
2389  		PR_MDWE_REFUSE_EXEC_GAIN : 0;
2390  }
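
/*
 * Example (userspace, illustrative only): opting a process into
 * memory-deny-write-execute.  As prctl_set_mdwe() above enforces, the
 * setting is one-way and cannot be cleared again.
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_MDWE, PR_MDWE_REFUSE_EXEC_GAIN, 0, 0, 0);
 *	int enabled = prctl(PR_GET_MDWE, 0, 0, 0, 0);
 */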
2391  
2392  static int prctl_get_auxv(void __user *addr, unsigned long len)
2393  {
2394  	struct mm_struct *mm = current->mm;
2395  	unsigned long size = min_t(unsigned long, sizeof(mm->saved_auxv), len);
2396  
2397  	if (size && copy_to_user(addr, mm->saved_auxv, size))
2398  		return -EFAULT;
2399  	return sizeof(mm->saved_auxv);
2400  }
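
/*
 * Example (userspace, illustrative only): fetching the saved auxv.
 * At most len bytes are copied, but the return value is always the
 * full size of the kernel buffer, so truncation can be detected by
 * comparing it against sizeof(auxv).
 *
 *	#include <sys/prctl.h>
 *
 *	unsigned long auxv[64];
 *	int full = prctl(PR_GET_AUXV, (unsigned long)auxv, sizeof(auxv), 0, 0);
 */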
2401  
2402  SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2403  		unsigned long, arg4, unsigned long, arg5)
2404  {
2405  	struct task_struct *me = current;
2406  	unsigned char comm[sizeof(me->comm)];
2407  	long error;
2408  
2409  	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2410  	if (error != -ENOSYS)
2411  		return error;
2412  
2413  	error = 0;
2414  	switch (option) {
2415  	case PR_SET_PDEATHSIG:
2416  		if (!valid_signal(arg2)) {
2417  			error = -EINVAL;
2418  			break;
2419  		}
2420  		me->pdeath_signal = arg2;
2421  		break;
2422  	case PR_GET_PDEATHSIG:
2423  		error = put_user(me->pdeath_signal, (int __user *)arg2);
2424  		break;
2425  	case PR_GET_DUMPABLE:
2426  		error = get_dumpable(me->mm);
2427  		break;
2428  	case PR_SET_DUMPABLE:
2429  		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2430  			error = -EINVAL;
2431  			break;
2432  		}
2433  		set_dumpable(me->mm, arg2);
2434  		break;
2435  
2436  	case PR_SET_UNALIGN:
2437  		error = SET_UNALIGN_CTL(me, arg2);
2438  		break;
2439  	case PR_GET_UNALIGN:
2440  		error = GET_UNALIGN_CTL(me, arg2);
2441  		break;
2442  	case PR_SET_FPEMU:
2443  		error = SET_FPEMU_CTL(me, arg2);
2444  		break;
2445  	case PR_GET_FPEMU:
2446  		error = GET_FPEMU_CTL(me, arg2);
2447  		break;
2448  	case PR_SET_FPEXC:
2449  		error = SET_FPEXC_CTL(me, arg2);
2450  		break;
2451  	case PR_GET_FPEXC:
2452  		error = GET_FPEXC_CTL(me, arg2);
2453  		break;
2454  	case PR_GET_TIMING:
2455  		error = PR_TIMING_STATISTICAL;
2456  		break;
2457  	case PR_SET_TIMING:
2458  		if (arg2 != PR_TIMING_STATISTICAL)
2459  			error = -EINVAL;
2460  		break;
2461  	case PR_SET_NAME:
2462  		comm[sizeof(me->comm) - 1] = 0;
2463  		if (strncpy_from_user(comm, (char __user *)arg2,
2464  				      sizeof(me->comm) - 1) < 0)
2465  			return -EFAULT;
2466  		set_task_comm(me, comm);
2467  		proc_comm_connector(me);
2468  		break;
2469  	case PR_GET_NAME:
2470  		get_task_comm(comm, me);
2471  		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2472  			return -EFAULT;
2473  		break;
2474  	case PR_GET_ENDIAN:
2475  		error = GET_ENDIAN(me, arg2);
2476  		break;
2477  	case PR_SET_ENDIAN:
2478  		error = SET_ENDIAN(me, arg2);
2479  		break;
2480  	case PR_GET_SECCOMP:
2481  		error = prctl_get_seccomp();
2482  		break;
2483  	case PR_SET_SECCOMP:
2484  		error = prctl_set_seccomp(arg2, (char __user *)arg3);
2485  		break;
2486  	case PR_GET_TSC:
2487  		error = GET_TSC_CTL(arg2);
2488  		break;
2489  	case PR_SET_TSC:
2490  		error = SET_TSC_CTL(arg2);
2491  		break;
2492  	case PR_TASK_PERF_EVENTS_DISABLE:
2493  		error = perf_event_task_disable();
2494  		break;
2495  	case PR_TASK_PERF_EVENTS_ENABLE:
2496  		error = perf_event_task_enable();
2497  		break;
2498  	case PR_GET_TIMERSLACK:
2499  		if (current->timer_slack_ns > ULONG_MAX)
2500  			error = ULONG_MAX;
2501  		else
2502  			error = current->timer_slack_ns;
2503  		break;
2504  	case PR_SET_TIMERSLACK:
2505  		if (arg2 <= 0)
2506  			current->timer_slack_ns =
2507  					current->default_timer_slack_ns;
2508  		else
2509  			current->timer_slack_ns = arg2;
2510  		break;
2511  	case PR_MCE_KILL:
2512  		if (arg4 | arg5)
2513  			return -EINVAL;
2514  		switch (arg2) {
2515  		case PR_MCE_KILL_CLEAR:
2516  			if (arg3 != 0)
2517  				return -EINVAL;
2518  			current->flags &= ~PF_MCE_PROCESS;
2519  			break;
2520  		case PR_MCE_KILL_SET:
2521  			current->flags |= PF_MCE_PROCESS;
2522  			if (arg3 == PR_MCE_KILL_EARLY)
2523  				current->flags |= PF_MCE_EARLY;
2524  			else if (arg3 == PR_MCE_KILL_LATE)
2525  				current->flags &= ~PF_MCE_EARLY;
2526  			else if (arg3 == PR_MCE_KILL_DEFAULT)
2527  				current->flags &=
2528  						~(PF_MCE_EARLY|PF_MCE_PROCESS);
2529  			else
2530  				return -EINVAL;
2531  			break;
2532  		default:
2533  			return -EINVAL;
2534  		}
2535  		break;
2536  	case PR_MCE_KILL_GET:
2537  		if (arg2 | arg3 | arg4 | arg5)
2538  			return -EINVAL;
2539  		if (current->flags & PF_MCE_PROCESS)
2540  			error = (current->flags & PF_MCE_EARLY) ?
2541  				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2542  		else
2543  			error = PR_MCE_KILL_DEFAULT;
2544  		break;
2545  	case PR_GET_AUXV:
2546  		if (arg4 || arg5)
2547  			return -EINVAL;
2548  		error = prctl_get_auxv((void __user *)arg2, arg3);
2549  		break;
2550  	case PR_SET_MM:
2551  		error = prctl_set_mm(arg2, arg3, arg4, arg5);
2552  		break;
2553  	case PR_GET_TID_ADDRESS:
2554  		error = prctl_get_tid_address(me, (int __user * __user *)arg2);
2555  		break;
2556  	case PR_SET_CHILD_SUBREAPER:
2557  		me->signal->is_child_subreaper = !!arg2;
2558  		if (!arg2)
2559  			break;
2560  
2561  		walk_process_tree(me, propagate_has_child_subreaper, NULL);
2562  		break;
2563  	case PR_GET_CHILD_SUBREAPER:
2564  		error = put_user(me->signal->is_child_subreaper,
2565  				 (int __user *)arg2);
2566  		break;
2567  	case PR_SET_NO_NEW_PRIVS:
2568  		if (arg2 != 1 || arg3 || arg4 || arg5)
2569  			return -EINVAL;
2570  
2571  		task_set_no_new_privs(current);
2572  		break;
2573  	case PR_GET_NO_NEW_PRIVS:
2574  		if (arg2 || arg3 || arg4 || arg5)
2575  			return -EINVAL;
2576  		return task_no_new_privs(current) ? 1 : 0;
2577  	case PR_GET_THP_DISABLE:
2578  		if (arg2 || arg3 || arg4 || arg5)
2579  			return -EINVAL;
2580  		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2581  		break;
2582  	case PR_SET_THP_DISABLE:
2583  		if (arg3 || arg4 || arg5)
2584  			return -EINVAL;
2585  		if (mmap_write_lock_killable(me->mm))
2586  			return -EINTR;
2587  		if (arg2)
2588  			set_bit(MMF_DISABLE_THP, &me->mm->flags);
2589  		else
2590  			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2591  		mmap_write_unlock(me->mm);
2592  		break;
2593  	case PR_MPX_ENABLE_MANAGEMENT:
2594  	case PR_MPX_DISABLE_MANAGEMENT:
2595  		/* No longer implemented: */
2596  		return -EINVAL;
2597  	case PR_SET_FP_MODE:
2598  		error = SET_FP_MODE(me, arg2);
2599  		break;
2600  	case PR_GET_FP_MODE:
2601  		error = GET_FP_MODE(me);
2602  		break;
2603  	case PR_SVE_SET_VL:
2604  		error = SVE_SET_VL(arg2);
2605  		break;
2606  	case PR_SVE_GET_VL:
2607  		error = SVE_GET_VL();
2608  		break;
2609  	case PR_SME_SET_VL:
2610  		error = SME_SET_VL(arg2);
2611  		break;
2612  	case PR_SME_GET_VL:
2613  		error = SME_GET_VL();
2614  		break;
2615  	case PR_GET_SPECULATION_CTRL:
2616  		if (arg3 || arg4 || arg5)
2617  			return -EINVAL;
2618  		error = arch_prctl_spec_ctrl_get(me, arg2);
2619  		break;
2620  	case PR_SET_SPECULATION_CTRL:
2621  		if (arg4 || arg5)
2622  			return -EINVAL;
2623  		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2624  		break;
2625  	case PR_PAC_RESET_KEYS:
2626  		if (arg3 || arg4 || arg5)
2627  			return -EINVAL;
2628  		error = PAC_RESET_KEYS(me, arg2);
2629  		break;
2630  	case PR_PAC_SET_ENABLED_KEYS:
2631  		if (arg4 || arg5)
2632  			return -EINVAL;
2633  		error = PAC_SET_ENABLED_KEYS(me, arg2, arg3);
2634  		break;
2635  	case PR_PAC_GET_ENABLED_KEYS:
2636  		if (arg2 || arg3 || arg4 || arg5)
2637  			return -EINVAL;
2638  		error = PAC_GET_ENABLED_KEYS(me);
2639  		break;
2640  	case PR_SET_TAGGED_ADDR_CTRL:
2641  		if (arg3 || arg4 || arg5)
2642  			return -EINVAL;
2643  		error = SET_TAGGED_ADDR_CTRL(arg2);
2644  		break;
2645  	case PR_GET_TAGGED_ADDR_CTRL:
2646  		if (arg2 || arg3 || arg4 || arg5)
2647  			return -EINVAL;
2648  		error = GET_TAGGED_ADDR_CTRL();
2649  		break;
2650  	case PR_SET_IO_FLUSHER:
2651  		if (!capable(CAP_SYS_RESOURCE))
2652  			return -EPERM;
2653  
2654  		if (arg3 || arg4 || arg5)
2655  			return -EINVAL;
2656  
2657  		if (arg2 == 1)
2658  			current->flags |= PR_IO_FLUSHER;
2659  		else if (!arg2)
2660  			current->flags &= ~PR_IO_FLUSHER;
2661  		else
2662  			return -EINVAL;
2663  		break;
2664  	case PR_GET_IO_FLUSHER:
2665  		if (!capable(CAP_SYS_RESOURCE))
2666  			return -EPERM;
2667  
2668  		if (arg2 || arg3 || arg4 || arg5)
2669  			return -EINVAL;
2670  
2671  		error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
2672  		break;
2673  	case PR_SET_SYSCALL_USER_DISPATCH:
2674  		error = set_syscall_user_dispatch(arg2, arg3, arg4,
2675  						  (char __user *) arg5);
2676  		break;
2677  #ifdef CONFIG_SCHED_CORE
2678  	case PR_SCHED_CORE:
2679  		error = sched_core_share_pid(arg2, arg3, arg4, arg5);
2680  		break;
2681  #endif
2682  	case PR_SET_MDWE:
2683  		error = prctl_set_mdwe(arg2, arg3, arg4, arg5);
2684  		break;
2685  	case PR_GET_MDWE:
2686  		error = prctl_get_mdwe(arg2, arg3, arg4, arg5);
2687  		break;
2688  	case PR_SET_VMA:
2689  		error = prctl_set_vma(arg2, arg3, arg4, arg5);
2690  		break;
2691  #ifdef CONFIG_KSM
2692  	case PR_SET_MEMORY_MERGE:
2693  		if (arg3 || arg4 || arg5)
2694  			return -EINVAL;
2695  		if (mmap_write_lock_killable(me->mm))
2696  			return -EINTR;
2697  
2698  		if (arg2)
2699  			error = ksm_enable_merge_any(me->mm);
2700  		else
2701  			error = ksm_disable_merge_any(me->mm);
2702  		mmap_write_unlock(me->mm);
2703  		break;
2704  	case PR_GET_MEMORY_MERGE:
2705  		if (arg2 || arg3 || arg4 || arg5)
2706  			return -EINVAL;
2707  
2708  		error = !!test_bit(MMF_VM_MERGE_ANY, &me->mm->flags);
2709  		break;
2710  #endif
2711  	default:
2712  		error = -EINVAL;
2713  		break;
2714  	}
2715  	return error;
2716  }
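
/*
 * Example (userspace, illustrative only): probably the most common
 * prctl, renaming the calling thread.  Names are silently truncated
 * to sizeof(current->comm) - 1 == 15 bytes plus the NUL terminator.
 *
 *	#include <sys/prctl.h>
 *
 *	char comm[16];
 *
 *	prctl(PR_SET_NAME, (unsigned long)"worker-0", 0, 0, 0);
 *	prctl(PR_GET_NAME, (unsigned long)comm, 0, 0, 0);
 */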
2717  
2718  SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2719  		struct getcpu_cache __user *, unused)
2720  {
2721  	int err = 0;
2722  	int cpu = raw_smp_processor_id();
2723  
2724  	if (cpup)
2725  		err |= put_user(cpu, cpup);
2726  	if (nodep)
2727  		err |= put_user(cpu_to_node(cpu), nodep);
2728  	return err ? -EFAULT : 0;
2729  }
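
/*
 * Example (userspace, illustrative only): glibc exposes this syscall
 * as getcpu()/sched_getcpu(), and on many architectures serves it from
 * the vDSO without entering the kernel.  Either pointer may be NULL if
 * only one of the two values is wanted.
 *
 *	#include <sched.h>
 *
 *	unsigned int cpu, node;
 *
 *	getcpu(&cpu, &node);
 */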
2730  
2731  /**
2732   * do_sysinfo - fill in sysinfo struct
2733   * @info: pointer to buffer to fill
2734   */
2735  static int do_sysinfo(struct sysinfo *info)
2736  {
2737  	unsigned long mem_total, sav_total;
2738  	unsigned int mem_unit, bitcount;
2739  	struct timespec64 tp;
2740  
2741  	memset(info, 0, sizeof(struct sysinfo));
2742  
2743  	ktime_get_boottime_ts64(&tp);
2744  	timens_add_boottime(&tp);
2745  	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2746  
2747  	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2748  
2749  	info->procs = nr_threads;
2750  
2751  	si_meminfo(info);
2752  	si_swapinfo(info);
2753  
2754  	/*
2755  	 * If the sum of all the available memory (i.e. ram + swap)
2756  	 * is less than can be stored in a 32 bit unsigned long then
2757  	 * we can be binary compatible with 2.2.x kernels.  If not,
2758  	 * well, in that case 2.2.x was broken anyways...
2759  	 *
2760  	 *  -Erik Andersen <andersee@debian.org>
2761  	 */
2762  
2763  	mem_total = info->totalram + info->totalswap;
2764  	if (mem_total < info->totalram || mem_total < info->totalswap)
2765  		goto out;
2766  	bitcount = 0;
2767  	mem_unit = info->mem_unit;
2768  	while (mem_unit > 1) {
2769  		bitcount++;
2770  		mem_unit >>= 1;
2771  		sav_total = mem_total;
2772  		mem_total <<= 1;
2773  		if (mem_total < sav_total)
2774  			goto out;
2775  	}
2776  
2777  	/*
2778  	 * If mem_total did not overflow, multiply all memory values by
2779  	 * info->mem_unit and set it to 1.  This leaves things compatible
2780  	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2781  	 * kernels...
2782  	 */
2783  
2784  	info->mem_unit = 1;
2785  	info->totalram <<= bitcount;
2786  	info->freeram <<= bitcount;
2787  	info->sharedram <<= bitcount;
2788  	info->bufferram <<= bitcount;
2789  	info->totalswap <<= bitcount;
2790  	info->freeswap <<= bitcount;
2791  	info->totalhigh <<= bitcount;
2792  	info->freehigh <<= bitcount;
2793  
2794  out:
2795  	return 0;
2796  }
2797  
2798  SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2799  {
2800  	struct sysinfo val;
2801  
2802  	do_sysinfo(&val);
2803  
2804  	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2805  		return -EFAULT;
2806  
2807  	return 0;
2808  }
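
/*
 * Example (userspace, illustrative only): the memory fields are
 * expressed in units of mem_unit bytes, which do_sysinfo() may have
 * rescaled to avoid 32-bit overflow, so portable callers multiply.
 *
 *	#include <sys/sysinfo.h>
 *
 *	struct sysinfo si;
 *
 *	if (sysinfo(&si) == 0) {
 *		unsigned long long total =
 *			(unsigned long long)si.totalram * si.mem_unit;
 *	}
 */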
2809  
2810  #ifdef CONFIG_COMPAT
2811  struct compat_sysinfo {
2812  	s32 uptime;
2813  	u32 loads[3];
2814  	u32 totalram;
2815  	u32 freeram;
2816  	u32 sharedram;
2817  	u32 bufferram;
2818  	u32 totalswap;
2819  	u32 freeswap;
2820  	u16 procs;
2821  	u16 pad;
2822  	u32 totalhigh;
2823  	u32 freehigh;
2824  	u32 mem_unit;
2825  	char _f[20-2*sizeof(u32)-sizeof(int)];
2826  };
2827  
2828  COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2829  {
2830  	struct sysinfo s;
2831  	struct compat_sysinfo s_32;
2832  
2833  	do_sysinfo(&s);
2834  
2835  	/* Check to see if any memory value is too large for 32-bit
2836  	 * and scale down if needed.
2837  	 */
2838  	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2839  		int bitcount = 0;
2840  
2841  		while (s.mem_unit < PAGE_SIZE) {
2842  			s.mem_unit <<= 1;
2843  			bitcount++;
2844  		}
2845  
2846  		s.totalram >>= bitcount;
2847  		s.freeram >>= bitcount;
2848  		s.sharedram >>= bitcount;
2849  		s.bufferram >>= bitcount;
2850  		s.totalswap >>= bitcount;
2851  		s.freeswap >>= bitcount;
2852  		s.totalhigh >>= bitcount;
2853  		s.freehigh >>= bitcount;
2854  	}
2855  
2856  	memset(&s_32, 0, sizeof(s_32));
2857  	s_32.uptime = s.uptime;
2858  	s_32.loads[0] = s.loads[0];
2859  	s_32.loads[1] = s.loads[1];
2860  	s_32.loads[2] = s.loads[2];
2861  	s_32.totalram = s.totalram;
2862  	s_32.freeram = s.freeram;
2863  	s_32.sharedram = s.sharedram;
2864  	s_32.bufferram = s.bufferram;
2865  	s_32.totalswap = s.totalswap;
2866  	s_32.freeswap = s.freeswap;
2867  	s_32.procs = s.procs;
2868  	s_32.totalhigh = s.totalhigh;
2869  	s_32.freehigh = s.freehigh;
2870  	s_32.mem_unit = s.mem_unit;
2871  	if (copy_to_user(info, &s_32, sizeof(s_32)))
2872  		return -EFAULT;
2873  	return 0;
2874  }
2875  #endif /* CONFIG_COMPAT */
2876