xref: /openbmc/linux/kernel/fork.c (revision cf1788fb)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   *  linux/kernel/fork.c
4   *
5   *  Copyright (C) 1991, 1992  Linus Torvalds
6   */
7  
8  /*
9   *  'fork.c' contains the help-routines for the 'fork' system call
10   * (see also entry.S and others).
11   * Fork is rather simple, once you get the hang of it, but the memory
12   * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
13   */
14  
15  #include <linux/anon_inodes.h>
16  #include <linux/slab.h>
17  #include <linux/sched/autogroup.h>
18  #include <linux/sched/mm.h>
19  #include <linux/sched/coredump.h>
20  #include <linux/sched/user.h>
21  #include <linux/sched/numa_balancing.h>
22  #include <linux/sched/stat.h>
23  #include <linux/sched/task.h>
24  #include <linux/sched/task_stack.h>
25  #include <linux/sched/cputime.h>
26  #include <linux/seq_file.h>
27  #include <linux/rtmutex.h>
28  #include <linux/init.h>
29  #include <linux/unistd.h>
30  #include <linux/module.h>
31  #include <linux/vmalloc.h>
32  #include <linux/completion.h>
33  #include <linux/personality.h>
34  #include <linux/mempolicy.h>
35  #include <linux/sem.h>
36  #include <linux/file.h>
37  #include <linux/fdtable.h>
38  #include <linux/iocontext.h>
39  #include <linux/key.h>
40  #include <linux/kmsan.h>
41  #include <linux/binfmts.h>
42  #include <linux/mman.h>
43  #include <linux/mmu_notifier.h>
44  #include <linux/fs.h>
45  #include <linux/mm.h>
46  #include <linux/mm_inline.h>
47  #include <linux/nsproxy.h>
48  #include <linux/capability.h>
49  #include <linux/cpu.h>
50  #include <linux/cgroup.h>
51  #include <linux/security.h>
52  #include <linux/hugetlb.h>
53  #include <linux/seccomp.h>
54  #include <linux/swap.h>
55  #include <linux/syscalls.h>
56  #include <linux/jiffies.h>
57  #include <linux/futex.h>
58  #include <linux/compat.h>
59  #include <linux/kthread.h>
60  #include <linux/task_io_accounting_ops.h>
61  #include <linux/rcupdate.h>
62  #include <linux/ptrace.h>
63  #include <linux/mount.h>
64  #include <linux/audit.h>
65  #include <linux/memcontrol.h>
66  #include <linux/ftrace.h>
67  #include <linux/proc_fs.h>
68  #include <linux/profile.h>
69  #include <linux/rmap.h>
70  #include <linux/ksm.h>
71  #include <linux/acct.h>
72  #include <linux/userfaultfd_k.h>
73  #include <linux/tsacct_kern.h>
74  #include <linux/cn_proc.h>
75  #include <linux/freezer.h>
76  #include <linux/delayacct.h>
77  #include <linux/taskstats_kern.h>
78  #include <linux/tty.h>
79  #include <linux/fs_struct.h>
80  #include <linux/magic.h>
81  #include <linux/perf_event.h>
82  #include <linux/posix-timers.h>
83  #include <linux/user-return-notifier.h>
84  #include <linux/oom.h>
85  #include <linux/khugepaged.h>
86  #include <linux/signalfd.h>
87  #include <linux/uprobes.h>
88  #include <linux/aio.h>
89  #include <linux/compiler.h>
90  #include <linux/sysctl.h>
91  #include <linux/kcov.h>
92  #include <linux/livepatch.h>
93  #include <linux/thread_info.h>
94  #include <linux/stackleak.h>
95  #include <linux/kasan.h>
96  #include <linux/scs.h>
97  #include <linux/io_uring.h>
98  #include <linux/bpf.h>
99  #include <linux/stackprotector.h>
100  #include <linux/user_events.h>
101  #include <linux/iommu.h>
102  
103  #include <asm/pgalloc.h>
104  #include <linux/uaccess.h>
105  #include <asm/mmu_context.h>
106  #include <asm/cacheflush.h>
107  #include <asm/tlbflush.h>
108  
109  #include <trace/events/sched.h>
110  
111  #define CREATE_TRACE_POINTS
112  #include <trace/events/task.h>
113  
114  /*
115   * Minimum number of threads to boot the kernel
116   */
117  #define MIN_THREADS 20
118  
119  /*
120   * Maximum number of threads
121   */
122  #define MAX_THREADS FUTEX_TID_MASK
123  
124  /*
125   * Counters protected by write_lock_irq(&tasklist_lock)
126   */
127  unsigned long total_forks;	/* Handle normal Linux uptimes. */
128  int nr_threads;			/* The idle threads do not count.. */
129  
130  static int max_threads;		/* tunable limit on nr_threads */
131  
132  #define NAMED_ARRAY_INDEX(x)	[x] = __stringify(x)
133  
134  static const char * const resident_page_types[] = {
135  	NAMED_ARRAY_INDEX(MM_FILEPAGES),
136  	NAMED_ARRAY_INDEX(MM_ANONPAGES),
137  	NAMED_ARRAY_INDEX(MM_SWAPENTS),
138  	NAMED_ARRAY_INDEX(MM_SHMEMPAGES),
139  };
140  
141  DEFINE_PER_CPU(unsigned long, process_counts) = 0;
142  
143  __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
144  
145  #ifdef CONFIG_PROVE_RCU
146  int lockdep_tasklist_lock_is_held(void)
147  {
148  	return lockdep_is_held(&tasklist_lock);
149  }
150  EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
151  #endif /* #ifdef CONFIG_PROVE_RCU */
152  
153  int nr_processes(void)
154  {
155  	int cpu;
156  	int total = 0;
157  
158  	for_each_possible_cpu(cpu)
159  		total += per_cpu(process_counts, cpu);
160  
161  	return total;
162  }
163  
164  void __weak arch_release_task_struct(struct task_struct *tsk)
165  {
166  }
167  
168  #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
169  static struct kmem_cache *task_struct_cachep;
170  
171  static inline struct task_struct *alloc_task_struct_node(int node)
172  {
173  	return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
174  }
175  
176  static inline void free_task_struct(struct task_struct *tsk)
177  {
178  	kmem_cache_free(task_struct_cachep, tsk);
179  }
180  #endif
181  
182  #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
183  
184  /*
185   * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
186   * kmemcache based allocator.
187   */
188  # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
189  
190  #  ifdef CONFIG_VMAP_STACK
191  /*
192   * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
193   * flush.  Try to minimize the number of calls by caching stacks.
194   */
195  #define NR_CACHED_STACKS 2
196  static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]);
197  
198  struct vm_stack {
199  	struct rcu_head rcu;
200  	struct vm_struct *stack_vm_area;
201  };
202  
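/*
 * Try to park a no-longer-used stack's vm_struct in a free per-CPU cache
 * slot so a later alloc_thread_stack_node() can reuse the mapping instead
 * of vmapping a new one. Returns true if the stack was cached (the caller
 * must not free it), false if every slot on this CPU was already occupied.
 */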
203  static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
204  {
205  	unsigned int i;
206  
207  	for (i = 0; i < NR_CACHED_STACKS; i++) {
208  		if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
209  			continue;
210  		return true;
211  	}
212  	return false;
213  }
214  
215  static void thread_stack_free_rcu(struct rcu_head *rh)
216  {
217  	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
218  
219  	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
220  		return;
221  
222  	vfree(vm_stack);
223  }
224  
225  static void thread_stack_delayed_free(struct task_struct *tsk)
226  {
227  	struct vm_stack *vm_stack = tsk->stack;
228  
229  	vm_stack->stack_vm_area = tsk->stack_vm_area;
230  	call_rcu(&vm_stack->rcu, thread_stack_free_rcu);
231  }
232  
233  static int free_vm_stack_cache(unsigned int cpu)
234  {
235  	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
236  	int i;
237  
238  	for (i = 0; i < NR_CACHED_STACKS; i++) {
239  		struct vm_struct *vm_stack = cached_vm_stacks[i];
240  
241  		if (!vm_stack)
242  			continue;
243  
244  		vfree(vm_stack->addr);
245  		cached_vm_stacks[i] = NULL;
246  	}
247  
248  	return 0;
249  }
250  
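/*
 * Charge every page of a vmapped stack to the memory cgroup's kernel
 * memory counter. On failure, uncharge whatever was already charged and
 * return the error, leaving the stack itself for the caller to free.
 */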
251  static int memcg_charge_kernel_stack(struct vm_struct *vm)
252  {
253  	int i;
254  	int ret;
255  	int nr_charged = 0;
256  
257  	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
258  
259  	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
260  		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
261  		if (ret)
262  			goto err;
263  		nr_charged++;
264  	}
265  	return 0;
266  err:
267  	for (i = 0; i < nr_charged; i++)
268  		memcg_kmem_uncharge_page(vm->pages[i], 0);
269  	return ret;
270  }
271  
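/*
 * Set up tsk->stack and tsk->stack_vm_area: first try to grab a stack from
 * the per-CPU cache (unpoisoning and zeroing the reused memory), and only
 * fall back to a fresh __vmalloc_node_range() mapping when the cache is
 * empty. Either way the stack is charged to the memcg before being handed out.
 */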
272  static int alloc_thread_stack_node(struct task_struct *tsk, int node)
273  {
274  	struct vm_struct *vm;
275  	void *stack;
276  	int i;
277  
278  	for (i = 0; i < NR_CACHED_STACKS; i++) {
279  		struct vm_struct *s;
280  
281  		s = this_cpu_xchg(cached_stacks[i], NULL);
282  
283  		if (!s)
284  			continue;
285  
286  		/* Reset stack metadata. */
287  		kasan_unpoison_range(s->addr, THREAD_SIZE);
288  
289  		stack = kasan_reset_tag(s->addr);
290  
291  		/* Clear stale pointers from reused stack. */
292  		memset(stack, 0, THREAD_SIZE);
293  
294  		if (memcg_charge_kernel_stack(s)) {
295  			vfree(s->addr);
296  			return -ENOMEM;
297  		}
298  
299  		tsk->stack_vm_area = s;
300  		tsk->stack = stack;
301  		return 0;
302  	}
303  
304  	/*
305  	 * Allocated stacks are cached and later reused by new threads,
306  	 * so memcg accounting is performed manually on assigning/releasing
307  	 * stacks to tasks. Drop __GFP_ACCOUNT.
308  	 */
309  	stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
310  				     VMALLOC_START, VMALLOC_END,
311  				     THREADINFO_GFP & ~__GFP_ACCOUNT,
312  				     PAGE_KERNEL,
313  				     0, node, __builtin_return_address(0));
314  	if (!stack)
315  		return -ENOMEM;
316  
317  	vm = find_vm_area(stack);
318  	if (memcg_charge_kernel_stack(vm)) {
319  		vfree(stack);
320  		return -ENOMEM;
321  	}
322  	/*
323  	 * We can't call find_vm_area() in interrupt context, and
324  	 * free_thread_stack() can be called in interrupt context,
325  	 * so cache the vm_struct.
326  	 */
327  	tsk->stack_vm_area = vm;
328  	stack = kasan_reset_tag(stack);
329  	tsk->stack = stack;
330  	return 0;
331  }
332  
333  static void free_thread_stack(struct task_struct *tsk)
334  {
335  	if (!try_release_thread_stack_to_cache(tsk->stack_vm_area))
336  		thread_stack_delayed_free(tsk);
337  
338  	tsk->stack = NULL;
339  	tsk->stack_vm_area = NULL;
340  }
341  
342  #  else /* !CONFIG_VMAP_STACK */
343  
344  static void thread_stack_free_rcu(struct rcu_head *rh)
345  {
346  	__free_pages(virt_to_page(rh), THREAD_SIZE_ORDER);
347  }
348  
349  static void thread_stack_delayed_free(struct task_struct *tsk)
350  {
351  	struct rcu_head *rh = tsk->stack;
352  
353  	call_rcu(rh, thread_stack_free_rcu);
354  }
355  
356  static int alloc_thread_stack_node(struct task_struct *tsk, int node)
357  {
358  	struct page *page = alloc_pages_node(node, THREADINFO_GFP,
359  					     THREAD_SIZE_ORDER);
360  
361  	if (likely(page)) {
362  		tsk->stack = kasan_reset_tag(page_address(page));
363  		return 0;
364  	}
365  	return -ENOMEM;
366  }
367  
368  static void free_thread_stack(struct task_struct *tsk)
369  {
370  	thread_stack_delayed_free(tsk);
371  	tsk->stack = NULL;
372  }
373  
374  #  endif /* CONFIG_VMAP_STACK */
375  # else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */
376  
377  static struct kmem_cache *thread_stack_cache;
378  
379  static void thread_stack_free_rcu(struct rcu_head *rh)
380  {
381  	kmem_cache_free(thread_stack_cache, rh);
382  }
383  
384  static void thread_stack_delayed_free(struct task_struct *tsk)
385  {
386  	struct rcu_head *rh = tsk->stack;
387  
388  	call_rcu(rh, thread_stack_free_rcu);
389  }
390  
391  static int alloc_thread_stack_node(struct task_struct *tsk, int node)
392  {
393  	unsigned long *stack;
394  	stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
395  	stack = kasan_reset_tag(stack);
396  	tsk->stack = stack;
397  	return stack ? 0 : -ENOMEM;
398  }
399  
400  static void free_thread_stack(struct task_struct *tsk)
401  {
402  	thread_stack_delayed_free(tsk);
403  	tsk->stack = NULL;
404  }
405  
406  void thread_stack_cache_init(void)
407  {
408  	thread_stack_cache = kmem_cache_create_usercopy("thread_stack",
409  					THREAD_SIZE, THREAD_SIZE, 0, 0,
410  					THREAD_SIZE, NULL);
411  	BUG_ON(thread_stack_cache == NULL);
412  }
413  
414  # endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
415  #else /* CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
416  
417  static int alloc_thread_stack_node(struct task_struct *tsk, int node)
418  {
419  	unsigned long *stack;
420  
421  	stack = arch_alloc_thread_stack_node(tsk, node);
422  	tsk->stack = stack;
423  	return stack ? 0 : -ENOMEM;
424  }
425  
426  static void free_thread_stack(struct task_struct *tsk)
427  {
428  	arch_free_thread_stack(tsk);
429  	tsk->stack = NULL;
430  }
431  
432  #endif /* !CONFIG_ARCH_THREAD_STACK_ALLOCATOR */
433  
434  /* SLAB cache for signal_struct structures (tsk->signal) */
435  static struct kmem_cache *signal_cachep;
436  
437  /* SLAB cache for sighand_struct structures (tsk->sighand) */
438  struct kmem_cache *sighand_cachep;
439  
440  /* SLAB cache for files_struct structures (tsk->files) */
441  struct kmem_cache *files_cachep;
442  
443  /* SLAB cache for fs_struct structures (tsk->fs) */
444  struct kmem_cache *fs_cachep;
445  
446  /* SLAB cache for vm_area_struct structures */
447  static struct kmem_cache *vm_area_cachep;
448  
449  /* SLAB cache for mm_struct structures (tsk->mm) */
450  static struct kmem_cache *mm_cachep;
451  
452  #ifdef CONFIG_PER_VMA_LOCK
453  
454  /* SLAB cache for vm_area_struct.lock */
455  static struct kmem_cache *vma_lock_cachep;
456  
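/*
 * Allocate and initialize the rw_semaphore used for per-VMA locking.
 * vm_lock_seq starts at -1 so it cannot match mm->mm_lock_seq, meaning the
 * freshly created VMA is not considered write-locked.
 */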
457  static bool vma_lock_alloc(struct vm_area_struct *vma)
458  {
459  	vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);
460  	if (!vma->vm_lock)
461  		return false;
462  
463  	init_rwsem(&vma->vm_lock->lock);
464  	vma->vm_lock_seq = -1;
465  
466  	return true;
467  }
468  
469  static inline void vma_lock_free(struct vm_area_struct *vma)
470  {
471  	kmem_cache_free(vma_lock_cachep, vma->vm_lock);
472  }
473  
474  #else /* CONFIG_PER_VMA_LOCK */
475  
476  static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; }
477  static inline void vma_lock_free(struct vm_area_struct *vma) {}
478  
479  #endif /* CONFIG_PER_VMA_LOCK */
480  
481  struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
482  {
483  	struct vm_area_struct *vma;
484  
485  	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
486  	if (!vma)
487  		return NULL;
488  
489  	vma_init(vma, mm);
490  	if (!vma_lock_alloc(vma)) {
491  		kmem_cache_free(vm_area_cachep, vma);
492  		return NULL;
493  	}
494  
495  	return vma;
496  }
497  
498  struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
499  {
500  	struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
501  
502  	if (!new)
503  		return NULL;
504  
505  	ASSERT_EXCLUSIVE_WRITER(orig->vm_flags);
506  	ASSERT_EXCLUSIVE_WRITER(orig->vm_file);
507  	/*
508  	 * orig->shared.rb may be modified concurrently, but the clone
509  	 * will be reinitialized.
510  	 */
511  	data_race(memcpy(new, orig, sizeof(*new)));
512  	if (!vma_lock_alloc(new)) {
513  		kmem_cache_free(vm_area_cachep, new);
514  		return NULL;
515  	}
516  	INIT_LIST_HEAD(&new->anon_vma_chain);
517  	vma_numab_state_init(new);
518  	dup_anon_vma_name(orig, new);
519  
520  	return new;
521  }
522  
523  void __vm_area_free(struct vm_area_struct *vma)
524  {
525  	vma_numab_state_free(vma);
526  	free_anon_vma_name(vma);
527  	vma_lock_free(vma);
528  	kmem_cache_free(vm_area_cachep, vma);
529  }
530  
531  #ifdef CONFIG_PER_VMA_LOCK
532  static void vm_area_free_rcu_cb(struct rcu_head *head)
533  {
534  	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
535  						  vm_rcu);
536  
537  	/* The vma should not be locked while being destroyed. */
538  	VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma);
539  	__vm_area_free(vma);
540  }
541  #endif
542  
543  void vm_area_free(struct vm_area_struct *vma)
544  {
545  #ifdef CONFIG_PER_VMA_LOCK
546  	call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb);
547  #else
548  	__vm_area_free(vma);
549  #endif
550  }
551  
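/*
 * Add (account = 1) or subtract (account = -1) the kernel stack's size, in
 * KiB, from the NR_KERNEL_STACK_KB counters. With CONFIG_VMAP_STACK the
 * stack pages may live on different nodes, so each page is accounted
 * separately; otherwise the whole stack is accounted against the node of
 * its first page.
 */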
552  static void account_kernel_stack(struct task_struct *tsk, int account)
553  {
554  	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
555  		struct vm_struct *vm = task_stack_vm_area(tsk);
556  		int i;
557  
558  		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
559  			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
560  					      account * (PAGE_SIZE / 1024));
561  	} else {
562  		void *stack = task_stack_page(tsk);
563  
564  		/* All stack pages are in the same node. */
565  		mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
566  				      account * (THREAD_SIZE / 1024));
567  	}
568  }
569  
570  void exit_task_stack_account(struct task_struct *tsk)
571  {
572  	account_kernel_stack(tsk, -1);
573  
574  	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
575  		struct vm_struct *vm;
576  		int i;
577  
578  		vm = task_stack_vm_area(tsk);
579  		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
580  			memcg_kmem_uncharge_page(vm->pages[i], 0);
581  	}
582  }
583  
584  static void release_task_stack(struct task_struct *tsk)
585  {
586  	if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD))
587  		return;  /* Better to leak the stack than to free prematurely */
588  
589  	free_thread_stack(tsk);
590  }
591  
592  #ifdef CONFIG_THREAD_INFO_IN_TASK
593  void put_task_stack(struct task_struct *tsk)
594  {
595  	if (refcount_dec_and_test(&tsk->stack_refcount))
596  		release_task_stack(tsk);
597  }
598  #endif
599  
600  void free_task(struct task_struct *tsk)
601  {
602  #ifdef CONFIG_SECCOMP
603  	WARN_ON_ONCE(tsk->seccomp.filter);
604  #endif
605  	release_user_cpus_ptr(tsk);
606  	scs_release(tsk);
607  
608  #ifndef CONFIG_THREAD_INFO_IN_TASK
609  	/*
610  	 * The task is finally done with both the stack and thread_info,
611  	 * so free both.
612  	 */
613  	release_task_stack(tsk);
614  #else
615  	/*
616  	 * If the task had a separate stack allocation, it should be gone
617  	 * by now.
618  	 */
619  	WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0);
620  #endif
621  	rt_mutex_debug_task_free(tsk);
622  	ftrace_graph_exit_task(tsk);
623  	arch_release_task_struct(tsk);
624  	if (tsk->flags & PF_KTHREAD)
625  		free_kthread_struct(tsk);
626  	bpf_task_storage_free(tsk);
627  	free_task_struct(tsk);
628  }
629  EXPORT_SYMBOL(free_task);
630  
631  static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm)
632  {
633  	struct file *exe_file;
634  
635  	exe_file = get_mm_exe_file(oldmm);
636  	RCU_INIT_POINTER(mm->exe_file, exe_file);
637  	/*
638  	 * We depend on the oldmm having properly denied write access to the
639  	 * exe_file already.
640  	 */
641  	if (exe_file && deny_write_access(exe_file))
642  		pr_warn_once("deny_write_access() failed in %s\n", __func__);
643  }
644  
645  #ifdef CONFIG_MMU
646  static __latent_entropy int dup_mmap(struct mm_struct *mm,
647  					struct mm_struct *oldmm)
648  {
649  	struct vm_area_struct *mpnt, *tmp;
650  	int retval;
651  	unsigned long charge = 0;
652  	LIST_HEAD(uf);
653  	VMA_ITERATOR(old_vmi, oldmm, 0);
654  	VMA_ITERATOR(vmi, mm, 0);
655  
656  	uprobe_start_dup_mmap();
657  	if (mmap_write_lock_killable(oldmm)) {
658  		retval = -EINTR;
659  		goto fail_uprobe_end;
660  	}
661  	flush_cache_dup_mm(oldmm);
662  	uprobe_dup_mmap(oldmm, mm);
663  	/*
664  	 * Not linked in yet - no deadlock potential:
665  	 */
666  	mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING);
667  
668  	/* No ordering required: file already has been exposed. */
669  	dup_mm_exe_file(mm, oldmm);
670  
671  	mm->total_vm = oldmm->total_vm;
672  	mm->data_vm = oldmm->data_vm;
673  	mm->exec_vm = oldmm->exec_vm;
674  	mm->stack_vm = oldmm->stack_vm;
675  
676  	retval = ksm_fork(mm, oldmm);
677  	if (retval)
678  		goto out;
679  	khugepaged_fork(mm, oldmm);
680  
681  	retval = vma_iter_bulk_alloc(&vmi, oldmm->map_count);
682  	if (retval)
683  		goto out;
684  
685  	mt_clear_in_rcu(vmi.mas.tree);
686  	for_each_vma(old_vmi, mpnt) {
687  		struct file *file;
688  
689  		vma_start_write(mpnt);
690  		if (mpnt->vm_flags & VM_DONTCOPY) {
691  			vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt));
692  			continue;
693  		}
694  		charge = 0;
695  		/*
696  		 * Don't duplicate many vmas if we've been oom-killed (for
697  		 * example)
698  		 */
699  		if (fatal_signal_pending(current)) {
700  			retval = -EINTR;
701  			goto loop_out;
702  		}
703  		if (mpnt->vm_flags & VM_ACCOUNT) {
704  			unsigned long len = vma_pages(mpnt);
705  
706  			if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
707  				goto fail_nomem;
708  			charge = len;
709  		}
710  		tmp = vm_area_dup(mpnt);
711  		if (!tmp)
712  			goto fail_nomem;
713  		retval = vma_dup_policy(mpnt, tmp);
714  		if (retval)
715  			goto fail_nomem_policy;
716  		tmp->vm_mm = mm;
717  		retval = dup_userfaultfd(tmp, &uf);
718  		if (retval)
719  			goto fail_nomem_anon_vma_fork;
720  		if (tmp->vm_flags & VM_WIPEONFORK) {
721  			/*
722  			 * VM_WIPEONFORK gets a clean slate in the child.
723  			 * Don't prepare anon_vma until fault since we don't
724  			 * copy pages for the current vma.
725  			 */
726  			tmp->anon_vma = NULL;
727  		} else if (anon_vma_fork(tmp, mpnt))
728  			goto fail_nomem_anon_vma_fork;
729  		vm_flags_clear(tmp, VM_LOCKED_MASK);
730  		file = tmp->vm_file;
731  		if (file) {
732  			struct address_space *mapping = file->f_mapping;
733  
734  			get_file(file);
735  			i_mmap_lock_write(mapping);
736  			if (tmp->vm_flags & VM_SHARED)
737  				mapping_allow_writable(mapping);
738  			flush_dcache_mmap_lock(mapping);
739  			/* insert tmp into the share list, just after mpnt */
740  			vma_interval_tree_insert_after(tmp, mpnt,
741  					&mapping->i_mmap);
742  			flush_dcache_mmap_unlock(mapping);
743  			i_mmap_unlock_write(mapping);
744  		}
745  
746  		/*
747  		 * Copy/update hugetlb private vma information.
748  		 */
749  		if (is_vm_hugetlb_page(tmp))
750  			hugetlb_dup_vma_private(tmp);
751  
752  		/* Link the vma into the MT */
753  		if (vma_iter_bulk_store(&vmi, tmp))
754  			goto fail_nomem_vmi_store;
755  
756  		mm->map_count++;
757  		if (!(tmp->vm_flags & VM_WIPEONFORK))
758  			retval = copy_page_range(tmp, mpnt);
759  
760  		if (tmp->vm_ops && tmp->vm_ops->open)
761  			tmp->vm_ops->open(tmp);
762  
763  		if (retval)
764  			goto loop_out;
765  	}
766  	/* a new mm has just been created */
767  	retval = arch_dup_mmap(oldmm, mm);
768  loop_out:
769  	vma_iter_free(&vmi);
770  	if (!retval)
771  		mt_set_in_rcu(vmi.mas.tree);
772  out:
773  	mmap_write_unlock(mm);
774  	flush_tlb_mm(oldmm);
775  	mmap_write_unlock(oldmm);
776  	dup_userfaultfd_complete(&uf);
777  fail_uprobe_end:
778  	uprobe_end_dup_mmap();
779  	return retval;
780  
781  fail_nomem_vmi_store:
782  	unlink_anon_vmas(tmp);
783  fail_nomem_anon_vma_fork:
784  	mpol_put(vma_policy(tmp));
785  fail_nomem_policy:
786  	vm_area_free(tmp);
787  fail_nomem:
788  	retval = -ENOMEM;
789  	vm_unacct_memory(charge);
790  	goto loop_out;
791  }
792  
793  static inline int mm_alloc_pgd(struct mm_struct *mm)
794  {
795  	mm->pgd = pgd_alloc(mm);
796  	if (unlikely(!mm->pgd))
797  		return -ENOMEM;
798  	return 0;
799  }
800  
801  static inline void mm_free_pgd(struct mm_struct *mm)
802  {
803  	pgd_free(mm, mm->pgd);
804  }
805  #else
806  static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
807  {
808  	mmap_write_lock(oldmm);
809  	dup_mm_exe_file(mm, oldmm);
810  	mmap_write_unlock(oldmm);
811  	return 0;
812  }
813  #define mm_alloc_pgd(mm)	(0)
814  #define mm_free_pgd(mm)
815  #endif /* CONFIG_MMU */
816  
817  static void check_mm(struct mm_struct *mm)
818  {
819  	int i;
820  
821  	BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS,
822  			 "Please make sure 'struct resident_page_types[]' is updated as well");
823  
824  	for (i = 0; i < NR_MM_COUNTERS; i++) {
825  		long x = percpu_counter_sum(&mm->rss_stat[i]);
826  
827  		if (unlikely(x))
828  			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
829  				 mm, resident_page_types[i], x);
830  	}
831  
832  	if (mm_pgtables_bytes(mm))
833  		pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n",
834  				mm_pgtables_bytes(mm));
835  
836  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
837  	VM_BUG_ON_MM(mm->pmd_huge_pte, mm);
838  #endif
839  }
840  
841  #define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
842  #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
843  
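/*
 * IPI callbacks used by cleanup_lazy_tlbs(): do_shoot_lazy_tlb() makes a CPU
 * that is still lazily using the mm as its active_mm switch over to init_mm,
 * while do_check_lazy_tlb() is the CONFIG_DEBUG_VM_SHOOT_LAZIES sanity check
 * that no CPU kept the mm behind.
 */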
844  static void do_check_lazy_tlb(void *arg)
845  {
846  	struct mm_struct *mm = arg;
847  
848  	WARN_ON_ONCE(current->active_mm == mm);
849  }
850  
851  static void do_shoot_lazy_tlb(void *arg)
852  {
853  	struct mm_struct *mm = arg;
854  
855  	if (current->active_mm == mm) {
856  		WARN_ON_ONCE(current->mm);
857  		current->active_mm = &init_mm;
858  		switch_mm(mm, &init_mm, current);
859  	}
860  }
861  
862  static void cleanup_lazy_tlbs(struct mm_struct *mm)
863  {
864  	if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) {
865  		/*
866  		 * In this case, lazy tlb mms are refcounted and would not reach
867  		 * __mmdrop until all CPUs have switched away and mmdrop()ed.
868  		 */
869  		return;
870  	}
871  
872  	/*
873  	 * Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it
874  	 * requires lazy mm users to switch to another mm when the refcount
875  	 * drops to zero, before the mm is freed. This requires IPIs here to
876  	 * switch kernel threads to init_mm.
877  	 *
878  	 * archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm
879  	 * switch with the final userspace teardown TLB flush which leaves the
880  	 * mm lazy on this CPU but no others, reducing the need for additional
881  	 * IPIs here. There are cases where a final IPI is still required here,
882  	 * such as the final mmdrop being performed on a different CPU than the
883  	 * one exiting, or kernel threads using the mm when userspace exits.
884  	 *
885  	 * IPI overheads have not been found to be expensive, but they could be
886  	 * reduced in a number of possible ways, for example (roughly
887  	 * increasing order of complexity):
888  	 * - The last lazy reference created by exit_mm() could instead switch
889  	 *   to init_mm, however it's probable this will run on the same CPU
890  	 *   immediately afterwards, so this may not reduce IPIs much.
891  	 * - A batch of mms requiring IPIs could be gathered and freed at once.
892  	 * - CPUs store active_mm where it can be remotely checked without a
893  	 *   lock, to filter out false-positives in the cpumask.
894  	 * - After mm_users or mm_count reaches zero, switching away from the
895  	 *   mm could clear mm_cpumask to reduce some IPIs, perhaps together
896  	 *   with some batching or delaying of the final IPIs.
897  	 * - A delayed freeing and RCU-like quiescing sequence based on mm
898  	 *   switching to avoid IPIs completely.
899  	 */
900  	on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1);
901  	if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES))
902  		on_each_cpu(do_check_lazy_tlb, (void *)mm, 1);
903  }
904  
905  /*
906   * Called when the last reference to the mm
907   * is dropped: either by a lazy thread or by
908   * mmput. Free the page directory and the mm.
909   */
910  void __mmdrop(struct mm_struct *mm)
911  {
912  	int i;
913  
914  	BUG_ON(mm == &init_mm);
915  	WARN_ON_ONCE(mm == current->mm);
916  
917  	/* Ensure no CPUs are using this as their lazy tlb mm */
918  	cleanup_lazy_tlbs(mm);
919  
920  	WARN_ON_ONCE(mm == current->active_mm);
921  	mm_free_pgd(mm);
922  	destroy_context(mm);
923  	mmu_notifier_subscriptions_destroy(mm);
924  	check_mm(mm);
925  	put_user_ns(mm->user_ns);
926  	mm_pasid_drop(mm);
927  	mm_destroy_cid(mm);
928  
929  	for (i = 0; i < NR_MM_COUNTERS; i++)
930  		percpu_counter_destroy(&mm->rss_stat[i]);
931  	free_mm(mm);
932  }
933  EXPORT_SYMBOL_GPL(__mmdrop);
934  
935  static void mmdrop_async_fn(struct work_struct *work)
936  {
937  	struct mm_struct *mm;
938  
939  	mm = container_of(work, struct mm_struct, async_put_work);
940  	__mmdrop(mm);
941  }
942  
943  static void mmdrop_async(struct mm_struct *mm)
944  {
945  	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
946  		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
947  		schedule_work(&mm->async_put_work);
948  	}
949  }
950  
951  static inline void free_signal_struct(struct signal_struct *sig)
952  {
953  	taskstats_tgid_free(sig);
954  	sched_autogroup_exit(sig);
955  	/*
956  	 * __mmdrop is not safe to call from softirq context on x86 due to
957  	 * pgd_dtor so postpone it to the async context
958  	 */
959  	if (sig->oom_mm)
960  		mmdrop_async(sig->oom_mm);
961  	kmem_cache_free(signal_cachep, sig);
962  }
963  
964  static inline void put_signal_struct(struct signal_struct *sig)
965  {
966  	if (refcount_dec_and_test(&sig->sigcnt))
967  		free_signal_struct(sig);
968  }
969  
970  void __put_task_struct(struct task_struct *tsk)
971  {
972  	WARN_ON(!tsk->exit_state);
973  	WARN_ON(refcount_read(&tsk->usage));
974  	WARN_ON(tsk == current);
975  
976  	io_uring_free(tsk);
977  	cgroup_free(tsk);
978  	task_numa_free(tsk, true);
979  	security_task_free(tsk);
980  	exit_creds(tsk);
981  	delayacct_tsk_free(tsk);
982  	put_signal_struct(tsk->signal);
983  	sched_core_free(tsk);
984  	free_task(tsk);
985  }
986  EXPORT_SYMBOL_GPL(__put_task_struct);
987  
988  void __init __weak arch_task_cache_init(void) { }
989  
990  /*
991   * set_max_threads - pick max_threads from the suggested value and available memory
992   */
993  static void set_max_threads(unsigned int max_threads_suggested)
994  {
995  	u64 threads;
996  	unsigned long nr_pages = totalram_pages();
997  
998  	/*
999  	 * The number of threads shall be limited such that the thread
1000  	 * structures may only consume a small part of the available memory.
1001  	 */
1002  	if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)
1003  		threads = MAX_THREADS;
1004  	else
1005  		threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,
1006  				    (u64) THREAD_SIZE * 8UL);
1007  
1008  	if (threads > max_threads_suggested)
1009  		threads = max_threads_suggested;
1010  
1011  	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
1012  }
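/*
 * Rough worked example (assuming 4 KiB pages and 16 KiB stacks, typical for
 * 64-bit x86): with 8 GiB of RAM the formula above gives
 *   threads = 8 GiB / (16 KiB * 8) = 65536,
 * which is then clamped to [MIN_THREADS, MAX_THREADS]. The suggested value
 * passed in by fork_init() is MAX_THREADS, so it does not lower this further.
 */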
1013  
1014  #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1015  /* Initialized by the architecture: */
1016  int arch_task_struct_size __read_mostly;
1017  #endif
1018  
1019  #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
1020  static void task_struct_whitelist(unsigned long *offset, unsigned long *size)
1021  {
1022  	/* Fetch thread_struct whitelist for the architecture. */
1023  	arch_thread_struct_whitelist(offset, size);
1024  
1025  	/*
1026  	 * Handle zero-sized whitelist or empty thread_struct, otherwise
1027  	 * adjust offset to position of thread_struct in task_struct.
1028  	 */
1029  	if (unlikely(*size == 0))
1030  		*offset = 0;
1031  	else
1032  		*offset += offsetof(struct task_struct, thread);
1033  }
1034  #endif /* CONFIG_ARCH_TASK_STRUCT_ALLOCATOR */
1035  
1036  void __init fork_init(void)
1037  {
1038  	int i;
1039  #ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
1040  #ifndef ARCH_MIN_TASKALIGN
1041  #define ARCH_MIN_TASKALIGN	0
1042  #endif
1043  	int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN);
1044  	unsigned long useroffset, usersize;
1045  
1046  	/* create a slab on which task_structs can be allocated */
1047  	task_struct_whitelist(&useroffset, &usersize);
1048  	task_struct_cachep = kmem_cache_create_usercopy("task_struct",
1049  			arch_task_struct_size, align,
1050  			SLAB_PANIC|SLAB_ACCOUNT,
1051  			useroffset, usersize, NULL);
1052  #endif
1053  
1054  	/* do the arch specific task caches init */
1055  	arch_task_cache_init();
1056  
1057  	set_max_threads(MAX_THREADS);
1058  
1059  	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
1060  	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
1061  	init_task.signal->rlim[RLIMIT_SIGPENDING] =
1062  		init_task.signal->rlim[RLIMIT_NPROC];
1063  
1064  	for (i = 0; i < UCOUNT_COUNTS; i++)
1065  		init_user_ns.ucount_max[i] = max_threads/2;
1066  
1067  	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC,      RLIM_INFINITY);
1068  	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE,   RLIM_INFINITY);
1069  	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY);
1070  	set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK,    RLIM_INFINITY);
1071  
1072  #ifdef CONFIG_VMAP_STACK
1073  	cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
1074  			  NULL, free_vm_stack_cache);
1075  #endif
1076  
1077  	scs_init();
1078  
1079  	lockdep_init_task(&init_task);
1080  	uprobes_init();
1081  }
1082  
1083  int __weak arch_dup_task_struct(struct task_struct *dst,
1084  					       struct task_struct *src)
1085  {
1086  	*dst = *src;
1087  	return 0;
1088  }
1089  
1090  void set_task_stack_end_magic(struct task_struct *tsk)
1091  {
1092  	unsigned long *stackend;
1093  
1094  	stackend = end_of_stack(tsk);
1095  	*stackend = STACK_END_MAGIC;	/* for overflow detection */
1096  }
1097  
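/*
 * Allocate a new task_struct plus kernel stack on @node (or on the node
 * suggested by tsk_fork_get_node() for NUMA_NO_NODE), copy *orig into it and
 * reset the per-task state that must not be inherited: stack end magic,
 * seccomp filter pointer, reference counts, kcov/kmsan state, etc.
 * Returns NULL on any allocation failure.
 */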
1098  static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
1099  {
1100  	struct task_struct *tsk;
1101  	int err;
1102  
1103  	if (node == NUMA_NO_NODE)
1104  		node = tsk_fork_get_node(orig);
1105  	tsk = alloc_task_struct_node(node);
1106  	if (!tsk)
1107  		return NULL;
1108  
1109  	err = arch_dup_task_struct(tsk, orig);
1110  	if (err)
1111  		goto free_tsk;
1112  
1113  	err = alloc_thread_stack_node(tsk, node);
1114  	if (err)
1115  		goto free_tsk;
1116  
1117  #ifdef CONFIG_THREAD_INFO_IN_TASK
1118  	refcount_set(&tsk->stack_refcount, 1);
1119  #endif
1120  	account_kernel_stack(tsk, 1);
1121  
1122  	err = scs_prepare(tsk, node);
1123  	if (err)
1124  		goto free_stack;
1125  
1126  #ifdef CONFIG_SECCOMP
1127  	/*
1128  	 * We must handle setting up seccomp filters once we're under
1129  	 * the sighand lock in case orig has changed between now and
1130  	 * then. Until then, filter must be NULL to avoid messing up
1131  	 * the usage counts on the error path calling free_task.
1132  	 */
1133  	tsk->seccomp.filter = NULL;
1134  #endif
1135  
1136  	setup_thread_stack(tsk, orig);
1137  	clear_user_return_notifier(tsk);
1138  	clear_tsk_need_resched(tsk);
1139  	set_task_stack_end_magic(tsk);
1140  	clear_syscall_work_syscall_user_dispatch(tsk);
1141  
1142  #ifdef CONFIG_STACKPROTECTOR
1143  	tsk->stack_canary = get_random_canary();
1144  #endif
1145  	if (orig->cpus_ptr == &orig->cpus_mask)
1146  		tsk->cpus_ptr = &tsk->cpus_mask;
1147  	dup_user_cpus_ptr(tsk, orig, node);
1148  
1149  	/*
1150  	 * One for the user space visible state that goes away when reaped.
1151  	 * One for the scheduler.
1152  	 */
1153  	refcount_set(&tsk->rcu_users, 2);
1154  	/* One for the rcu users */
1155  	refcount_set(&tsk->usage, 1);
1156  #ifdef CONFIG_BLK_DEV_IO_TRACE
1157  	tsk->btrace_seq = 0;
1158  #endif
1159  	tsk->splice_pipe = NULL;
1160  	tsk->task_frag.page = NULL;
1161  	tsk->wake_q.next = NULL;
1162  	tsk->worker_private = NULL;
1163  
1164  	kcov_task_init(tsk);
1165  	kmsan_task_create(tsk);
1166  	kmap_local_fork(tsk);
1167  
1168  #ifdef CONFIG_FAULT_INJECTION
1169  	tsk->fail_nth = 0;
1170  #endif
1171  
1172  #ifdef CONFIG_BLK_CGROUP
1173  	tsk->throttle_disk = NULL;
1174  	tsk->use_memdelay = 0;
1175  #endif
1176  
1177  #ifdef CONFIG_IOMMU_SVA
1178  	tsk->pasid_activated = 0;
1179  #endif
1180  
1181  #ifdef CONFIG_MEMCG
1182  	tsk->active_memcg = NULL;
1183  #endif
1184  
1185  #ifdef CONFIG_CPU_SUP_INTEL
1186  	tsk->reported_split_lock = 0;
1187  #endif
1188  
1189  #ifdef CONFIG_SCHED_MM_CID
1190  	tsk->mm_cid = -1;
1191  	tsk->last_mm_cid = -1;
1192  	tsk->mm_cid_active = 0;
1193  	tsk->migrate_from_cpu = -1;
1194  #endif
1195  	return tsk;
1196  
1197  free_stack:
1198  	exit_task_stack_account(tsk);
1199  	free_thread_stack(tsk);
1200  free_tsk:
1201  	free_task_struct(tsk);
1202  	return NULL;
1203  }
1204  
1205  __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
1206  
1207  static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
1208  
1209  static int __init coredump_filter_setup(char *s)
1210  {
1211  	default_dump_filter =
1212  		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
1213  		MMF_DUMP_FILTER_MASK;
1214  	return 1;
1215  }
1216  
1217  __setup("coredump_filter=", coredump_filter_setup);
1218  
1219  #include <linux/init_task.h>
1220  
1221  static void mm_init_aio(struct mm_struct *mm)
1222  {
1223  #ifdef CONFIG_AIO
1224  	spin_lock_init(&mm->ioctx_lock);
1225  	mm->ioctx_table = NULL;
1226  #endif
1227  }
1228  
1229  static __always_inline void mm_clear_owner(struct mm_struct *mm,
1230  					   struct task_struct *p)
1231  {
1232  #ifdef CONFIG_MEMCG
1233  	if (mm->owner == p)
1234  		WRITE_ONCE(mm->owner, NULL);
1235  #endif
1236  }
1237  
1238  static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1239  {
1240  #ifdef CONFIG_MEMCG
1241  	mm->owner = p;
1242  #endif
1243  }
1244  
1245  static void mm_init_uprobes_state(struct mm_struct *mm)
1246  {
1247  #ifdef CONFIG_UPROBES
1248  	mm->uprobes_state.xol_area = NULL;
1249  #endif
1250  }
1251  
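/*
 * Initialize a freshly allocated (or, for dup_mm(), byte-copied) mm_struct:
 * maple tree, locks, counters, AIO/uprobes state, the pgd, the architecture
 * mmu context, the mm concurrency id and the per-CPU rss counters. On any
 * failure everything set up so far is torn down again and NULL is returned.
 */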
1252  static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
1253  	struct user_namespace *user_ns)
1254  {
1255  	int i;
1256  
1257  	mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
1258  	mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
1259  	atomic_set(&mm->mm_users, 1);
1260  	atomic_set(&mm->mm_count, 1);
1261  	seqcount_init(&mm->write_protect_seq);
1262  	mmap_init_lock(mm);
1263  	INIT_LIST_HEAD(&mm->mmlist);
1264  #ifdef CONFIG_PER_VMA_LOCK
1265  	mm->mm_lock_seq = 0;
1266  #endif
1267  	mm_pgtables_bytes_init(mm);
1268  	mm->map_count = 0;
1269  	mm->locked_vm = 0;
1270  	atomic64_set(&mm->pinned_vm, 0);
1271  	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
1272  	spin_lock_init(&mm->page_table_lock);
1273  	spin_lock_init(&mm->arg_lock);
1274  	mm_init_cpumask(mm);
1275  	mm_init_aio(mm);
1276  	mm_init_owner(mm, p);
1277  	mm_pasid_init(mm);
1278  	RCU_INIT_POINTER(mm->exe_file, NULL);
1279  	mmu_notifier_subscriptions_init(mm);
1280  	init_tlb_flush_pending(mm);
1281  #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
1282  	mm->pmd_huge_pte = NULL;
1283  #endif
1284  	mm_init_uprobes_state(mm);
1285  	hugetlb_count_init(mm);
1286  
1287  	if (current->mm) {
1288  		mm->flags = current->mm->flags & MMF_INIT_MASK;
1289  		mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK;
1290  	} else {
1291  		mm->flags = default_dump_filter;
1292  		mm->def_flags = 0;
1293  	}
1294  
1295  	if (mm_alloc_pgd(mm))
1296  		goto fail_nopgd;
1297  
1298  	if (init_new_context(p, mm))
1299  		goto fail_nocontext;
1300  
1301  	if (mm_alloc_cid(mm))
1302  		goto fail_cid;
1303  
1304  	for (i = 0; i < NR_MM_COUNTERS; i++)
1305  		if (percpu_counter_init(&mm->rss_stat[i], 0, GFP_KERNEL_ACCOUNT))
1306  			goto fail_pcpu;
1307  
1308  	mm->user_ns = get_user_ns(user_ns);
1309  	lru_gen_init_mm(mm);
1310  	return mm;
1311  
1312  fail_pcpu:
1313  	while (i > 0)
1314  		percpu_counter_destroy(&mm->rss_stat[--i]);
1315  	mm_destroy_cid(mm);
1316  fail_cid:
1317  	destroy_context(mm);
1318  fail_nocontext:
1319  	mm_free_pgd(mm);
1320  fail_nopgd:
1321  	free_mm(mm);
1322  	return NULL;
1323  }
1324  
1325  /*
1326   * Allocate and initialize an mm_struct.
1327   */
1328  struct mm_struct *mm_alloc(void)
1329  {
1330  	struct mm_struct *mm;
1331  
1332  	mm = allocate_mm();
1333  	if (!mm)
1334  		return NULL;
1335  
1336  	memset(mm, 0, sizeof(*mm));
1337  	return mm_init(mm, current, current_user_ns());
1338  }
1339  
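/*
 * Final teardown once the last mm_users reference is gone: flush aio and
 * uprobe state, let ksm/khugepaged detach, unmap everything via exit_mmap(),
 * drop the exe_file and binfmt module references, and finally mmdrop() the
 * mm_count reference that the mm_users pin was holding.
 */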
1340  static inline void __mmput(struct mm_struct *mm)
1341  {
1342  	VM_BUG_ON(atomic_read(&mm->mm_users));
1343  
1344  	uprobe_clear_state(mm);
1345  	exit_aio(mm);
1346  	ksm_exit(mm);
1347  	khugepaged_exit(mm); /* must run before exit_mmap */
1348  	exit_mmap(mm);
1349  	mm_put_huge_zero_page(mm);
1350  	set_mm_exe_file(mm, NULL);
1351  	if (!list_empty(&mm->mmlist)) {
1352  		spin_lock(&mmlist_lock);
1353  		list_del(&mm->mmlist);
1354  		spin_unlock(&mmlist_lock);
1355  	}
1356  	if (mm->binfmt)
1357  		module_put(mm->binfmt->module);
1358  	lru_gen_del_mm(mm);
1359  	mmdrop(mm);
1360  }
1361  
1362  /*
1363   * Decrement the use count and release all resources for an mm.
1364   */
1365  void mmput(struct mm_struct *mm)
1366  {
1367  	might_sleep();
1368  
1369  	if (atomic_dec_and_test(&mm->mm_users))
1370  		__mmput(mm);
1371  }
1372  EXPORT_SYMBOL_GPL(mmput);
1373  
1374  #ifdef CONFIG_MMU
1375  static void mmput_async_fn(struct work_struct *work)
1376  {
1377  	struct mm_struct *mm = container_of(work, struct mm_struct,
1378  					    async_put_work);
1379  
1380  	__mmput(mm);
1381  }
1382  
1383  void mmput_async(struct mm_struct *mm)
1384  {
1385  	if (atomic_dec_and_test(&mm->mm_users)) {
1386  		INIT_WORK(&mm->async_put_work, mmput_async_fn);
1387  		schedule_work(&mm->async_put_work);
1388  	}
1389  }
1390  EXPORT_SYMBOL_GPL(mmput_async);
1391  #endif
1392  
1393  /**
1394   * set_mm_exe_file - change a reference to the mm's executable file
1395   *
1396   * This changes mm's executable file (shown as symlink /proc/[pid]/exe).
1397   *
1398   * Main users are mmput() and sys_execve(). Callers prevent concurrent
1399   * invocations: in mmput() there is nobody alive left, in execve the task is
1400   * single-threaded.
1401   *
1402   * Can only fail if new_exe_file != NULL.
1403   */
1404  int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1405  {
1406  	struct file *old_exe_file;
1407  
1408  	/*
1409  	 * It is safe to dereference the exe_file without RCU as
1410  	 * this function is only called if nobody else can access
1411  	 * this mm -- see comment above for justification.
1412  	 */
1413  	old_exe_file = rcu_dereference_raw(mm->exe_file);
1414  
1415  	if (new_exe_file) {
1416  		/*
1417  		 * We expect the caller (i.e., sys_execve) to have already denied
1418  		 * write access, so this is unlikely to fail.
1419  		 */
1420  		if (unlikely(deny_write_access(new_exe_file)))
1421  			return -EACCES;
1422  		get_file(new_exe_file);
1423  	}
1424  	rcu_assign_pointer(mm->exe_file, new_exe_file);
1425  	if (old_exe_file) {
1426  		allow_write_access(old_exe_file);
1427  		fput(old_exe_file);
1428  	}
1429  	return 0;
1430  }
1431  
1432  /**
1433   * replace_mm_exe_file - replace a reference to the mm's executable file
1434   *
1435   * This changes mm's executable file (shown as symlink /proc/[pid]/exe),
1436   * dealing with concurrent invocation and without grabbing the mmap lock in
1437   * write mode.
1438   *
1439   * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE).
1440   */
1441  int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
1442  {
1443  	struct vm_area_struct *vma;
1444  	struct file *old_exe_file;
1445  	int ret = 0;
1446  
1447  	/* Forbid mm->exe_file change if old file still mapped. */
1448  	old_exe_file = get_mm_exe_file(mm);
1449  	if (old_exe_file) {
1450  		VMA_ITERATOR(vmi, mm, 0);
1451  		mmap_read_lock(mm);
1452  		for_each_vma(vmi, vma) {
1453  			if (!vma->vm_file)
1454  				continue;
1455  			if (path_equal(&vma->vm_file->f_path,
1456  				       &old_exe_file->f_path)) {
1457  				ret = -EBUSY;
1458  				break;
1459  			}
1460  		}
1461  		mmap_read_unlock(mm);
1462  		fput(old_exe_file);
1463  		if (ret)
1464  			return ret;
1465  	}
1466  
1467  	/* set the new file, lockless */
1468  	ret = deny_write_access(new_exe_file);
1469  	if (ret)
1470  		return -EACCES;
1471  	get_file(new_exe_file);
1472  
1473  	old_exe_file = xchg(&mm->exe_file, new_exe_file);
1474  	if (old_exe_file) {
1475  		/*
1476  		 * Don't race with dup_mmap() getting the file and disallowing
1477  		 * write access while someone might open the file writable.
1478  		 */
1479  		mmap_read_lock(mm);
1480  		allow_write_access(old_exe_file);
1481  		fput(old_exe_file);
1482  		mmap_read_unlock(mm);
1483  	}
1484  	return 0;
1485  }
1486  
1487  /**
1488   * get_mm_exe_file - acquire a reference to the mm's executable file
1489   *
1490   * Returns %NULL if mm has no associated executable file.
1491   * User must release file via fput().
1492   */
1493  struct file *get_mm_exe_file(struct mm_struct *mm)
1494  {
1495  	struct file *exe_file;
1496  
1497  	rcu_read_lock();
1498  	exe_file = rcu_dereference(mm->exe_file);
1499  	if (exe_file && !get_file_rcu(exe_file))
1500  		exe_file = NULL;
1501  	rcu_read_unlock();
1502  	return exe_file;
1503  }
1504  
1505  /**
1506   * get_task_exe_file - acquire a reference to the task's executable file
1507   *
1508   * Returns %NULL if task's mm (if any) has no associated executable file or
1509   * this is a kernel thread with borrowed mm (see the comment above get_task_mm).
1510   * User must release file via fput().
1511   */
1512  struct file *get_task_exe_file(struct task_struct *task)
1513  {
1514  	struct file *exe_file = NULL;
1515  	struct mm_struct *mm;
1516  
1517  	task_lock(task);
1518  	mm = task->mm;
1519  	if (mm) {
1520  		if (!(task->flags & PF_KTHREAD))
1521  			exe_file = get_mm_exe_file(mm);
1522  	}
1523  	task_unlock(task);
1524  	return exe_file;
1525  }
1526  
1527  /**
1528   * get_task_mm - acquire a reference to the task's mm
1529   *
1530   * Returns %NULL if the task has no mm. Also checks that PF_KTHREAD is not
1531   * set (it would mean this kernel workthread has transiently adopted a user
1532   * mm with use_mm, e.g. to do its AIO) and, if it is clear, returns a
1533   * reference to the mm after bumping up the use count. The user must release
1534   * the mm via mmput() after use. Typically used by /proc and ptrace.
1535   */
1536  struct mm_struct *get_task_mm(struct task_struct *task)
1537  {
1538  	struct mm_struct *mm;
1539  
1540  	task_lock(task);
1541  	mm = task->mm;
1542  	if (mm) {
1543  		if (task->flags & PF_KTHREAD)
1544  			mm = NULL;
1545  		else
1546  			mmget(mm);
1547  	}
1548  	task_unlock(task);
1549  	return mm;
1550  }
1551  EXPORT_SYMBOL_GPL(get_task_mm);
1552  
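/*
 * Grab @task's mm for introspection (procfs, ptrace and friends). The
 * exec_update_lock is taken to serialize against a concurrent exec, and
 * unless the mm is the caller's own, access is refused with
 * ERR_PTR(-EACCES) when ptrace_may_access() denies the requested @mode.
 * Returns NULL if the task has no mm.
 */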
1553  struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
1554  {
1555  	struct mm_struct *mm;
1556  	int err;
1557  
1558  	err =  down_read_killable(&task->signal->exec_update_lock);
1559  	if (err)
1560  		return ERR_PTR(err);
1561  
1562  	mm = get_task_mm(task);
1563  	if (mm && mm != current->mm &&
1564  			!ptrace_may_access(task, mode)) {
1565  		mmput(mm);
1566  		mm = ERR_PTR(-EACCES);
1567  	}
1568  	up_read(&task->signal->exec_update_lock);
1569  
1570  	return mm;
1571  }
1572  
1573  static void complete_vfork_done(struct task_struct *tsk)
1574  {
1575  	struct completion *vfork;
1576  
1577  	task_lock(tsk);
1578  	vfork = tsk->vfork_done;
1579  	if (likely(vfork)) {
1580  		tsk->vfork_done = NULL;
1581  		complete(vfork);
1582  	}
1583  	task_unlock(tsk);
1584  }
1585  
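/*
 * Parent side of vfork(): sleep (killable and freezable) until the child
 * completes vfork_done on exec or exit. If a fatal signal cuts the wait
 * short, clear the child's vfork_done pointer so the child will not
 * complete() a stale completion, and report the wait as killed.
 */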
1586  static int wait_for_vfork_done(struct task_struct *child,
1587  				struct completion *vfork)
1588  {
1589  	unsigned int state = TASK_UNINTERRUPTIBLE|TASK_KILLABLE|TASK_FREEZABLE;
1590  	int killed;
1591  
1592  	cgroup_enter_frozen();
1593  	killed = wait_for_completion_state(vfork, state);
1594  	cgroup_leave_frozen(false);
1595  
1596  	if (killed) {
1597  		task_lock(child);
1598  		child->vfork_done = NULL;
1599  		task_unlock(child);
1600  	}
1601  
1602  	put_task_struct(child);
1603  	return killed;
1604  }
1605  
1606  /* Please note the differences between mmput and mm_release.
1607   * mmput is called whenever we stop holding onto a mm_struct,
1608   * error success whatever.
1609   *
1610   * mm_release is called after a mm_struct has been removed
1611   * from the current process.
1612   *
1613   * This difference is important for error handling, when we
1614   * only half set up a mm_struct for a new process and need to restore
1615   * the old one.  Because we mmput the new mm_struct before
1616   * restoring the old one. . .
1617   * Eric Biederman 10 January 1998
1618   */
1619  static void mm_release(struct task_struct *tsk, struct mm_struct *mm)
1620  {
1621  	uprobe_free_utask(tsk);
1622  
1623  	/* Get rid of any cached register state */
1624  	deactivate_mm(tsk, mm);
1625  
1626  	/*
1627  	 * Signal userspace if we're not exiting with a core dump
1628  	 * because we want to leave the value intact for debugging
1629  	 * purposes.
1630  	 */
1631  	if (tsk->clear_child_tid) {
1632  		if (atomic_read(&mm->mm_users) > 1) {
1633  			/*
1634  			 * We don't check the error code - if userspace has
1635  			 * not set up a proper pointer then tough luck.
1636  			 */
1637  			put_user(0, tsk->clear_child_tid);
1638  			do_futex(tsk->clear_child_tid, FUTEX_WAKE,
1639  					1, NULL, NULL, 0, 0);
1640  		}
1641  		tsk->clear_child_tid = NULL;
1642  	}
1643  
1644  	/*
1645  	 * All done, finally we can wake up parent and return this mm to him.
1646  	 * Also kthread_stop() uses this completion for synchronization.
1647  	 */
1648  	if (tsk->vfork_done)
1649  		complete_vfork_done(tsk);
1650  }
1651  
1652  void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm)
1653  {
1654  	futex_exit_release(tsk);
1655  	mm_release(tsk, mm);
1656  }
1657  
1658  void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm)
1659  {
1660  	futex_exec_release(tsk);
1661  	mm_release(tsk, mm);
1662  }
1663  
1664  /**
1665   * dup_mm() - duplicates an existing mm structure
1666   * @tsk: the task_struct with which the new mm will be associated.
1667   * @oldmm: the mm to duplicate.
1668   *
1669   * Allocates a new mm structure and duplicates the provided @oldmm structure
1670   * content into it.
1671   *
1672   * Return: the duplicated mm or NULL on failure.
1673   */
1674  static struct mm_struct *dup_mm(struct task_struct *tsk,
1675  				struct mm_struct *oldmm)
1676  {
1677  	struct mm_struct *mm;
1678  	int err;
1679  
1680  	mm = allocate_mm();
1681  	if (!mm)
1682  		goto fail_nomem;
1683  
1684  	memcpy(mm, oldmm, sizeof(*mm));
1685  
1686  	if (!mm_init(mm, tsk, mm->user_ns))
1687  		goto fail_nomem;
1688  
1689  	err = dup_mmap(mm, oldmm);
1690  	if (err)
1691  		goto free_pt;
1692  
1693  	mm->hiwater_rss = get_mm_rss(mm);
1694  	mm->hiwater_vm = mm->total_vm;
1695  
1696  	if (mm->binfmt && !try_module_get(mm->binfmt->module))
1697  		goto free_pt;
1698  
1699  	return mm;
1700  
1701  free_pt:
1702  	/* don't put binfmt in mmput, we haven't got module yet */
1703  	mm->binfmt = NULL;
1704  	mm_init_owner(mm, NULL);
1705  	mmput(mm);
1706  
1707  fail_nomem:
1708  	return NULL;
1709  }
1710  
1711  static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
1712  {
1713  	struct mm_struct *mm, *oldmm;
1714  
1715  	tsk->min_flt = tsk->maj_flt = 0;
1716  	tsk->nvcsw = tsk->nivcsw = 0;
1717  #ifdef CONFIG_DETECT_HUNG_TASK
1718  	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
1719  	tsk->last_switch_time = 0;
1720  #endif
1721  
1722  	tsk->mm = NULL;
1723  	tsk->active_mm = NULL;
1724  
1725  	/*
1726  	 * Are we cloning a kernel thread?
1727  	 *
1728  	 * We need to steal an active VM for that.
1729  	 */
1730  	oldmm = current->mm;
1731  	if (!oldmm)
1732  		return 0;
1733  
1734  	if (clone_flags & CLONE_VM) {
1735  		mmget(oldmm);
1736  		mm = oldmm;
1737  	} else {
1738  		mm = dup_mm(tsk, current->mm);
1739  		if (!mm)
1740  			return -ENOMEM;
1741  	}
1742  
1743  	tsk->mm = mm;
1744  	tsk->active_mm = mm;
1745  	sched_mm_cid_fork(tsk);
1746  	return 0;
1747  }
1748  
1749  static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
1750  {
1751  	struct fs_struct *fs = current->fs;
1752  	if (clone_flags & CLONE_FS) {
1753  		/* tsk->fs is already what we want */
1754  		spin_lock(&fs->lock);
1755  		if (fs->in_exec) {
1756  			spin_unlock(&fs->lock);
1757  			return -EAGAIN;
1758  		}
1759  		fs->users++;
1760  		spin_unlock(&fs->lock);
1761  		return 0;
1762  	}
1763  	tsk->fs = copy_fs_struct(fs);
1764  	if (!tsk->fs)
1765  		return -ENOMEM;
1766  	return 0;
1767  }
1768  
1769  static int copy_files(unsigned long clone_flags, struct task_struct *tsk,
1770  		      int no_files)
1771  {
1772  	struct files_struct *oldf, *newf;
1773  	int error = 0;
1774  
1775  	/*
1776  	 * A background process may not have any files ...
1777  	 */
1778  	oldf = current->files;
1779  	if (!oldf)
1780  		goto out;
1781  
1782  	if (no_files) {
1783  		tsk->files = NULL;
1784  		goto out;
1785  	}
1786  
1787  	if (clone_flags & CLONE_FILES) {
1788  		atomic_inc(&oldf->count);
1789  		goto out;
1790  	}
1791  
1792  	newf = dup_fd(oldf, NR_OPEN_MAX, &error);
1793  	if (!newf)
1794  		goto out;
1795  
1796  	tsk->files = newf;
1797  	error = 0;
1798  out:
1799  	return error;
1800  }
1801  
1802  static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
1803  {
1804  	struct sighand_struct *sig;
1805  
1806  	if (clone_flags & CLONE_SIGHAND) {
1807  		refcount_inc(&current->sighand->count);
1808  		return 0;
1809  	}
1810  	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1811  	RCU_INIT_POINTER(tsk->sighand, sig);
1812  	if (!sig)
1813  		return -ENOMEM;
1814  
1815  	refcount_set(&sig->count, 1);
1816  	spin_lock_irq(&current->sighand->siglock);
1817  	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
1818  	spin_unlock_irq(&current->sighand->siglock);
1819  
1820  	/* Reset all signal handlers not set to SIG_IGN to SIG_DFL. */
1821  	if (clone_flags & CLONE_CLEAR_SIGHAND)
1822  		flush_signal_handlers(tsk, 0);
1823  
1824  	return 0;
1825  }
1826  
1827  void __cleanup_sighand(struct sighand_struct *sighand)
1828  {
1829  	if (refcount_dec_and_test(&sighand->count)) {
1830  		signalfd_cleanup(sighand);
1831  		/*
1832  		 * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it
1833  		 * without an RCU grace period, see __lock_task_sighand().
1834  		 */
1835  		kmem_cache_free(sighand_cachep, sighand);
1836  	}
1837  }
1838  
1839  /*
1840   * Initialize POSIX timer handling for a thread group.
1841   */
1842  static void posix_cpu_timers_init_group(struct signal_struct *sig)
1843  {
1844  	struct posix_cputimers *pct = &sig->posix_cputimers;
1845  	unsigned long cpu_limit;
1846  
1847  	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
1848  	posix_cputimers_group_init(pct, cpu_limit);
1849  }
1850  
1851  static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1852  {
1853  	struct signal_struct *sig;
1854  
1855  	if (clone_flags & CLONE_THREAD)
1856  		return 0;
1857  
1858  	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
1859  	tsk->signal = sig;
1860  	if (!sig)
1861  		return -ENOMEM;
1862  
1863  	sig->nr_threads = 1;
1864  	sig->quick_threads = 1;
1865  	atomic_set(&sig->live, 1);
1866  	refcount_set(&sig->sigcnt, 1);
1867  
1868  	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
1869  	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
1870  	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
1871  
1872  	init_waitqueue_head(&sig->wait_chldexit);
1873  	sig->curr_target = tsk;
1874  	init_sigpending(&sig->shared_pending);
1875  	INIT_HLIST_HEAD(&sig->multiprocess);
1876  	seqlock_init(&sig->stats_lock);
1877  	prev_cputime_init(&sig->prev_cputime);
1878  
1879  #ifdef CONFIG_POSIX_TIMERS
1880  	INIT_LIST_HEAD(&sig->posix_timers);
1881  	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1882  	sig->real_timer.function = it_real_fn;
1883  #endif
1884  
1885  	task_lock(current->group_leader);
1886  	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
1887  	task_unlock(current->group_leader);
1888  
1889  	posix_cpu_timers_init_group(sig);
1890  
1891  	tty_audit_fork(sig);
1892  	sched_autogroup_fork(sig);
1893  
1894  	sig->oom_score_adj = current->signal->oom_score_adj;
1895  	sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1896  
1897  	mutex_init(&sig->cred_guard_mutex);
1898  	init_rwsem(&sig->exec_update_lock);
1899  
1900  	return 0;
1901  }
1902  
1903  static void copy_seccomp(struct task_struct *p)
1904  {
1905  #ifdef CONFIG_SECCOMP
1906  	/*
1907  	 * Must be called with sighand->lock held, which is common to
1908  	 * all threads in the group. Holding cred_guard_mutex is not
1909  	 * needed because this new task is not yet running and cannot
1910  	 * be racing exec.
1911  	 */
1912  	assert_spin_locked(&current->sighand->siglock);
1913  
1914  	/* Ref-count the new filter user, and assign it. */
1915  	get_seccomp_filter(current);
1916  	p->seccomp = current->seccomp;
1917  
1918  	/*
1919  	 * Explicitly enable no_new_privs here in case it got set
1920  	 * between the task_struct being duplicated and holding the
1921  	 * sighand lock. The seccomp state and nnp must be in sync.
1922  	 */
1923  	if (task_no_new_privs(current))
1924  		task_set_no_new_privs(p);
1925  
1926  	/*
1927  	 * If the parent gained a seccomp mode after copying thread
1928  	 * flags but before we held the sighand lock, we have
1929  	 * to manually enable the seccomp thread flag here.
1930  	 */
1931  	if (p->seccomp.mode != SECCOMP_MODE_DISABLED)
1932  		set_task_syscall_work(p, SECCOMP);
1933  #endif
1934  }
1935  
1936  SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1937  {
1938  	current->clear_child_tid = tidptr;
1939  
1940  	return task_pid_vnr(current);
1941  }
1942  
1943  static void rt_mutex_init_task(struct task_struct *p)
1944  {
1945  	raw_spin_lock_init(&p->pi_lock);
1946  #ifdef CONFIG_RT_MUTEXES
1947  	p->pi_waiters = RB_ROOT_CACHED;
1948  	p->pi_top_task = NULL;
1949  	p->pi_blocked_on = NULL;
1950  #endif
1951  }
1952  
1953  static inline void init_task_pid_links(struct task_struct *task)
1954  {
1955  	enum pid_type type;
1956  
1957  	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
1958  		INIT_HLIST_NODE(&task->pid_links[type]);
1959  }
1960  
1961  static inline void
1962  init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
1963  {
1964  	if (type == PIDTYPE_PID)
1965  		task->thread_pid = pid;
1966  	else
1967  		task->signal->pids[type] = pid;
1968  }
1969  
1970  static inline void rcu_copy_process(struct task_struct *p)
1971  {
1972  #ifdef CONFIG_PREEMPT_RCU
1973  	p->rcu_read_lock_nesting = 0;
1974  	p->rcu_read_unlock_special.s = 0;
1975  	p->rcu_blocked_node = NULL;
1976  	INIT_LIST_HEAD(&p->rcu_node_entry);
1977  #endif /* #ifdef CONFIG_PREEMPT_RCU */
1978  #ifdef CONFIG_TASKS_RCU
1979  	p->rcu_tasks_holdout = false;
1980  	INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
1981  	p->rcu_tasks_idle_cpu = -1;
1982  #endif /* #ifdef CONFIG_TASKS_RCU */
1983  #ifdef CONFIG_TASKS_TRACE_RCU
1984  	p->trc_reader_nesting = 0;
1985  	p->trc_reader_special.s = 0;
1986  	INIT_LIST_HEAD(&p->trc_holdout_list);
1987  	INIT_LIST_HEAD(&p->trc_blkd_node);
1988  #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
1989  }
1990  
1991  struct pid *pidfd_pid(const struct file *file)
1992  {
1993  	if (file->f_op == &pidfd_fops)
1994  		return file->private_data;
1995  
1996  	return ERR_PTR(-EBADF);
1997  }
1998  
1999  static int pidfd_release(struct inode *inode, struct file *file)
2000  {
2001  	struct pid *pid = file->private_data;
2002  
2003  	file->private_data = NULL;
2004  	put_pid(pid);
2005  	return 0;
2006  }
2007  
2008  #ifdef CONFIG_PROC_FS
2009  /**
2010   * pidfd_show_fdinfo - print information about a pidfd
2011   * @m: proc fdinfo file
2012   * @f: file referencing a pidfd
2013   *
2014   * Pid:
2015   * This function will print the pid that a given pidfd refers to in the
2016   * pid namespace of the procfs instance.
2017   * If the pid namespace of the process is not a descendant of the pid
2018   * namespace of the procfs instance, 0 will be shown as its pid. This is
2019   * similar to calling getppid() on a process whose parent is outside of
2020   * its pid namespace.
2021   *
2022   * NSpid:
2023   * If pid namespaces are supported then this function will also print
2024   * the pid that a given pidfd refers to for all descendant pid namespaces
2025   * starting from the current pid namespace of the instance, i.e. the
2026   * Pid field and the first entry in the NSpid field will be identical.
2027   * If the pid namespace of the process is not a descendant of the pid
2028   * namespace of the procfs instance, 0 will be shown as its first NSpid
2029   * entry and no others will be shown.
2030   * Note that this differs from the Pid and NSpid fields in
2031   * /proc/<pid>/status where Pid and NSpid are always shown relative to
2032   * the pid namespace of the procfs instance. The difference becomes
2033   * obvious when sending around a pidfd between pid namespaces from a
2034   * different branch of the tree, i.e. where no ancestral relation is
2035   * present between the pid namespaces:
2036   * - create two new pid namespaces ns1 and ns2 in the initial pid
2037   *   namespace (also take care to create new mount namespaces in the
2038   *   new pid namespace and mount procfs)
2039   * - create a process with a pidfd in ns1
2040   * - send pidfd from ns1 to ns2
2041   * - read /proc/self/fdinfo/<pidfd> and observe that both Pid and NSpid
2042   *   have exactly one entry, which is 0
2043   */
2044  static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
2045  {
2046  	struct pid *pid = f->private_data;
2047  	struct pid_namespace *ns;
2048  	pid_t nr = -1;
2049  
2050  	if (likely(pid_has_task(pid, PIDTYPE_PID))) {
2051  		ns = proc_pid_ns(file_inode(m->file)->i_sb);
2052  		nr = pid_nr_ns(pid, ns);
2053  	}
2054  
2055  	seq_put_decimal_ll(m, "Pid:\t", nr);
2056  
2057  #ifdef CONFIG_PID_NS
2058  	seq_put_decimal_ll(m, "\nNSpid:\t", nr);
2059  	if (nr > 0) {
2060  		int i;
2061  
2062  		/* If nr is non-zero, it means that 'pid' is valid and that
2063  		 * ns, i.e. the pid namespace associated with the procfs
2064  		 * instance, is in the pid namespace hierarchy of pid.
2065  		 * Start at one below the already printed level.
2066  		 */
2067  		for (i = ns->level + 1; i <= pid->level; i++)
2068  			seq_put_decimal_ll(m, "\t", pid->numbers[i].nr);
2069  	}
2070  #endif
2071  	seq_putc(m, '\n');
2072  }
2073  #endif
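/*
 * Illustrative fdinfo contents for a pidfd whose target sits one pid
 * namespace below that of the procfs instance (the numeric values are
 * made up):
 *
 *	Pid:	1625
 *	NSpid:	1625	1
 *
 * If the target's pid namespace is not a descendant of the procfs
 * instance's pid namespace, both fields carry a single 0 entry, as
 * described in the comment above pidfd_show_fdinfo().
 */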
2074  
2075  /*
2076   * Poll support for process exit notification.
2077   */
2078  static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
2079  {
2080  	struct pid *pid = file->private_data;
2081  	__poll_t poll_flags = 0;
2082  
2083  	poll_wait(file, &pid->wait_pidfd, pts);
2084  
2085  	/*
2086  	 * Inform pollers only when the whole thread group exits.
2087  	 * If the thread group leader exits before all other threads in the
2088  	 * group, then poll(2) should block, similar to the wait(2) family.
2089  	 */
2090  	if (thread_group_exited(pid))
2091  		poll_flags = EPOLLIN | EPOLLRDNORM;
2092  
2093  	return poll_flags;
2094  }
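/*
 * Minimal user-space sketch of the notification implemented above; the
 * pidfd and info variables are hypothetical (a pidfd can be obtained via
 * CLONE_PIDFD or pidfd_open(2)):
 *
 *	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
 *	siginfo_t info;
 *
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *		waitid(P_PIDFD, pidfd, &info, WEXITED);
 *
 * Readiness only indicates that the whole thread group has exited,
 * matching the comment in pidfd_poll().
 */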
2095  
2096  const struct file_operations pidfd_fops = {
2097  	.release = pidfd_release,
2098  	.poll = pidfd_poll,
2099  #ifdef CONFIG_PROC_FS
2100  	.show_fdinfo = pidfd_show_fdinfo,
2101  #endif
2102  };
2103  
2104  /**
2105   * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
2106   * @pid:   the struct pid for which to create a pidfd
2107   * @flags: flags of the new @pidfd
2108   * @ret:   where to return the pidfd file
2109   *
2110   * Allocate a new file that stashes @pid and reserve a new pidfd number in the
2111   * caller's file descriptor table. The pidfd is reserved but not installed yet.
2112  
2113   *
2114   * created via CLONE_PIDFD where @pid has no task attached when the pidfd and
2115   * pidfd file are prepared.
2116   *
2117   * If this function returns successfully, the caller is responsible for
2118   * either calling fd_install() with the returned pidfd and pidfd file in
2119   * order to install the pidfd into its file descriptor table, or for
2120   * calling put_unused_fd() and fput() on the returned pidfd and pidfd file
2121   * respectively.
2122   *
2123   * This function is useful when a pidfd must already be reserved but there
2124   * might still be points of failure afterwards and the caller wants to ensure
2125   * that no pidfd is leaked into its file descriptor table.
2126   *
2127   * Return: On success, a reserved pidfd is returned from the function and a new
2128   *         pidfd file is returned in the last argument to the function. On
2129   *         error, a negative error code is returned from the function and the
2130   *         last argument remains unchanged.
2131   */
2132  static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
2133  {
2134  	int pidfd;
2135  	struct file *pidfd_file;
2136  
2137  	if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC))
2138  		return -EINVAL;
2139  
2140  	pidfd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
2141  	if (pidfd < 0)
2142  		return pidfd;
2143  
2144  	pidfd_file = anon_inode_getfile("[pidfd]", &pidfd_fops, pid,
2145  					flags | O_RDWR | O_CLOEXEC);
2146  	if (IS_ERR(pidfd_file)) {
2147  		put_unused_fd(pidfd);
2148  		return PTR_ERR(pidfd_file);
2149  	}
2150  	get_pid(pid); /* held by pidfd_file now */
2151  	*ret = pidfd_file;
2152  	return pidfd;
2153  }
2154  
2155  /**
2156   * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd
2157   * @pid:   the struct pid for which to create a pidfd
2158   * @flags: flags of the new @pidfd
2159   * @ret:   where to return the pidfd file
2160   *
2161   * Allocate a new file that stashes @pid and reserve a new pidfd number in the
2162   * caller's file descriptor table. The pidfd is reserved but not installed yet.
2163   *
2164   * The helper verifies that @pid is used as a thread group leader.
2165   *
2166   * If this function returns successfully, the caller is responsible for
2167   * either calling fd_install() with the returned pidfd and pidfd file in
2168   * order to install the pidfd into its file descriptor table, or for
2169   * calling put_unused_fd() and fput() on the returned pidfd and pidfd file
2170   * respectively.
2171   *
2172   * This function is useful when a pidfd must already be reserved but there
2173   * might still be points of failure afterwards and the caller wants to ensure
2174   * that no pidfd is leaked into its file descriptor table.
2175   *
2176   * Return: On success, a reserved pidfd is returned from the function and a new
2177   *         pidfd file is returned in the last argument to the function. On
2178   *         error, a negative error code is returned from the function and the
2179   *         last argument remains unchanged.
2180   */
2181  int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret)
2182  {
2183  	if (!pid || !pid_has_task(pid, PIDTYPE_TGID))
2184  		return -EINVAL;
2185  
2186  	return __pidfd_prepare(pid, flags, ret);
2187  }
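/*
 * Sketch of the caller contract documented above; the failure condition is
 * purely illustrative:
 *
 *	struct file *pidfd_file;
 *	int pidfd, err;
 *
 *	pidfd = pidfd_prepare(pid, 0, &pidfd_file);
 *	if (pidfd < 0)
 *		return pidfd;
 *
 *	err = do_something_that_may_fail();
 *	if (err) {
 *		put_unused_fd(pidfd);
 *		fput(pidfd_file);
 *		return err;
 *	}
 *
 *	fd_install(pidfd, pidfd_file);
 */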
2188  
2189  static void __delayed_free_task(struct rcu_head *rhp)
2190  {
2191  	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
2192  
2193  	free_task(tsk);
2194  }
2195  
2196  static __always_inline void delayed_free_task(struct task_struct *tsk)
2197  {
2198  	if (IS_ENABLED(CONFIG_MEMCG))
2199  		call_rcu(&tsk->rcu, __delayed_free_task);
2200  	else
2201  		free_task(tsk);
2202  }
2203  
2204  static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk)
2205  {
2206  	/* Skip if kernel thread */
2207  	if (!tsk->mm)
2208  		return;
2209  
2210  	/* Skip if spawning a thread or using vfork */
2211  	if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM)
2212  		return;
2213  
2214  	/* We need to synchronize with __set_oom_adj */
2215  	mutex_lock(&oom_adj_mutex);
2216  	set_bit(MMF_MULTIPROCESS, &tsk->mm->flags);
2217  	/* Update the values in case they were changed after copy_signal */
2218  	tsk->signal->oom_score_adj = current->signal->oom_score_adj;
2219  	tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min;
2220  	mutex_unlock(&oom_adj_mutex);
2221  }
2222  
2223  #ifdef CONFIG_RV
2224  static void rv_task_fork(struct task_struct *p)
2225  {
2226  	int i;
2227  
2228  	for (i = 0; i < RV_PER_TASK_MONITORS; i++)
2229  		p->rv[i].da_mon.monitoring = false;
2230  }
2231  #else
2232  #define rv_task_fork(p) do {} while (0)
2233  #endif
2234  
2235  /*
2236   * This creates a new process as a copy of the old one,
2237   * but does not actually start it yet.
2238   *
2239   * It copies the registers, and all the appropriate
2240   * parts of the process environment (as per the clone
2241   * flags). The actual kick-off is left to the caller.
2242   */
2243  __latent_entropy struct task_struct *copy_process(
2244  					struct pid *pid,
2245  					int trace,
2246  					int node,
2247  					struct kernel_clone_args *args)
2248  {
2249  	int pidfd = -1, retval;
2250  	struct task_struct *p;
2251  	struct multiprocess_signals delayed;
2252  	struct file *pidfile = NULL;
2253  	const u64 clone_flags = args->flags;
2254  	struct nsproxy *nsp = current->nsproxy;
2255  
2256  	/*
2257  	 * Don't allow sharing the root directory with processes in a different
2258  	 * namespace
2259  	 */
2260  	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
2261  		return ERR_PTR(-EINVAL);
2262  
2263  	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
2264  		return ERR_PTR(-EINVAL);
2265  
2266  	/*
2267  	 * Thread groups must share signals as well, and detached threads
2268  	 * can only be started up within the thread group.
2269  	 */
2270  	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
2271  		return ERR_PTR(-EINVAL);
2272  
2273  	/*
2274  	 * Shared signal handlers imply shared VM. By way of the above,
2275  	 * thread groups also imply shared VM. Blocking this case allows
2276  	 * for various simplifications in other code.
2277  	 */
2278  	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
2279  		return ERR_PTR(-EINVAL);
2280  
2281  	/*
2282  	 * Siblings of global init remain as zombies on exit since they are
2283  	 * not reaped by their parent (swapper). To solve this and to avoid
2284  	 * multi-rooted process trees, prevent global and container-inits
2285  	 * from creating siblings.
2286  	 */
2287  	if ((clone_flags & CLONE_PARENT) &&
2288  				current->signal->flags & SIGNAL_UNKILLABLE)
2289  		return ERR_PTR(-EINVAL);
2290  
2291  	/*
2292  	 * If the new process will be in a different pid or user namespace
2293  	 * do not allow it to share a thread group with the forking task.
2294  	 */
2295  	if (clone_flags & CLONE_THREAD) {
2296  		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
2297  		    (task_active_pid_ns(current) != nsp->pid_ns_for_children))
2298  			return ERR_PTR(-EINVAL);
2299  	}
2300  
2301  	if (clone_flags & CLONE_PIDFD) {
2302  		/*
2303  		 * - CLONE_DETACHED is blocked so that we can potentially
2304  		 *   reuse it later for CLONE_PIDFD.
2305  		 * - CLONE_THREAD is blocked until someone really needs it.
2306  		 */
2307  		if (clone_flags & (CLONE_DETACHED | CLONE_THREAD))
2308  			return ERR_PTR(-EINVAL);
2309  	}
2310  
2311  	/*
2312  	 * Force any signals received before this point to be delivered
2313  	 * before the fork happens.  Collect up signals sent to multiple
2314  	 * processes that happen during the fork and delay them so that
2315  	 * they appear to happen after the fork.
2316  	 */
2317  	sigemptyset(&delayed.signal);
2318  	INIT_HLIST_NODE(&delayed.node);
2319  
2320  	spin_lock_irq(&current->sighand->siglock);
2321  	if (!(clone_flags & CLONE_THREAD))
2322  		hlist_add_head(&delayed.node, &current->signal->multiprocess);
2323  	recalc_sigpending();
2324  	spin_unlock_irq(&current->sighand->siglock);
2325  	retval = -ERESTARTNOINTR;
2326  	if (task_sigpending(current))
2327  		goto fork_out;
2328  
2329  	retval = -ENOMEM;
2330  	p = dup_task_struct(current, node);
2331  	if (!p)
2332  		goto fork_out;
2333  	p->flags &= ~PF_KTHREAD;
2334  	if (args->kthread)
2335  		p->flags |= PF_KTHREAD;
2336  	if (args->user_worker) {
2337  		/*
2338  		 * Mark us as a user worker, and block any signal that isn't
2339  		 * fatal or STOP
2340  		 */
2341  		p->flags |= PF_USER_WORKER;
2342  		siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
2343  	}
2344  	if (args->io_thread)
2345  		p->flags |= PF_IO_WORKER;
2346  
2347  	if (args->name)
2348  		strscpy_pad(p->comm, args->name, sizeof(p->comm));
2349  
2350  	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
2351  	/*
2352  	 * Clear TID on mm_release()?
2353  	 */
2354  	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? args->child_tid : NULL;
2355  
2356  	ftrace_graph_init_task(p);
2357  
2358  	rt_mutex_init_task(p);
2359  
2360  	lockdep_assert_irqs_enabled();
2361  #ifdef CONFIG_PROVE_LOCKING
2362  	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
2363  #endif
2364  	retval = copy_creds(p, clone_flags);
2365  	if (retval < 0)
2366  		goto bad_fork_free;
2367  
2368  	retval = -EAGAIN;
2369  	if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
2370  		if (p->real_cred->user != INIT_USER &&
2371  		    !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
2372  			goto bad_fork_cleanup_count;
2373  	}
2374  	current->flags &= ~PF_NPROC_EXCEEDED;
2375  
2376  	/*
2377  	 * If multiple threads are within copy_process(), then this check
2378  	 * triggers too late. This doesn't hurt; the check is only there
2379  	 * to stop root fork bombs.
2380  	 */
2381  	retval = -EAGAIN;
2382  	if (data_race(nr_threads >= max_threads))
2383  		goto bad_fork_cleanup_count;
2384  
2385  	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
2386  	p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY);
2387  	p->flags |= PF_FORKNOEXEC;
2388  	INIT_LIST_HEAD(&p->children);
2389  	INIT_LIST_HEAD(&p->sibling);
2390  	rcu_copy_process(p);
2391  	p->vfork_done = NULL;
2392  	spin_lock_init(&p->alloc_lock);
2393  
2394  	init_sigpending(&p->pending);
2395  
2396  	p->utime = p->stime = p->gtime = 0;
2397  #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
2398  	p->utimescaled = p->stimescaled = 0;
2399  #endif
2400  	prev_cputime_init(&p->prev_cputime);
2401  
2402  #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
2403  	seqcount_init(&p->vtime.seqcount);
2404  	p->vtime.starttime = 0;
2405  	p->vtime.state = VTIME_INACTIVE;
2406  #endif
2407  
2408  #ifdef CONFIG_IO_URING
2409  	p->io_uring = NULL;
2410  #endif
2411  
2412  #if defined(SPLIT_RSS_COUNTING)
2413  	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
2414  #endif
2415  
2416  	p->default_timer_slack_ns = current->timer_slack_ns;
2417  
2418  #ifdef CONFIG_PSI
2419  	p->psi_flags = 0;
2420  #endif
2421  
2422  	task_io_accounting_init(&p->ioac);
2423  	acct_clear_integrals(p);
2424  
2425  	posix_cputimers_init(&p->posix_cputimers);
2426  
2427  	p->io_context = NULL;
2428  	audit_set_context(p, NULL);
2429  	cgroup_fork(p);
2430  	if (args->kthread) {
2431  		if (!set_kthread_struct(p))
2432  			goto bad_fork_cleanup_delayacct;
2433  	}
2434  #ifdef CONFIG_NUMA
2435  	p->mempolicy = mpol_dup(p->mempolicy);
2436  	if (IS_ERR(p->mempolicy)) {
2437  		retval = PTR_ERR(p->mempolicy);
2438  		p->mempolicy = NULL;
2439  		goto bad_fork_cleanup_delayacct;
2440  	}
2441  #endif
2442  #ifdef CONFIG_CPUSETS
2443  	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
2444  	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
2445  	seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock);
2446  #endif
2447  #ifdef CONFIG_TRACE_IRQFLAGS
2448  	memset(&p->irqtrace, 0, sizeof(p->irqtrace));
2449  	p->irqtrace.hardirq_disable_ip	= _THIS_IP_;
2450  	p->irqtrace.softirq_enable_ip	= _THIS_IP_;
2451  	p->softirqs_enabled		= 1;
2452  	p->softirq_context		= 0;
2453  #endif
2454  
2455  	p->pagefault_disabled = 0;
2456  
2457  #ifdef CONFIG_LOCKDEP
2458  	lockdep_init_task(p);
2459  #endif
2460  
2461  #ifdef CONFIG_DEBUG_MUTEXES
2462  	p->blocked_on = NULL; /* not blocked yet */
2463  #endif
2464  #ifdef CONFIG_BCACHE
2465  	p->sequential_io	= 0;
2466  	p->sequential_io_avg	= 0;
2467  #endif
2468  #ifdef CONFIG_BPF_SYSCALL
2469  	RCU_INIT_POINTER(p->bpf_storage, NULL);
2470  	p->bpf_ctx = NULL;
2471  #endif
2472  
2473  	/* Perform scheduler related setup. Assign this task to a CPU. */
2474  	retval = sched_fork(clone_flags, p);
2475  	if (retval)
2476  		goto bad_fork_cleanup_policy;
2477  
2478  	retval = perf_event_init_task(p, clone_flags);
2479  	if (retval)
2480  		goto bad_fork_cleanup_policy;
2481  	retval = audit_alloc(p);
2482  	if (retval)
2483  		goto bad_fork_cleanup_perf;
2484  	/* copy all the process information */
2485  	shm_init_task(p);
2486  	retval = security_task_alloc(p, clone_flags);
2487  	if (retval)
2488  		goto bad_fork_cleanup_audit;
2489  	retval = copy_semundo(clone_flags, p);
2490  	if (retval)
2491  		goto bad_fork_cleanup_security;
2492  	retval = copy_files(clone_flags, p, args->no_files);
2493  	if (retval)
2494  		goto bad_fork_cleanup_semundo;
2495  	retval = copy_fs(clone_flags, p);
2496  	if (retval)
2497  		goto bad_fork_cleanup_files;
2498  	retval = copy_sighand(clone_flags, p);
2499  	if (retval)
2500  		goto bad_fork_cleanup_fs;
2501  	retval = copy_signal(clone_flags, p);
2502  	if (retval)
2503  		goto bad_fork_cleanup_sighand;
2504  	retval = copy_mm(clone_flags, p);
2505  	if (retval)
2506  		goto bad_fork_cleanup_signal;
2507  	retval = copy_namespaces(clone_flags, p);
2508  	if (retval)
2509  		goto bad_fork_cleanup_mm;
2510  	retval = copy_io(clone_flags, p);
2511  	if (retval)
2512  		goto bad_fork_cleanup_namespaces;
2513  	retval = copy_thread(p, args);
2514  	if (retval)
2515  		goto bad_fork_cleanup_io;
2516  
2517  	stackleak_task_init(p);
2518  
2519  	if (pid != &init_struct_pid) {
2520  		pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid,
2521  				args->set_tid_size);
2522  		if (IS_ERR(pid)) {
2523  			retval = PTR_ERR(pid);
2524  			goto bad_fork_cleanup_thread;
2525  		}
2526  	}
2527  
2528  	/*
2529  	 * This has to happen after we've potentially unshared the file
2530  	 * descriptor table (so that the pidfd doesn't leak into the child
2531  	 * if the fd table isn't shared).
2532  	 */
2533  	if (clone_flags & CLONE_PIDFD) {
2534  		/* Note that no task has been attached to @pid yet. */
2535  		retval = __pidfd_prepare(pid, O_RDWR | O_CLOEXEC, &pidfile);
2536  		if (retval < 0)
2537  			goto bad_fork_free_pid;
2538  		pidfd = retval;
2539  
2540  		retval = put_user(pidfd, args->pidfd);
2541  		if (retval)
2542  			goto bad_fork_put_pidfd;
2543  	}
2544  
2545  #ifdef CONFIG_BLOCK
2546  	p->plug = NULL;
2547  #endif
2548  	futex_init_task(p);
2549  
2550  	/*
2551  	 * sigaltstack should be cleared when sharing the same VM
2552  	 */
2553  	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
2554  		sas_ss_reset(p);
2555  
2556  	/*
2557  	 * Syscall tracing and stepping should be turned off in the
2558  	 * child regardless of CLONE_PTRACE.
2559  	 */
2560  	user_disable_single_step(p);
2561  	clear_task_syscall_work(p, SYSCALL_TRACE);
2562  #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU)
2563  	clear_task_syscall_work(p, SYSCALL_EMU);
2564  #endif
2565  	clear_tsk_latency_tracing(p);
2566  
2567  	/* ok, now we should be set up.. */
2568  	p->pid = pid_nr(pid);
2569  	if (clone_flags & CLONE_THREAD) {
2570  		p->group_leader = current->group_leader;
2571  		p->tgid = current->tgid;
2572  	} else {
2573  		p->group_leader = p;
2574  		p->tgid = p->pid;
2575  	}
2576  
2577  	p->nr_dirtied = 0;
2578  	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
2579  	p->dirty_paused_when = 0;
2580  
2581  	p->pdeath_signal = 0;
2582  	INIT_LIST_HEAD(&p->thread_group);
2583  	p->task_works = NULL;
2584  	clear_posix_cputimers_work(p);
2585  
2586  #ifdef CONFIG_KRETPROBES
2587  	p->kretprobe_instances.first = NULL;
2588  #endif
2589  #ifdef CONFIG_RETHOOK
2590  	p->rethooks.first = NULL;
2591  #endif
2592  
2593  	/*
2594  	 * Ensure that the cgroup subsystem policies allow the new process to be
2595  	 * forked. It should be noted that the new process's css_set can be changed
2596  	 * between here and cgroup_post_fork() if an organisation operation is in
2597  	 * progress.
2598  	 */
2599  	retval = cgroup_can_fork(p, args);
2600  	if (retval)
2601  		goto bad_fork_put_pidfd;
2602  
2603  	/*
2604  	 * Now that the cgroups are pinned, re-clone the parent cgroup and put
2605  	 * the new task on the correct runqueue. All this *before* the task
2606  	 * becomes visible.
2607  	 *
2608  	 * This isn't part of ->can_fork() because while the re-cloning is
2609  	 * cgroup specific, it unconditionally needs to place the task on a
2610  	 * runqueue.
2611  	 */
2612  	sched_cgroup_fork(p, args);
2613  
2614  	/*
2615  	 * From this point on we must avoid any synchronous user-space
2616  	 * communication until we take the tasklist-lock. In particular, we do
2617  	 * not want user-space to be able to predict the process start-time by
2618  	 * stalling fork(2) after we recorded the start_time but before it is
2619  	 * visible to the system.
2620  	 */
2621  
2622  	p->start_time = ktime_get_ns();
2623  	p->start_boottime = ktime_get_boottime_ns();
2624  
2625  	/*
2626  	 * Make it visible to the rest of the system, but don't wake it up yet.
2627  	 * We need the tasklist lock for parent etc. handling!
2628  	 */
2629  	write_lock_irq(&tasklist_lock);
2630  
2631  	/* CLONE_PARENT re-uses the old parent */
2632  	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
2633  		p->real_parent = current->real_parent;
2634  		p->parent_exec_id = current->parent_exec_id;
2635  		if (clone_flags & CLONE_THREAD)
2636  			p->exit_signal = -1;
2637  		else
2638  			p->exit_signal = current->group_leader->exit_signal;
2639  	} else {
2640  		p->real_parent = current;
2641  		p->parent_exec_id = current->self_exec_id;
2642  		p->exit_signal = args->exit_signal;
2643  	}
2644  
2645  	klp_copy_process(p);
2646  
2647  	sched_core_fork(p);
2648  
2649  	spin_lock(&current->sighand->siglock);
2650  
2651  	rv_task_fork(p);
2652  
2653  	rseq_fork(p, clone_flags);
2654  
2655  	/* Don't start children in a dying pid namespace */
2656  	if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
2657  		retval = -ENOMEM;
2658  		goto bad_fork_cancel_cgroup;
2659  	}
2660  
2661  	/* Let kill terminate clone/fork in the middle */
2662  	if (fatal_signal_pending(current)) {
2663  		retval = -EINTR;
2664  		goto bad_fork_cancel_cgroup;
2665  	}
2666  
2667  	/* No more failure paths after this point. */
2668  
2669  	/*
2670  	 * Copy seccomp details explicitly here, in case they were changed
2671  	 * before holding sighand lock.
2672  	 */
2673  	copy_seccomp(p);
2674  
2675  	init_task_pid_links(p);
2676  	if (likely(p->pid)) {
2677  		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
2678  
2679  		init_task_pid(p, PIDTYPE_PID, pid);
2680  		if (thread_group_leader(p)) {
2681  			init_task_pid(p, PIDTYPE_TGID, pid);
2682  			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
2683  			init_task_pid(p, PIDTYPE_SID, task_session(current));
2684  
2685  			if (is_child_reaper(pid)) {
2686  				ns_of_pid(pid)->child_reaper = p;
2687  				p->signal->flags |= SIGNAL_UNKILLABLE;
2688  			}
2689  			p->signal->shared_pending.signal = delayed.signal;
2690  			p->signal->tty = tty_kref_get(current->signal->tty);
2691  			/*
2692  			 * Inherit has_child_subreaper flag under the same
2693  			 * tasklist_lock with adding child to the process tree
2694  			 * for propagate_has_child_subreaper optimization.
2695  			 */
2696  			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
2697  							 p->real_parent->signal->is_child_subreaper;
2698  			list_add_tail(&p->sibling, &p->real_parent->children);
2699  			list_add_tail_rcu(&p->tasks, &init_task.tasks);
2700  			attach_pid(p, PIDTYPE_TGID);
2701  			attach_pid(p, PIDTYPE_PGID);
2702  			attach_pid(p, PIDTYPE_SID);
2703  			__this_cpu_inc(process_counts);
2704  		} else {
2705  			current->signal->nr_threads++;
2706  			current->signal->quick_threads++;
2707  			atomic_inc(&current->signal->live);
2708  			refcount_inc(&current->signal->sigcnt);
2709  			task_join_group_stop(p);
2710  			list_add_tail_rcu(&p->thread_group,
2711  					  &p->group_leader->thread_group);
2712  			list_add_tail_rcu(&p->thread_node,
2713  					  &p->signal->thread_head);
2714  		}
2715  		attach_pid(p, PIDTYPE_PID);
2716  		nr_threads++;
2717  	}
2718  	total_forks++;
2719  	hlist_del_init(&delayed.node);
2720  	spin_unlock(&current->sighand->siglock);
2721  	syscall_tracepoint_update(p);
2722  	write_unlock_irq(&tasklist_lock);
2723  
2724  	if (pidfile)
2725  		fd_install(pidfd, pidfile);
2726  
2727  	proc_fork_connector(p);
2728  	sched_post_fork(p);
2729  	cgroup_post_fork(p, args);
2730  	perf_event_fork(p);
2731  
2732  	trace_task_newtask(p, clone_flags);
2733  	uprobe_copy_process(p, clone_flags);
2734  	user_events_fork(p, clone_flags);
2735  
2736  	copy_oom_score_adj(clone_flags, p);
2737  
2738  	return p;
2739  
2740  bad_fork_cancel_cgroup:
2741  	sched_core_free(p);
2742  	spin_unlock(&current->sighand->siglock);
2743  	write_unlock_irq(&tasklist_lock);
2744  	cgroup_cancel_fork(p, args);
2745  bad_fork_put_pidfd:
2746  	if (clone_flags & CLONE_PIDFD) {
2747  		fput(pidfile);
2748  		put_unused_fd(pidfd);
2749  	}
2750  bad_fork_free_pid:
2751  	if (pid != &init_struct_pid)
2752  		free_pid(pid);
2753  bad_fork_cleanup_thread:
2754  	exit_thread(p);
2755  bad_fork_cleanup_io:
2756  	if (p->io_context)
2757  		exit_io_context(p);
2758  bad_fork_cleanup_namespaces:
2759  	exit_task_namespaces(p);
2760  bad_fork_cleanup_mm:
2761  	if (p->mm) {
2762  		mm_clear_owner(p->mm, p);
2763  		mmput(p->mm);
2764  	}
2765  bad_fork_cleanup_signal:
2766  	if (!(clone_flags & CLONE_THREAD))
2767  		free_signal_struct(p->signal);
2768  bad_fork_cleanup_sighand:
2769  	__cleanup_sighand(p->sighand);
2770  bad_fork_cleanup_fs:
2771  	exit_fs(p); /* blocking */
2772  bad_fork_cleanup_files:
2773  	exit_files(p); /* blocking */
2774  bad_fork_cleanup_semundo:
2775  	exit_sem(p);
2776  bad_fork_cleanup_security:
2777  	security_task_free(p);
2778  bad_fork_cleanup_audit:
2779  	audit_free(p);
2780  bad_fork_cleanup_perf:
2781  	perf_event_free_task(p);
2782  bad_fork_cleanup_policy:
2783  	lockdep_free_task(p);
2784  #ifdef CONFIG_NUMA
2785  	mpol_put(p->mempolicy);
2786  #endif
2787  bad_fork_cleanup_delayacct:
2788  	delayacct_tsk_free(p);
2789  bad_fork_cleanup_count:
2790  	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
2791  	exit_creds(p);
2792  bad_fork_free:
2793  	WRITE_ONCE(p->__state, TASK_DEAD);
2794  	exit_task_stack_account(p);
2795  	put_task_stack(p);
2796  	delayed_free_task(p);
2797  fork_out:
2798  	spin_lock_irq(&current->sighand->siglock);
2799  	hlist_del_init(&delayed.node);
2800  	spin_unlock_irq(&current->sighand->siglock);
2801  	return ERR_PTR(retval);
2802  }
2803  
2804  static inline void init_idle_pids(struct task_struct *idle)
2805  {
2806  	enum pid_type type;
2807  
2808  	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
2809  		INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
2810  		init_task_pid(idle, type, &init_struct_pid);
2811  	}
2812  }
2813  
2814  static int idle_dummy(void *dummy)
2815  {
2816  	/* This function is never called */
2817  	return 0;
2818  }
2819  
2820  struct task_struct * __init fork_idle(int cpu)
2821  {
2822  	struct task_struct *task;
2823  	struct kernel_clone_args args = {
2824  		.flags		= CLONE_VM,
2825  		.fn		= &idle_dummy,
2826  		.fn_arg		= NULL,
2827  		.kthread	= 1,
2828  		.idle		= 1,
2829  	};
2830  
2831  	task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
2832  	if (!IS_ERR(task)) {
2833  		init_idle_pids(task);
2834  		init_idle(task, cpu);
2835  	}
2836  
2837  	return task;
2838  }
2839  
2840  /*
2841   * This is like kernel_clone(), but shaved down and tailored to just
2842   * creating io_uring workers. It returns a created task, or an error pointer.
2843   * The returned task is inactive, and the caller must fire it up through
2844   * wake_up_new_task(p). All signals are blocked in the created task.
2845   */
2846  struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
2847  {
2848  	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
2849  				CLONE_IO;
2850  	struct kernel_clone_args args = {
2851  		.flags		= ((lower_32_bits(flags) | CLONE_VM |
2852  				    CLONE_UNTRACED) & ~CSIGNAL),
2853  		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
2854  		.fn		= fn,
2855  		.fn_arg		= arg,
2856  		.io_thread	= 1,
2857  		.user_worker	= 1,
2858  	};
2859  
2860  	return copy_process(NULL, 0, node, &args);
2861  }
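/*
 * Illustrative caller pattern; worker_fn and worker_data are hypothetical.
 * io_uring's io-wq worker creation follows roughly this shape:
 *
 *	struct task_struct *tsk;
 *
 *	tsk = create_io_thread(worker_fn, worker_data, NUMA_NO_NODE);
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_new_task(tsk);
 */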
2862  
2863  /*
2864   *  Ok, this is the main fork-routine.
2865   *
2866   * It copies the process, and if successful kick-starts
2867   * it and waits for it to finish using the VM if required.
2868   *
2869   * args->exit_signal is expected to be checked for sanity by the caller.
2870   */
2871  pid_t kernel_clone(struct kernel_clone_args *args)
2872  {
2873  	u64 clone_flags = args->flags;
2874  	struct completion vfork;
2875  	struct pid *pid;
2876  	struct task_struct *p;
2877  	int trace = 0;
2878  	pid_t nr;
2879  
2880  	/*
2881  	 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
2882  	 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
2883  	 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
2884  	 * field in struct clone_args and it still doesn't make sense to have
2885  	 * them both point at the same memory location. Performing this check
2886  	 * here has the advantage that we don't need to have a separate helper
2887  	 * to check for legacy clone().
2888  	 */
2889  	if ((args->flags & CLONE_PIDFD) &&
2890  	    (args->flags & CLONE_PARENT_SETTID) &&
2891  	    (args->pidfd == args->parent_tid))
2892  		return -EINVAL;
2893  
2894  	/*
2895  	 * Determine whether and which event to report to ptracer.  When
2896  	 * called from kernel_thread or CLONE_UNTRACED is explicitly
2897  	 * requested, no event is reported; otherwise, report if the event
2898  	 * for the type of forking is enabled.
2899  	 */
2900  	if (!(clone_flags & CLONE_UNTRACED)) {
2901  		if (clone_flags & CLONE_VFORK)
2902  			trace = PTRACE_EVENT_VFORK;
2903  		else if (args->exit_signal != SIGCHLD)
2904  			trace = PTRACE_EVENT_CLONE;
2905  		else
2906  			trace = PTRACE_EVENT_FORK;
2907  
2908  		if (likely(!ptrace_event_enabled(current, trace)))
2909  			trace = 0;
2910  	}
2911  
2912  	p = copy_process(NULL, trace, NUMA_NO_NODE, args);
2913  	add_latent_entropy();
2914  
2915  	if (IS_ERR(p))
2916  		return PTR_ERR(p);
2917  
2918  	/*
2919  	 * Do this prior to waking up the new thread - the thread pointer
2920  	 * might become invalid after that point if the thread exits quickly.
2921  	 */
2922  	trace_sched_process_fork(current, p);
2923  
2924  	pid = get_task_pid(p, PIDTYPE_PID);
2925  	nr = pid_vnr(pid);
2926  
2927  	if (clone_flags & CLONE_PARENT_SETTID)
2928  		put_user(nr, args->parent_tid);
2929  
2930  	if (clone_flags & CLONE_VFORK) {
2931  		p->vfork_done = &vfork;
2932  		init_completion(&vfork);
2933  		get_task_struct(p);
2934  	}
2935  
2936  	if (IS_ENABLED(CONFIG_LRU_GEN) && !(clone_flags & CLONE_VM)) {
2937  		/* lock the task to synchronize with memcg migration */
2938  		task_lock(p);
2939  		lru_gen_add_mm(p->mm);
2940  		task_unlock(p);
2941  	}
2942  
2943  	wake_up_new_task(p);
2944  
2945  	/* forking complete and child started to run, tell ptracer */
2946  	if (unlikely(trace))
2947  		ptrace_event_pid(trace, pid);
2948  
2949  	if (clone_flags & CLONE_VFORK) {
2950  		if (!wait_for_vfork_done(p, &vfork))
2951  			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
2952  	}
2953  
2954  	put_pid(pid);
2955  	return nr;
2956  }
2957  
2958  /*
2959   * Create a kernel thread.
2960   */
2961  pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
2962  		    unsigned long flags)
2963  {
2964  	struct kernel_clone_args args = {
2965  		.flags		= ((lower_32_bits(flags) | CLONE_VM |
2966  				    CLONE_UNTRACED) & ~CSIGNAL),
2967  		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
2968  		.fn		= fn,
2969  		.fn_arg		= arg,
2970  		.name		= name,
2971  		.kthread	= 1,
2972  	};
2973  
2974  	return kernel_clone(&args);
2975  }
2976  
2977  /*
2978   * Create a user mode thread.
2979   */
2980  pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags)
2981  {
2982  	struct kernel_clone_args args = {
2983  		.flags		= ((lower_32_bits(flags) | CLONE_VM |
2984  				    CLONE_UNTRACED) & ~CSIGNAL),
2985  		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
2986  		.fn		= fn,
2987  		.fn_arg		= arg,
2988  	};
2989  
2990  	return kernel_clone(&args);
2991  }
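/*
 * As of this revision, rest_init() in init/main.c spawns the future init
 * process (PID 1) through this helper, along the lines of:
 *
 *	pid = user_mode_thread(kernel_init, NULL, CLONE_FS);
 */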
2992  
2993  #ifdef __ARCH_WANT_SYS_FORK
2994  SYSCALL_DEFINE0(fork)
2995  {
2996  #ifdef CONFIG_MMU
2997  	struct kernel_clone_args args = {
2998  		.exit_signal = SIGCHLD,
2999  	};
3000  
3001  	return kernel_clone(&args);
3002  #else
3003  	/* cannot be supported in nommu mode */
3004  	return -EINVAL;
3005  #endif
3006  }
3007  #endif
3008  
3009  #ifdef __ARCH_WANT_SYS_VFORK
3010  SYSCALL_DEFINE0(vfork)
3011  {
3012  	struct kernel_clone_args args = {
3013  		.flags		= CLONE_VFORK | CLONE_VM,
3014  		.exit_signal	= SIGCHLD,
3015  	};
3016  
3017  	return kernel_clone(&args);
3018  }
3019  #endif
3020  
3021  #ifdef __ARCH_WANT_SYS_CLONE
3022  #ifdef CONFIG_CLONE_BACKWARDS
3023  SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
3024  		 int __user *, parent_tidptr,
3025  		 unsigned long, tls,
3026  		 int __user *, child_tidptr)
3027  #elif defined(CONFIG_CLONE_BACKWARDS2)
3028  SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
3029  		 int __user *, parent_tidptr,
3030  		 int __user *, child_tidptr,
3031  		 unsigned long, tls)
3032  #elif defined(CONFIG_CLONE_BACKWARDS3)
3033  SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
3034  		int, stack_size,
3035  		int __user *, parent_tidptr,
3036  		int __user *, child_tidptr,
3037  		unsigned long, tls)
3038  #else
3039  SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
3040  		 int __user *, parent_tidptr,
3041  		 int __user *, child_tidptr,
3042  		 unsigned long, tls)
3043  #endif
3044  {
3045  	struct kernel_clone_args args = {
3046  		.flags		= (lower_32_bits(clone_flags) & ~CSIGNAL),
3047  		.pidfd		= parent_tidptr,
3048  		.child_tid	= child_tidptr,
3049  		.parent_tid	= parent_tidptr,
3050  		.exit_signal	= (lower_32_bits(clone_flags) & CSIGNAL),
3051  		.stack		= newsp,
3052  		.tls		= tls,
3053  	};
3054  
3055  	return kernel_clone(&args);
3056  }
3057  #endif
3058  
3059  #ifdef __ARCH_WANT_SYS_CLONE3
3060  
3061  noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
3062  					      struct clone_args __user *uargs,
3063  					      size_t usize)
3064  {
3065  	int err;
3066  	struct clone_args args;
3067  	pid_t *kset_tid = kargs->set_tid;
3068  
3069  	BUILD_BUG_ON(offsetofend(struct clone_args, tls) !=
3070  		     CLONE_ARGS_SIZE_VER0);
3071  	BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) !=
3072  		     CLONE_ARGS_SIZE_VER1);
3073  	BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) !=
3074  		     CLONE_ARGS_SIZE_VER2);
3075  	BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2);
3076  
3077  	if (unlikely(usize > PAGE_SIZE))
3078  		return -E2BIG;
3079  	if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
3080  		return -EINVAL;
3081  
3082  	err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
3083  	if (err)
3084  		return err;
3085  
3086  	if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL))
3087  		return -EINVAL;
3088  
3089  	if (unlikely(!args.set_tid && args.set_tid_size > 0))
3090  		return -EINVAL;
3091  
3092  	if (unlikely(args.set_tid && args.set_tid_size == 0))
3093  		return -EINVAL;
3094  
3095  	/*
3096  	 * Verify that the upper 32 bits of exit_signal are unset and that
3097  	 * it is a valid signal.
3098  	 */
3099  	if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
3100  		     !valid_signal(args.exit_signal)))
3101  		return -EINVAL;
3102  
3103  	if ((args.flags & CLONE_INTO_CGROUP) &&
3104  	    (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2))
3105  		return -EINVAL;
3106  
3107  	*kargs = (struct kernel_clone_args){
3108  		.flags		= args.flags,
3109  		.pidfd		= u64_to_user_ptr(args.pidfd),
3110  		.child_tid	= u64_to_user_ptr(args.child_tid),
3111  		.parent_tid	= u64_to_user_ptr(args.parent_tid),
3112  		.exit_signal	= args.exit_signal,
3113  		.stack		= args.stack,
3114  		.stack_size	= args.stack_size,
3115  		.tls		= args.tls,
3116  		.set_tid_size	= args.set_tid_size,
3117  		.cgroup		= args.cgroup,
3118  	};
3119  
3120  	if (args.set_tid &&
3121  		copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid),
3122  			(kargs->set_tid_size * sizeof(pid_t))))
3123  		return -EFAULT;
3124  
3125  	kargs->set_tid = kset_tid;
3126  
3127  	return 0;
3128  }
3129  
3130  /**
3131   * clone3_stack_valid - check and prepare stack
3132   * @kargs: kernel clone args
3133   *
3134   * Verify that the stack arguments userspace gave us are sane.
3135   * In addition, set the stack direction for userspace since it's easy for us to
3136   * determine.
3137   */
3138  static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
3139  {
3140  	if (kargs->stack == 0) {
3141  		if (kargs->stack_size > 0)
3142  			return false;
3143  	} else {
3144  		if (kargs->stack_size == 0)
3145  			return false;
3146  
3147  		if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
3148  			return false;
3149  
3150  #if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
3151  		kargs->stack += kargs->stack_size;
3152  #endif
3153  	}
3154  
3155  	return true;
3156  }
3157  
3158  static bool clone3_args_valid(struct kernel_clone_args *kargs)
3159  {
3160  	/* Verify that no unknown flags are passed along. */
3161  	if (kargs->flags &
3162  	    ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP))
3163  		return false;
3164  
3165  	/*
3166  	 * - make the CLONE_DETACHED bit reusable for clone3
3167  	 * - make the CSIGNAL bits reusable for clone3
3168  	 */
3169  	if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME))))
3170  		return false;
3171  
3172  	if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) ==
3173  	    (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND))
3174  		return false;
3175  
3176  	if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) &&
3177  	    kargs->exit_signal)
3178  		return false;
3179  
3180  	if (!clone3_stack_valid(kargs))
3181  		return false;
3182  
3183  	return true;
3184  }
3185  
3186  /**
3187   * clone3 - create a new process with specific properties
3188   * @uargs: argument structure
3189   * @size:  size of @uargs
3190   *
3191   * clone3() is the extensible successor to clone()/clone2().
3192   * It takes a struct as argument that is versioned by its size.
3193   *
3194   * Return: On success, a positive PID for the child process.
3195   *         On error, a negative errno number.
3196   */
3197  SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
3198  {
3199  	int err;
3200  
3201  	struct kernel_clone_args kargs;
3202  	pid_t set_tid[MAX_PID_NS_LEVEL];
3203  
3204  	kargs.set_tid = set_tid;
3205  
3206  	err = copy_clone_args_from_user(&kargs, uargs, size);
3207  	if (err)
3208  		return err;
3209  
3210  	if (!clone3_args_valid(&kargs))
3211  		return -EINVAL;
3212  
3213  	return kernel_clone(&kargs);
3214  }
3215  #endif
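/*
 * Minimal user-space sketch of the clone3() calling convention (raw
 * syscall, no glibc wrapper); pidfd is a hypothetical local int:
 *
 *	struct clone_args args = {
 *		.flags	     = CLONE_PIDFD,
 *		.pidfd	     = (__u64)(uintptr_t)&pidfd,
 *		.exit_signal = SIGCHLD,
 *	};
 *	pid_t child = syscall(__NR_clone3, &args, sizeof(args));
 *
 * Because the struct is versioned by its size, a larger-than-known struct
 * is only rejected (-E2BIG) when the unknown trailing bytes are non-zero;
 * see the copy_struct_from_user() call in copy_clone_args_from_user().
 */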
3216  
3217  void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data)
3218  {
3219  	struct task_struct *leader, *parent, *child;
3220  	int res;
3221  
3222  	read_lock(&tasklist_lock);
3223  	leader = top = top->group_leader;
3224  down:
3225  	for_each_thread(leader, parent) {
3226  		list_for_each_entry(child, &parent->children, sibling) {
3227  			res = visitor(child, data);
3228  			if (res) {
3229  				if (res < 0)
3230  					goto out;
3231  				leader = child;
3232  				goto down;
3233  			}
3234  up:
3235  			;
3236  		}
3237  	}
3238  
3239  	if (leader != top) {
3240  		child = leader;
3241  		parent = child->real_parent;
3242  		leader = parent->group_leader;
3243  		goto up;
3244  	}
3245  out:
3246  	read_unlock(&tasklist_lock);
3247  }
3248  
3249  #ifndef ARCH_MIN_MMSTRUCT_ALIGN
3250  #define ARCH_MIN_MMSTRUCT_ALIGN 0
3251  #endif
3252  
3253  static void sighand_ctor(void *data)
3254  {
3255  	struct sighand_struct *sighand = data;
3256  
3257  	spin_lock_init(&sighand->siglock);
3258  	init_waitqueue_head(&sighand->signalfd_wqh);
3259  }
3260  
3261  void __init mm_cache_init(void)
3262  {
3263  	unsigned int mm_size;
3264  
3265  	/*
3266  	 * The mm_cpumask is located at the end of mm_struct, and is
3267  	 * dynamically sized based on the maximum CPU number this system
3268  	 * can have, taking hotplug into account (nr_cpu_ids).
3269  	 */
3270  	mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();
3271  
3272  	mm_cachep = kmem_cache_create_usercopy("mm_struct",
3273  			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
3274  			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3275  			offsetof(struct mm_struct, saved_auxv),
3276  			sizeof_field(struct mm_struct, saved_auxv),
3277  			NULL);
3278  }
3279  
3280  void __init proc_caches_init(void)
3281  {
3282  	sighand_cachep = kmem_cache_create("sighand_cache",
3283  			sizeof(struct sighand_struct), 0,
3284  			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
3285  			SLAB_ACCOUNT, sighand_ctor);
3286  	signal_cachep = kmem_cache_create("signal_cache",
3287  			sizeof(struct signal_struct), 0,
3288  			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3289  			NULL);
3290  	files_cachep = kmem_cache_create("files_cache",
3291  			sizeof(struct files_struct), 0,
3292  			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3293  			NULL);
3294  	fs_cachep = kmem_cache_create("fs_cache",
3295  			sizeof(struct fs_struct), 0,
3296  			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
3297  			NULL);
3298  
3299  	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
3300  #ifdef CONFIG_PER_VMA_LOCK
3301  	vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);
3302  #endif
3303  	mmap_init();
3304  	nsproxy_cache_init();
3305  }
3306  
3307  /*
3308   * Check constraints on flags passed to the unshare system call.
3309   */
3310  static int check_unshare_flags(unsigned long unshare_flags)
3311  {
3312  	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
3313  				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
3314  				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
3315  				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
3316  				CLONE_NEWTIME))
3317  		return -EINVAL;
3318  	/*
3319  	 * Not implemented, but pretend it works if there is nothing
3320  	 * to unshare.  Note that unsharing the address space or the
3321  	 * signal handlers also requires unsharing the signal queues (aka
3322  	 * CLONE_THREAD).
3323  	 */
3324  	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
3325  		if (!thread_group_empty(current))
3326  			return -EINVAL;
3327  	}
3328  	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
3329  		if (refcount_read(&current->sighand->count) > 1)
3330  			return -EINVAL;
3331  	}
3332  	if (unshare_flags & CLONE_VM) {
3333  		if (!current_is_single_threaded())
3334  			return -EINVAL;
3335  	}
3336  
3337  	return 0;
3338  }
3339  
3340  /*
3341   * Unshare the filesystem structure if it is being shared
3342   */
3343  static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
3344  {
3345  	struct fs_struct *fs = current->fs;
3346  
3347  	if (!(unshare_flags & CLONE_FS) || !fs)
3348  		return 0;
3349  
3350  	/* don't need lock here; in the worst case we'll do useless copy */
3351  	/* don't need lock here; in the worst case we'll do a useless copy */
3352  		return 0;
3353  
3354  	*new_fsp = copy_fs_struct(fs);
3355  	if (!*new_fsp)
3356  		return -ENOMEM;
3357  
3358  	return 0;
3359  }
3360  
3361  /*
3362   * Unshare file descriptor table if it is being shared
3363   */
3364  int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
3365  	       struct files_struct **new_fdp)
3366  {
3367  	struct files_struct *fd = current->files;
3368  	int error = 0;
3369  
3370  	if ((unshare_flags & CLONE_FILES) &&
3371  	    (fd && atomic_read(&fd->count) > 1)) {
3372  		*new_fdp = dup_fd(fd, max_fds, &error);
3373  		if (!*new_fdp)
3374  			return error;
3375  	}
3376  
3377  	return 0;
3378  }
3379  
3380  /*
3381   * unshare allows a process to 'unshare' part of the process
3382   * context which was originally shared using clone.  copy_*
3383   * functions used by kernel_clone() cannot be used here directly
3384   * because they modify an inactive task_struct that is being
3385   * constructed. Here we are modifying the current, active,
3386   * task_struct.
3387   */
3388  int ksys_unshare(unsigned long unshare_flags)
3389  {
3390  	struct fs_struct *fs, *new_fs = NULL;
3391  	struct files_struct *new_fd = NULL;
3392  	struct cred *new_cred = NULL;
3393  	struct nsproxy *new_nsproxy = NULL;
3394  	int do_sysvsem = 0;
3395  	int err;
3396  
3397  	/*
3398  	 * If unsharing a user namespace must also unshare the thread group
3399  	 * If unsharing a user namespace, we must also unshare the thread group
3400  	 * and the filesystem root and working directories.
3401  	if (unshare_flags & CLONE_NEWUSER)
3402  		unshare_flags |= CLONE_THREAD | CLONE_FS;
3403  	/*
3404  	 * If unsharing the VM, we must also unshare signal handlers.
3405  	 */
3406  	if (unshare_flags & CLONE_VM)
3407  		unshare_flags |= CLONE_SIGHAND;
3408  	/*
3409  	 * If unsharing signal handlers, we must also unshare the signal queues.
3410  	 */
3411  	if (unshare_flags & CLONE_SIGHAND)
3412  		unshare_flags |= CLONE_THREAD;
3413  	/*
3414  	 * If unsharing the mount namespace, we must also unshare filesystem information.
3415  	 */
3416  	if (unshare_flags & CLONE_NEWNS)
3417  		unshare_flags |= CLONE_FS;
3418  
3419  	err = check_unshare_flags(unshare_flags);
3420  	if (err)
3421  		goto bad_unshare_out;
3422  	/*
3423  	 * CLONE_NEWIPC must also detach from the undolist: after switching
3424  	 * to a new ipc namespace, the semaphore arrays from the old
3425  	 * namespace are unreachable.
3426  	 */
3427  	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
3428  		do_sysvsem = 1;
3429  	err = unshare_fs(unshare_flags, &new_fs);
3430  	if (err)
3431  		goto bad_unshare_out;
3432  	err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd);
3433  	if (err)
3434  		goto bad_unshare_cleanup_fs;
3435  	err = unshare_userns(unshare_flags, &new_cred);
3436  	if (err)
3437  		goto bad_unshare_cleanup_fd;
3438  	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
3439  					 new_cred, new_fs);
3440  	if (err)
3441  		goto bad_unshare_cleanup_cred;
3442  
3443  	if (new_cred) {
3444  		err = set_cred_ucounts(new_cred);
3445  		if (err)
3446  			goto bad_unshare_cleanup_cred;
3447  	}
3448  
3449  	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
3450  		if (do_sysvsem) {
3451  			/*
3452  			 * CLONE_SYSVSEM mirrors what sys_exit() does with the undo lists.
3453  			 */
3454  			exit_sem(current);
3455  		}
3456  		if (unshare_flags & CLONE_NEWIPC) {
3457  			/* Orphan segments in old ns (see sem above). */
3458  			exit_shm(current);
3459  			shm_init_task(current);
3460  		}
3461  
3462  		if (new_nsproxy)
3463  			switch_task_namespaces(current, new_nsproxy);
3464  
3465  		task_lock(current);
3466  
3467  		if (new_fs) {
3468  			fs = current->fs;
3469  			spin_lock(&fs->lock);
3470  			current->fs = new_fs;
3471  			if (--fs->users)
3472  				new_fs = NULL;
3473  			else
3474  				new_fs = fs;
3475  			spin_unlock(&fs->lock);
3476  		}
3477  
3478  		if (new_fd)
3479  			swap(current->files, new_fd);
3480  
3481  		task_unlock(current);
3482  
3483  		if (new_cred) {
3484  			/* Install the new user namespace */
3485  			commit_creds(new_cred);
3486  			new_cred = NULL;
3487  		}
3488  	}
3489  
3490  	perf_event_namespaces(current);
3491  
3492  bad_unshare_cleanup_cred:
3493  	if (new_cred)
3494  		put_cred(new_cred);
3495  bad_unshare_cleanup_fd:
3496  	if (new_fd)
3497  		put_files_struct(new_fd);
3498  
3499  bad_unshare_cleanup_fs:
3500  	if (new_fs)
3501  		free_fs_struct(new_fs);
3502  
3503  bad_unshare_out:
3504  	return err;
3505  }
3506  
3507  SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
3508  {
3509  	return ksys_unshare(unshare_flags);
3510  }
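/*
 * Minimal user-space sketch of unshare(2), here detaching the caller's
 * mount and UTS namespaces (this needs CAP_SYS_ADMIN in the caller's user
 * namespace):
 *
 *	if (unshare(CLONE_NEWNS | CLONE_NEWUTS) == -1)
 *		err(EXIT_FAILURE, "unshare");
 *
 * Note that ksys_unshare() above implicitly adds CLONE_FS when
 * CLONE_NEWNS is requested.
 */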
3511  
3512  /*
3513   *	Helper to unshare the files of the current task.
3514   *	We don't want to expose copy_files internals to
3515   *	the exec layer of the kernel.
3516   */
3517  
3518  int unshare_files(void)
3519  {
3520  	struct task_struct *task = current;
3521  	struct files_struct *old, *copy = NULL;
3522  	int error;
3523  
3524  	error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
3525  	if (error || !copy)
3526  		return error;
3527  
3528  	old = task->files;
3529  	task_lock(task);
3530  	task->files = copy;
3531  	task_unlock(task);
3532  	put_files_struct(old);
3533  	return 0;
3534  }
3535  
3536  int sysctl_max_threads(struct ctl_table *table, int write,
3537  		       void *buffer, size_t *lenp, loff_t *ppos)
3538  {
3539  	struct ctl_table t;
3540  	int ret;
3541  	int threads = max_threads;
3542  	int min = 1;
3543  	int max = MAX_THREADS;
3544  
3545  	t = *table;
3546  	t.data = &threads;
3547  	t.extra1 = &min;
3548  	t.extra2 = &max;
3549  
3550  	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
3551  	if (ret || !write)
3552  		return ret;
3553  
3554  	max_threads = threads;
3555  
3556  	return 0;
3557  }
3558