xref: /openbmc/linux/kernel/fork.c (revision 015f1e4297ad32f83251f3f4cee2389ce5516e9e)
1 /*
2  *  linux/kernel/fork.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6 
7 /*
8  *  'fork.c' contains the help-routines for the 'fork' system call
9  * (see also entry.S and others).
10  * Fork is rather simple, once you get the hang of it, but the memory
11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12  */
13 
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/unistd.h>
17 #include <linux/module.h>
18 #include <linux/vmalloc.h>
19 #include <linux/completion.h>
20 #include <linux/personality.h>
21 #include <linux/mempolicy.h>
22 #include <linux/sem.h>
23 #include <linux/file.h>
24 #include <linux/fdtable.h>
25 #include <linux/iocontext.h>
26 #include <linux/key.h>
27 #include <linux/binfmts.h>
28 #include <linux/mman.h>
29 #include <linux/mmu_notifier.h>
30 #include <linux/fs.h>
31 #include <linux/nsproxy.h>
32 #include <linux/capability.h>
33 #include <linux/cpu.h>
34 #include <linux/cgroup.h>
35 #include <linux/security.h>
36 #include <linux/hugetlb.h>
37 #include <linux/swap.h>
38 #include <linux/syscalls.h>
39 #include <linux/jiffies.h>
40 #include <linux/futex.h>
41 #include <linux/compat.h>
42 #include <linux/kthread.h>
43 #include <linux/task_io_accounting_ops.h>
44 #include <linux/rcupdate.h>
45 #include <linux/ptrace.h>
46 #include <linux/mount.h>
47 #include <linux/audit.h>
48 #include <linux/memcontrol.h>
49 #include <linux/ftrace.h>
50 #include <linux/profile.h>
51 #include <linux/rmap.h>
52 #include <linux/ksm.h>
53 #include <linux/acct.h>
54 #include <linux/tsacct_kern.h>
55 #include <linux/cn_proc.h>
56 #include <linux/freezer.h>
57 #include <linux/delayacct.h>
58 #include <linux/taskstats_kern.h>
59 #include <linux/random.h>
60 #include <linux/tty.h>
61 #include <linux/blkdev.h>
62 #include <linux/fs_struct.h>
63 #include <linux/magic.h>
64 #include <linux/perf_event.h>
65 #include <linux/posix-timers.h>
66 #include <linux/user-return-notifier.h>
67 #include <linux/oom.h>
68 #include <linux/khugepaged.h>
69 
70 #include <asm/pgtable.h>
71 #include <asm/pgalloc.h>
72 #include <asm/uaccess.h>
73 #include <asm/mmu_context.h>
74 #include <asm/cacheflush.h>
75 #include <asm/tlbflush.h>
76 
77 #include <trace/events/sched.h>
78 
79 #define CREATE_TRACE_POINTS
80 #include <trace/events/task.h>
81 
82 /*
83  * Counters protected by write_lock_irq(&tasklist_lock)
84  */
85 unsigned long total_forks;	/* Handle normal Linux uptimes. */
86 int nr_threads;			/* The idle threads do not count. */
87 
88 int max_threads;		/* tunable limit on nr_threads */
89 
90 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
91 
92 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
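
/*
 * Editor's note (illustrative, mirroring the pattern used later in this
 * file): writers update these counters under the tasklist lock, e.g.
 *
 *	write_lock_irq(&tasklist_lock);
 *	nr_threads++;
 *	total_forks++;
 *	write_unlock_irq(&tasklist_lock);
 *
 * as copy_process() does below; lockless readers only ever see an
 * approximate snapshot of the values.
 */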
93 
94 #ifdef CONFIG_PROVE_RCU
95 int lockdep_tasklist_lock_is_held(void)
96 {
97 	return lockdep_is_held(&tasklist_lock);
98 }
99 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
100 #endif /* #ifdef CONFIG_PROVE_RCU */
101 
102 int nr_processes(void)
103 {
104 	int cpu;
105 	int total = 0;
106 
107 	for_each_possible_cpu(cpu)
108 		total += per_cpu(process_counts, cpu);
109 
110 	return total;
111 }
112 
113 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
114 # define alloc_task_struct_node(node)		\
115 		kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
116 # define free_task_struct(tsk)			\
117 		kmem_cache_free(task_struct_cachep, (tsk))
118 static struct kmem_cache *task_struct_cachep;
119 #endif
120 
121 #ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
122 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
123 						  int node)
124 {
125 #ifdef CONFIG_DEBUG_STACK_USAGE
126 	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
127 #else
128 	gfp_t mask = GFP_KERNEL;
129 #endif
130 	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
131 
132 	return page ? page_address(page) : NULL;
133 }
134 
135 static inline void free_thread_info(struct thread_info *ti)
136 {
137 	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
138 }
139 #endif
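
/*
 * Editor's sketch (assuming typical x86 values, not taken from this file):
 * with PAGE_SIZE = 4 KiB and THREAD_SIZE = 8 KiB, THREAD_SIZE_ORDER is 1,
 * so alloc_thread_info_node() above grabs one order-1 (two page) block
 * whose page_address() serves as both the thread_info and the kernel
 * stack of the new task; free_thread_info() returns that same block.
 */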
140 
141 /* SLAB cache for signal_struct structures (tsk->signal) */
142 static struct kmem_cache *signal_cachep;
143 
144 /* SLAB cache for sighand_struct structures (tsk->sighand) */
145 struct kmem_cache *sighand_cachep;
146 
147 /* SLAB cache for files_struct structures (tsk->files) */
148 struct kmem_cache *files_cachep;
149 
150 /* SLAB cache for fs_struct structures (tsk->fs) */
151 struct kmem_cache *fs_cachep;
152 
153 /* SLAB cache for vm_area_struct structures */
154 struct kmem_cache *vm_area_cachep;
155 
156 /* SLAB cache for mm_struct structures (tsk->mm) */
157 static struct kmem_cache *mm_cachep;
158 
159 static void account_kernel_stack(struct thread_info *ti, int account)
160 {
161 	struct zone *zone = page_zone(virt_to_page(ti));
162 
163 	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
164 }
165 
166 void free_task(struct task_struct *tsk)
167 {
168 	account_kernel_stack(tsk->stack, -1);
169 	free_thread_info(tsk->stack);
170 	rt_mutex_debug_task_free(tsk);
171 	ftrace_graph_exit_task(tsk);
172 	free_task_struct(tsk);
173 }
174 EXPORT_SYMBOL(free_task);
175 
176 static inline void free_signal_struct(struct signal_struct *sig)
177 {
178 	taskstats_tgid_free(sig);
179 	sched_autogroup_exit(sig);
180 	kmem_cache_free(signal_cachep, sig);
181 }
182 
183 static inline void put_signal_struct(struct signal_struct *sig)
184 {
185 	if (atomic_dec_and_test(&sig->sigcnt))
186 		free_signal_struct(sig);
187 }
188 
189 void __put_task_struct(struct task_struct *tsk)
190 {
191 	WARN_ON(!tsk->exit_state);
192 	WARN_ON(atomic_read(&tsk->usage));
193 	WARN_ON(tsk == current);
194 
195 	exit_creds(tsk);
196 	delayacct_tsk_free(tsk);
197 	put_signal_struct(tsk->signal);
198 
199 	if (!profile_handoff_task(tsk))
200 		free_task(tsk);
201 }
202 EXPORT_SYMBOL_GPL(__put_task_struct);
203 
204 /*
205  * Macro override instead of a weak attribute alias, to work around
206  * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
207  */
208 #ifndef arch_task_cache_init
209 #define arch_task_cache_init()
210 #endif
211 
212 void __init fork_init(unsigned long mempages)
213 {
214 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
215 #ifndef ARCH_MIN_TASKALIGN
216 #define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
217 #endif
218 	/* create a slab on which task_structs can be allocated */
219 	task_struct_cachep =
220 		kmem_cache_create("task_struct", sizeof(struct task_struct),
221 			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
222 #endif
223 
224 	/* do the arch specific task caches init */
225 	arch_task_cache_init();
226 
227 	/*
228 	 * The default maximum number of threads is set to a safe
229 	 * value: the thread_info/stack allocations can take up at most
230 	 * one eighth of memory.
231 	 */
232 	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
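
	/*
	 * Worked example (editor's illustration, assuming 4 KiB pages and
	 * 8 KiB thread stacks): on a 4 GiB machine mempages is 1048576 and
	 * THREAD_SIZE / PAGE_SIZE is 2, so max_threads becomes
	 * 1048576 / 16 = 65536; the stacks alone could then use 512 MiB,
	 * i.e. one eighth of memory, matching the comment above.
	 */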
233 
234 	/*
235 	 * we need to allow at least 20 threads to boot a system
236 	 */
237 	if (max_threads < 20)
238 		max_threads = 20;
239 
240 	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
241 	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
242 	init_task.signal->rlim[RLIMIT_SIGPENDING] =
243 		init_task.signal->rlim[RLIMIT_NPROC];
244 }
245 
246 int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
247 					       struct task_struct *src)
248 {
249 	*dst = *src;
250 	return 0;
251 }
252 
253 static struct task_struct *dup_task_struct(struct task_struct *orig)
254 {
255 	struct task_struct *tsk;
256 	struct thread_info *ti;
257 	unsigned long *stackend;
258 	int node = tsk_fork_get_node(orig);
259 	int err;
260 
261 	prepare_to_copy(orig);
262 
263 	tsk = alloc_task_struct_node(node);
264 	if (!tsk)
265 		return NULL;
266 
267 	ti = alloc_thread_info_node(tsk, node);
268 	if (!ti) {
269 		free_task_struct(tsk);
270 		return NULL;
271 	}
272 
273 	err = arch_dup_task_struct(tsk, orig);
274 	if (err)
275 		goto out;
276 
277 	tsk->stack = ti;
278 
279 	setup_thread_stack(tsk, orig);
280 	clear_user_return_notifier(tsk);
281 	clear_tsk_need_resched(tsk);
282 	stackend = end_of_stack(tsk);
283 	*stackend = STACK_END_MAGIC;	/* for overflow detection */
284 
285 #ifdef CONFIG_CC_STACKPROTECTOR
286 	tsk->stack_canary = get_random_int();
287 #endif
288 
289 	/*
290 	 * One for us, one for whoever does the "release_task()" (usually
291 	 * parent)
292 	 */
293 	atomic_set(&tsk->usage, 2);
294 #ifdef CONFIG_BLK_DEV_IO_TRACE
295 	tsk->btrace_seq = 0;
296 #endif
297 	tsk->splice_pipe = NULL;
298 
299 	account_kernel_stack(ti, 1);
300 
301 	return tsk;
302 
303 out:
304 	free_thread_info(ti);
305 	free_task_struct(tsk);
306 	return NULL;
307 }
308 
309 #ifdef CONFIG_MMU
310 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
311 {
312 	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
313 	struct rb_node **rb_link, *rb_parent;
314 	int retval;
315 	unsigned long charge;
316 	struct mempolicy *pol;
317 
318 	down_write(&oldmm->mmap_sem);
319 	flush_cache_dup_mm(oldmm);
320 	/*
321 	 * Not linked in yet - no deadlock potential:
322 	 */
323 	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
324 
325 	mm->locked_vm = 0;
326 	mm->mmap = NULL;
327 	mm->mmap_cache = NULL;
328 	mm->free_area_cache = oldmm->mmap_base;
329 	mm->cached_hole_size = ~0UL;
330 	mm->map_count = 0;
331 	cpumask_clear(mm_cpumask(mm));
332 	mm->mm_rb = RB_ROOT;
333 	rb_link = &mm->mm_rb.rb_node;
334 	rb_parent = NULL;
335 	pprev = &mm->mmap;
336 	retval = ksm_fork(mm, oldmm);
337 	if (retval)
338 		goto out;
339 	retval = khugepaged_fork(mm, oldmm);
340 	if (retval)
341 		goto out;
342 
343 	prev = NULL;
344 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
345 		struct file *file;
346 
347 		if (mpnt->vm_flags & VM_DONTCOPY) {
348 			long pages = vma_pages(mpnt);
349 			mm->total_vm -= pages;
350 			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
351 								-pages);
352 			continue;
353 		}
354 		charge = 0;
355 		if (mpnt->vm_flags & VM_ACCOUNT) {
356 			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
357 			if (security_vm_enough_memory(len))
358 				goto fail_nomem;
359 			charge = len;
360 		}
361 		tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
362 		if (!tmp)
363 			goto fail_nomem;
364 		*tmp = *mpnt;
365 		INIT_LIST_HEAD(&tmp->anon_vma_chain);
366 		pol = mpol_dup(vma_policy(mpnt));
367 		retval = PTR_ERR(pol);
368 		if (IS_ERR(pol))
369 			goto fail_nomem_policy;
370 		vma_set_policy(tmp, pol);
371 		tmp->vm_mm = mm;
372 		if (anon_vma_fork(tmp, mpnt))
373 			goto fail_nomem_anon_vma_fork;
374 		tmp->vm_flags &= ~VM_LOCKED;
375 		tmp->vm_next = tmp->vm_prev = NULL;
376 		file = tmp->vm_file;
377 		if (file) {
378 			struct inode *inode = file->f_path.dentry->d_inode;
379 			struct address_space *mapping = file->f_mapping;
380 
381 			get_file(file);
382 			if (tmp->vm_flags & VM_DENYWRITE)
383 				atomic_dec(&inode->i_writecount);
384 			mutex_lock(&mapping->i_mmap_mutex);
385 			if (tmp->vm_flags & VM_SHARED)
386 				mapping->i_mmap_writable++;
387 			flush_dcache_mmap_lock(mapping);
388 			/* insert tmp into the share list, just after mpnt */
389 			vma_prio_tree_add(tmp, mpnt);
390 			flush_dcache_mmap_unlock(mapping);
391 			mutex_unlock(&mapping->i_mmap_mutex);
392 		}
393 
394 		/*
395 		 * Clear hugetlb-related page reserves for children. This only
396 		 * affects MAP_PRIVATE mappings. Faults generated by the child
397 		 * are not guaranteed to succeed, even if read-only
398 		 */
399 		if (is_vm_hugetlb_page(tmp))
400 			reset_vma_resv_huge_pages(tmp);
401 
402 		/*
403 		 * Link in the new vma and copy the page table entries.
404 		 */
405 		*pprev = tmp;
406 		pprev = &tmp->vm_next;
407 		tmp->vm_prev = prev;
408 		prev = tmp;
409 
410 		__vma_link_rb(mm, tmp, rb_link, rb_parent);
411 		rb_link = &tmp->vm_rb.rb_right;
412 		rb_parent = &tmp->vm_rb;
413 
414 		mm->map_count++;
415 		retval = copy_page_range(mm, oldmm, mpnt);
416 
417 		if (tmp->vm_ops && tmp->vm_ops->open)
418 			tmp->vm_ops->open(tmp);
419 
420 		if (retval)
421 			goto out;
422 	}
423 	/* a new mm has just been created */
424 	arch_dup_mmap(oldmm, mm);
425 	retval = 0;
426 out:
427 	up_write(&mm->mmap_sem);
428 	flush_tlb_mm(oldmm);
429 	up_write(&oldmm->mmap_sem);
430 	return retval;
431 fail_nomem_anon_vma_fork:
432 	mpol_put(pol);
433 fail_nomem_policy:
434 	kmem_cache_free(vm_area_cachep, tmp);
435 fail_nomem:
436 	retval = -ENOMEM;
437 	vm_unacct_memory(charge);
438 	goto out;
439 }
440 
441 static inline int mm_alloc_pgd(struct mm_struct *mm)
442 {
443 	mm->pgd = pgd_alloc(mm);
444 	if (unlikely(!mm->pgd))
445 		return -ENOMEM;
446 	return 0;
447 }
448 
449 static inline void mm_free_pgd(struct mm_struct *mm)
450 {
451 	pgd_free(mm, mm->pgd);
452 }
453 #else
454 #define dup_mmap(mm, oldmm)	(0)
455 #define mm_alloc_pgd(mm)	(0)
456 #define mm_free_pgd(mm)
457 #endif /* CONFIG_MMU */
458 
459 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
460 
461 #define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
462 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
463 
464 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
465 
466 static int __init coredump_filter_setup(char *s)
467 {
468 	default_dump_filter =
469 		(simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
470 		MMF_DUMP_FILTER_MASK;
471 	return 1;
472 }
473 
474 __setup("coredump_filter=", coredump_filter_setup);
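
/*
 * Editor's usage sketch: booting with "coredump_filter=0x23" (or any form
 * simple_strtoul() accepts with base 0, e.g. plain decimal) parses the
 * value, shifts it by MMF_DUMP_FILTER_SHIFT and masks it; mm_init() below
 * then uses default_dump_filter whenever a new mm has no current->mm to
 * inherit flags from.
 */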
475 
476 #include <linux/init_task.h>
477 
478 static void mm_init_aio(struct mm_struct *mm)
479 {
480 #ifdef CONFIG_AIO
481 	spin_lock_init(&mm->ioctx_lock);
482 	INIT_HLIST_HEAD(&mm->ioctx_list);
483 #endif
484 }
485 
486 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
487 {
488 	atomic_set(&mm->mm_users, 1);
489 	atomic_set(&mm->mm_count, 1);
490 	init_rwsem(&mm->mmap_sem);
491 	INIT_LIST_HEAD(&mm->mmlist);
492 	mm->flags = (current->mm) ?
493 		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
494 	mm->core_state = NULL;
495 	mm->nr_ptes = 0;
496 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
497 	spin_lock_init(&mm->page_table_lock);
498 	mm->free_area_cache = TASK_UNMAPPED_BASE;
499 	mm->cached_hole_size = ~0UL;
500 	mm_init_aio(mm);
501 	mm_init_owner(mm, p);
502 
503 	if (likely(!mm_alloc_pgd(mm))) {
504 		mm->def_flags = 0;
505 		mmu_notifier_mm_init(mm);
506 		return mm;
507 	}
508 
509 	free_mm(mm);
510 	return NULL;
511 }
512 
513 /*
514  * Allocate and initialize an mm_struct.
515  */
516 struct mm_struct *mm_alloc(void)
517 {
518 	struct mm_struct *mm;
519 
520 	mm = allocate_mm();
521 	if (!mm)
522 		return NULL;
523 
524 	memset(mm, 0, sizeof(*mm));
525 	mm_init_cpumask(mm);
526 	return mm_init(mm, current);
527 }
528 
529 /*
530  * Called when the last reference to the mm
531  * is dropped: either by a lazy thread or by
532  * mmput. Free the page directory and the mm.
533  */
534 void __mmdrop(struct mm_struct *mm)
535 {
536 	BUG_ON(mm == &init_mm);
537 	mm_free_pgd(mm);
538 	destroy_context(mm);
539 	mmu_notifier_mm_destroy(mm);
540 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
541 	VM_BUG_ON(mm->pmd_huge_pte);
542 #endif
543 	free_mm(mm);
544 }
545 EXPORT_SYMBOL_GPL(__mmdrop);
546 
547 /*
548  * Decrement the use count and release all resources for an mm.
549  */
550 void mmput(struct mm_struct *mm)
551 {
552 	might_sleep();
553 
554 	if (atomic_dec_and_test(&mm->mm_users)) {
555 		exit_aio(mm);
556 		ksm_exit(mm);
557 		khugepaged_exit(mm); /* must run before exit_mmap */
558 		exit_mmap(mm);
559 		set_mm_exe_file(mm, NULL);
560 		if (!list_empty(&mm->mmlist)) {
561 			spin_lock(&mmlist_lock);
562 			list_del(&mm->mmlist);
563 			spin_unlock(&mmlist_lock);
564 		}
565 		put_swap_token(mm);
566 		if (mm->binfmt)
567 			module_put(mm->binfmt->module);
568 		mmdrop(mm);
569 	}
570 }
571 EXPORT_SYMBOL_GPL(mmput);
572 
573 /*
574  * We added or removed a vma mapping the executable. The vmas are only mapped
575  * during exec and are not mapped with the mmap system call.
576  * Callers must hold down_write() on the mm's mmap_sem when calling these.
577  */
578 void added_exe_file_vma(struct mm_struct *mm)
579 {
580 	mm->num_exe_file_vmas++;
581 }
582 
583 void removed_exe_file_vma(struct mm_struct *mm)
584 {
585 	mm->num_exe_file_vmas--;
586 	if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
587 		fput(mm->exe_file);
588 		mm->exe_file = NULL;
589 	}
590 
591 }
592 
593 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
594 {
595 	if (new_exe_file)
596 		get_file(new_exe_file);
597 	if (mm->exe_file)
598 		fput(mm->exe_file);
599 	mm->exe_file = new_exe_file;
600 	mm->num_exe_file_vmas = 0;
601 }
602 
603 struct file *get_mm_exe_file(struct mm_struct *mm)
604 {
605 	struct file *exe_file;
606 
607 	/* We need mmap_sem to protect against races with removal of
608 	 * VM_EXECUTABLE vmas */
609 	down_read(&mm->mmap_sem);
610 	exe_file = mm->exe_file;
611 	if (exe_file)
612 		get_file(exe_file);
613 	up_read(&mm->mmap_sem);
614 	return exe_file;
615 }
616 
617 static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
618 {
619 	/* It's safe to write the exe_file pointer without exe_file_lock because
620 	 * this is called during fork when the task is not yet in /proc */
621 	newmm->exe_file = get_mm_exe_file(oldmm);
622 }
623 
624 /**
625  * get_task_mm - acquire a reference to the task's mm
626  *
627  * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
628  * this kernel workthread has only transiently adopted a user mm with
629  * use_mm, e.g. to do its AIO); otherwise returns a reference to the mm,
630  * after bumping up the use count.  The caller must release the mm via
631  * mmput() after use.  Typically used by /proc and ptrace.
632  */
633 struct mm_struct *get_task_mm(struct task_struct *task)
634 {
635 	struct mm_struct *mm;
636 
637 	task_lock(task);
638 	mm = task->mm;
639 	if (mm) {
640 		if (task->flags & PF_KTHREAD)
641 			mm = NULL;
642 		else
643 			atomic_inc(&mm->mm_users);
644 	}
645 	task_unlock(task);
646 	return mm;
647 }
648 EXPORT_SYMBOL_GPL(get_task_mm);
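
/*
 * Editor's usage sketch (hypothetical helper, not part of the original
 * file): the canonical caller pattern for get_task_mm()/mmput(), in the
 * style of /proc and ptrace users.
 */
#if 0
static int example_with_task_mm(struct task_struct *task)
{
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		return -ENOENT;		/* no user mm, or task is a kthread */

	down_read(&mm->mmap_sem);
	/* ... inspect mm->mmap or the rss counters here ... */
	up_read(&mm->mmap_sem);

	mmput(mm);			/* drop the reference taken above */
	return 0;
}
#endif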
649 
650 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
651 {
652 	struct mm_struct *mm;
653 	int err;
654 
655 	err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
656 	if (err)
657 		return ERR_PTR(err);
658 
659 	mm = get_task_mm(task);
660 	if (mm && mm != current->mm &&
661 			!ptrace_may_access(task, mode)) {
662 		mmput(mm);
663 		mm = ERR_PTR(-EACCES);
664 	}
665 	mutex_unlock(&task->signal->cred_guard_mutex);
666 
667 	return mm;
668 }
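
/*
 * Editor's note (illustrative): callers pass a ptrace access mode here,
 * e.g. mm_access(task, PTRACE_MODE_READ) from /proc-style code; the return
 * is NULL when the task has no mm, an ERR_PTR() on failure (e.g. -EACCES
 * when the ptrace check above refuses access), or a referenced mm that
 * must be released with mmput().
 */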
669 
670 /* Please note the differences between mmput and mm_release.
671  * mmput is called whenever we stop holding onto a mm_struct,
672  * whether on error or on success.
673  *
674  * mm_release is called after a mm_struct has been removed
675  * from the current process.
676  *
677  * This difference is important for error handling, when we
678  * only half set up a mm_struct for a new process and need to restore
679  * the old one.  Because we mmput the new mm_struct before
680  * restoring the old one...
681  * Eric Biederman 10 January 1998
682  */
683 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
684 {
685 	struct completion *vfork_done = tsk->vfork_done;
686 
687 	/* Get rid of any futexes when releasing the mm */
688 #ifdef CONFIG_FUTEX
689 	if (unlikely(tsk->robust_list)) {
690 		exit_robust_list(tsk);
691 		tsk->robust_list = NULL;
692 	}
693 #ifdef CONFIG_COMPAT
694 	if (unlikely(tsk->compat_robust_list)) {
695 		compat_exit_robust_list(tsk);
696 		tsk->compat_robust_list = NULL;
697 	}
698 #endif
699 	if (unlikely(!list_empty(&tsk->pi_state_list)))
700 		exit_pi_state_list(tsk);
701 #endif
702 
703 	/* Get rid of any cached register state */
704 	deactivate_mm(tsk, mm);
705 
706 	/* notify parent sleeping on vfork() */
707 	if (vfork_done) {
708 		tsk->vfork_done = NULL;
709 		complete(vfork_done);
710 	}
711 
712 	/*
713 	 * If we're exiting normally, clear a user-space tid field if
714 	 * requested.  We leave this alone when dying by signal, to leave
715 	 * the value intact in a core dump, and to save the unnecessary
716 	 * trouble otherwise.  Userland only wants this done for a sys_exit.
717 	 */
718 	if (tsk->clear_child_tid) {
719 		if (!(tsk->flags & PF_SIGNALED) &&
720 		    atomic_read(&mm->mm_users) > 1) {
721 			/*
722 			 * We don't check the error code - if userspace has
723 			 * not set up a proper pointer then tough luck.
724 			 */
725 			put_user(0, tsk->clear_child_tid);
726 			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
727 					1, NULL, NULL, 0);
728 		}
729 		tsk->clear_child_tid = NULL;
730 	}
731 }
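
/*
 * Editor's note (illustrative, based on the clear_child_tid handling
 * above): threading libraries typically clone() with CLONE_CHILD_CLEARTID
 * so that on thread exit the kernel stores 0 to the registered TID word
 * and issues a FUTEX_WAKE on it, which is what allows a pthread_join()
 * style waiter sleeping on that futex to be woken.
 */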
732 
733 /*
734  * Allocate a new mm structure and copy contents from the
735  * mm structure of the passed in task structure.
736  */
737 struct mm_struct *dup_mm(struct task_struct *tsk)
738 {
739 	struct mm_struct *mm, *oldmm = current->mm;
740 	int err;
741 
742 	if (!oldmm)
743 		return NULL;
744 
745 	mm = allocate_mm();
746 	if (!mm)
747 		goto fail_nomem;
748 
749 	memcpy(mm, oldmm, sizeof(*mm));
750 	mm_init_cpumask(mm);
751 
752 	/* Initialize the swap token state */
753 	mm->token_priority = 0;
754 	mm->last_interval = 0;
755 
756 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
757 	mm->pmd_huge_pte = NULL;
758 #endif
759 
760 	if (!mm_init(mm, tsk))
761 		goto fail_nomem;
762 
763 	if (init_new_context(tsk, mm))
764 		goto fail_nocontext;
765 
766 	dup_mm_exe_file(oldmm, mm);
767 
768 	err = dup_mmap(mm, oldmm);
769 	if (err)
770 		goto free_pt;
771 
772 	mm->hiwater_rss = get_mm_rss(mm);
773 	mm->hiwater_vm = mm->total_vm;
774 
775 	if (mm->binfmt && !try_module_get(mm->binfmt->module))
776 		goto free_pt;
777 
778 	return mm;
779 
780 free_pt:
781 	/* don't put binfmt in mmput; we don't hold a module reference yet */
782 	mm->binfmt = NULL;
783 	mmput(mm);
784 
785 fail_nomem:
786 	return NULL;
787 
788 fail_nocontext:
789 	/*
790 	 * If init_new_context() failed, we cannot use mmput() to free the mm
791 	 * because it calls destroy_context()
792 	 */
793 	mm_free_pgd(mm);
794 	free_mm(mm);
795 	return NULL;
796 }
797 
798 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
799 {
800 	struct mm_struct *mm, *oldmm;
801 	int retval;
802 
803 	tsk->min_flt = tsk->maj_flt = 0;
804 	tsk->nvcsw = tsk->nivcsw = 0;
805 #ifdef CONFIG_DETECT_HUNG_TASK
806 	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
807 #endif
808 
809 	tsk->mm = NULL;
810 	tsk->active_mm = NULL;
811 
812 	/*
813 	 * Are we cloning a kernel thread?
814 	 *
815 	 * We need to steal an active VM for that.
816 	 */
817 	oldmm = current->mm;
818 	if (!oldmm)
819 		return 0;
820 
821 	if (clone_flags & CLONE_VM) {
822 		atomic_inc(&oldmm->mm_users);
823 		mm = oldmm;
824 		goto good_mm;
825 	}
826 
827 	retval = -ENOMEM;
828 	mm = dup_mm(tsk);
829 	if (!mm)
830 		goto fail_nomem;
831 
832 good_mm:
833 	/* Initialize the swap token state */
834 	mm->token_priority = 0;
835 	mm->last_interval = 0;
836 
837 	tsk->mm = mm;
838 	tsk->active_mm = mm;
839 	return 0;
840 
841 fail_nomem:
842 	return retval;
843 }
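
/*
 * Editor's sketch (typical flag combinations, not taken from this file):
 * a plain fork() reaches copy_mm() without CLONE_VM and gets a private
 * copy-on-write address space via dup_mm()/dup_mmap(), while a
 * pthread-style clone() passes CLONE_VM (alongside CLONE_FS | CLONE_FILES
 * | CLONE_SIGHAND | CLONE_THREAD) and merely bumps mm_users on the
 * parent's mm above.
 */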
844 
845 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
846 {
847 	struct fs_struct *fs = current->fs;
848 	if (clone_flags & CLONE_FS) {
849 		/* tsk->fs is already what we want */
850 		spin_lock(&fs->lock);
851 		if (fs->in_exec) {
852 			spin_unlock(&fs->lock);
853 			return -EAGAIN;
854 		}
855 		fs->users++;
856 		spin_unlock(&fs->lock);
857 		return 0;
858 	}
859 	tsk->fs = copy_fs_struct(fs);
860 	if (!tsk->fs)
861 		return -ENOMEM;
862 	return 0;
863 }
864 
865 static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
866 {
867 	struct files_struct *oldf, *newf;
868 	int error = 0;
869 
870 	/*
871 	 * A background process may not have any files ...
872 	 */
873 	oldf = current->files;
874 	if (!oldf)
875 		goto out;
876 
877 	if (clone_flags & CLONE_FILES) {
878 		atomic_inc(&oldf->count);
879 		goto out;
880 	}
881 
882 	newf = dup_fd(oldf, &error);
883 	if (!newf)
884 		goto out;
885 
886 	tsk->files = newf;
887 	error = 0;
888 out:
889 	return error;
890 }
891 
892 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
893 {
894 #ifdef CONFIG_BLOCK
895 	struct io_context *ioc = current->io_context;
896 	struct io_context *new_ioc;
897 
898 	if (!ioc)
899 		return 0;
900 	/*
901 	 * Share io context with parent, if CLONE_IO is set
902 	 */
903 	if (clone_flags & CLONE_IO) {
904 		tsk->io_context = ioc_task_link(ioc);
905 		if (unlikely(!tsk->io_context))
906 			return -ENOMEM;
907 	} else if (ioprio_valid(ioc->ioprio)) {
908 		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
909 		if (unlikely(!new_ioc))
910 			return -ENOMEM;
911 
912 		new_ioc->ioprio = ioc->ioprio;
913 		put_io_context(new_ioc);
914 	}
915 #endif
916 	return 0;
917 }
918 
919 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
920 {
921 	struct sighand_struct *sig;
922 
923 	if (clone_flags & CLONE_SIGHAND) {
924 		atomic_inc(&current->sighand->count);
925 		return 0;
926 	}
927 	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
928 	rcu_assign_pointer(tsk->sighand, sig);
929 	if (!sig)
930 		return -ENOMEM;
931 	atomic_set(&sig->count, 1);
932 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
933 	return 0;
934 }
935 
936 void __cleanup_sighand(struct sighand_struct *sighand)
937 {
938 	if (atomic_dec_and_test(&sighand->count))
939 		kmem_cache_free(sighand_cachep, sighand);
940 }
941 
942 
943 /*
944  * Initialize POSIX timer handling for a thread group.
945  */
946 static void posix_cpu_timers_init_group(struct signal_struct *sig)
947 {
948 	unsigned long cpu_limit;
949 
950 	/* Thread group counters. */
951 	thread_group_cputime_init(sig);
952 
953 	cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
954 	if (cpu_limit != RLIM_INFINITY) {
955 		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
956 		sig->cputimer.running = 1;
957 	}
958 
959 	/* The timer lists. */
960 	INIT_LIST_HEAD(&sig->cpu_timers[0]);
961 	INIT_LIST_HEAD(&sig->cpu_timers[1]);
962 	INIT_LIST_HEAD(&sig->cpu_timers[2]);
963 }
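
/*
 * Editor's example (illustrative): with the RLIMIT_CPU soft limit set to
 * 60 seconds, the code above arms cputime_expires.prof_exp to
 * secs_to_cputime(60) and marks the group cputimer as running, so the
 * shared CPU clock is accumulated and the limit can later be enforced
 * (normally via SIGXCPU) by the posix-cpu-timers code.
 */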
964 
965 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
966 {
967 	struct signal_struct *sig;
968 
969 	if (clone_flags & CLONE_THREAD)
970 		return 0;
971 
972 	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
973 	tsk->signal = sig;
974 	if (!sig)
975 		return -ENOMEM;
976 
977 	sig->nr_threads = 1;
978 	atomic_set(&sig->live, 1);
979 	atomic_set(&sig->sigcnt, 1);
980 	init_waitqueue_head(&sig->wait_chldexit);
981 	if (clone_flags & CLONE_NEWPID)
982 		sig->flags |= SIGNAL_UNKILLABLE;
983 	sig->curr_target = tsk;
984 	init_sigpending(&sig->shared_pending);
985 	INIT_LIST_HEAD(&sig->posix_timers);
986 
987 	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
988 	sig->real_timer.function = it_real_fn;
989 
990 	task_lock(current->group_leader);
991 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
992 	task_unlock(current->group_leader);
993 
994 	posix_cpu_timers_init_group(sig);
995 
996 	tty_audit_fork(sig);
997 	sched_autogroup_fork(sig);
998 
999 #ifdef CONFIG_CGROUPS
1000 	init_rwsem(&sig->group_rwsem);
1001 #endif
1002 
1003 	sig->oom_adj = current->signal->oom_adj;
1004 	sig->oom_score_adj = current->signal->oom_score_adj;
1005 	sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1006 
1007 	mutex_init(&sig->cred_guard_mutex);
1008 
1009 	return 0;
1010 }
1011 
1012 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
1013 {
1014 	unsigned long new_flags = p->flags;
1015 
1016 	new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1017 	new_flags |= PF_FORKNOEXEC;
1018 	new_flags |= PF_STARTING;
1019 	p->flags = new_flags;
1020 }
1021 
1022 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1023 {
1024 	current->clear_child_tid = tidptr;
1025 
1026 	return task_pid_vnr(current);
1027 }
1028 
1029 static void rt_mutex_init_task(struct task_struct *p)
1030 {
1031 	raw_spin_lock_init(&p->pi_lock);
1032 #ifdef CONFIG_RT_MUTEXES
1033 	plist_head_init(&p->pi_waiters);
1034 	p->pi_blocked_on = NULL;
1035 #endif
1036 }
1037 
1038 #ifdef CONFIG_MM_OWNER
1039 void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1040 {
1041 	mm->owner = p;
1042 }
1043 #endif /* CONFIG_MM_OWNER */
1044 
1045 /*
1046  * Initialize POSIX timer handling for a single task.
1047  */
1048 static void posix_cpu_timers_init(struct task_struct *tsk)
1049 {
1050 	tsk->cputime_expires.prof_exp = 0;
1051 	tsk->cputime_expires.virt_exp = 0;
1052 	tsk->cputime_expires.sched_exp = 0;
1053 	INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1054 	INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1055 	INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1056 }
1057 
1058 /*
1059  * This creates a new process as a copy of the old one,
1060  * but does not actually start it yet.
1061  *
1062  * It copies the registers, and all the appropriate
1063  * parts of the process environment (as per the clone
1064  * flags). The actual kick-off is left to the caller.
1065  */
1066 static struct task_struct *copy_process(unsigned long clone_flags,
1067 					unsigned long stack_start,
1068 					struct pt_regs *regs,
1069 					unsigned long stack_size,
1070 					int __user *child_tidptr,
1071 					struct pid *pid,
1072 					int trace)
1073 {
1074 	int retval;
1075 	struct task_struct *p;
1076 	int cgroup_callbacks_done = 0;
1077 
1078 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1079 		return ERR_PTR(-EINVAL);
1080 
1081 	/*
1082 	 * Thread groups must share signals as well, and detached threads
1083 	 * can only be started up within the thread group.
1084 	 */
1085 	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1086 		return ERR_PTR(-EINVAL);
1087 
1088 	/*
1089 	 * Shared signal handlers imply shared VM. By way of the above,
1090 	 * thread groups also imply shared VM. Blocking this case allows
1091 	 * for various simplifications in other code.
1092 	 */
1093 	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1094 		return ERR_PTR(-EINVAL);
1095 
1096 	/*
1097 	 * Siblings of global init remain as zombies on exit since they are
1098 	 * not reaped by their parent (swapper). To solve this and to avoid
1099 	 * multi-rooted process trees, prevent global and container-inits
1100 	 * from creating siblings.
1101 	 */
1102 	if ((clone_flags & CLONE_PARENT) &&
1103 				current->signal->flags & SIGNAL_UNKILLABLE)
1104 		return ERR_PTR(-EINVAL);
1105 
1106 	retval = security_task_create(clone_flags);
1107 	if (retval)
1108 		goto fork_out;
1109 
1110 	retval = -ENOMEM;
1111 	p = dup_task_struct(current);
1112 	if (!p)
1113 		goto fork_out;
1114 
1115 	ftrace_graph_init_task(p);
1116 
1117 	rt_mutex_init_task(p);
1118 
1119 #ifdef CONFIG_PROVE_LOCKING
1120 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1121 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1122 #endif
1123 	retval = -EAGAIN;
1124 	if (atomic_read(&p->real_cred->user->processes) >=
1125 			task_rlimit(p, RLIMIT_NPROC)) {
1126 		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
1127 		    p->real_cred->user != INIT_USER)
1128 			goto bad_fork_free;
1129 	}
1130 	current->flags &= ~PF_NPROC_EXCEEDED;
1131 
1132 	retval = copy_creds(p, clone_flags);
1133 	if (retval < 0)
1134 		goto bad_fork_free;
1135 
1136 	/*
1137 	 * If multiple threads are within copy_process(), then this check
1138 	 * triggers too late. This doesn't hurt; the check is only there
1139 	 * to stop root fork bombs.
1140 	 */
1141 	retval = -EAGAIN;
1142 	if (nr_threads >= max_threads)
1143 		goto bad_fork_cleanup_count;
1144 
1145 	if (!try_module_get(task_thread_info(p)->exec_domain->module))
1146 		goto bad_fork_cleanup_count;
1147 
1148 	p->did_exec = 0;
1149 	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
1150 	copy_flags(clone_flags, p);
1151 	INIT_LIST_HEAD(&p->children);
1152 	INIT_LIST_HEAD(&p->sibling);
1153 	rcu_copy_process(p);
1154 	p->vfork_done = NULL;
1155 	spin_lock_init(&p->alloc_lock);
1156 
1157 	init_sigpending(&p->pending);
1158 
1159 	p->utime = p->stime = p->gtime = 0;
1160 	p->utimescaled = p->stimescaled = 0;
1161 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1162 	p->prev_utime = p->prev_stime = 0;
1163 #endif
1164 #if defined(SPLIT_RSS_COUNTING)
1165 	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1166 #endif
1167 
1168 	p->default_timer_slack_ns = current->timer_slack_ns;
1169 
1170 	task_io_accounting_init(&p->ioac);
1171 	acct_clear_integrals(p);
1172 
1173 	posix_cpu_timers_init(p);
1174 
1175 	do_posix_clock_monotonic_gettime(&p->start_time);
1176 	p->real_start_time = p->start_time;
1177 	monotonic_to_bootbased(&p->real_start_time);
1178 	p->io_context = NULL;
1179 	p->audit_context = NULL;
1180 	if (clone_flags & CLONE_THREAD)
1181 		threadgroup_change_begin(current);
1182 	cgroup_fork(p);
1183 #ifdef CONFIG_NUMA
1184 	p->mempolicy = mpol_dup(p->mempolicy);
1185 	if (IS_ERR(p->mempolicy)) {
1186 		retval = PTR_ERR(p->mempolicy);
1187 		p->mempolicy = NULL;
1188 		goto bad_fork_cleanup_cgroup;
1189 	}
1190 	mpol_fix_fork_child_flag(p);
1191 #endif
1192 #ifdef CONFIG_CPUSETS
1193 	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1194 	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1195 #endif
1196 #ifdef CONFIG_TRACE_IRQFLAGS
1197 	p->irq_events = 0;
1198 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1199 	p->hardirqs_enabled = 1;
1200 #else
1201 	p->hardirqs_enabled = 0;
1202 #endif
1203 	p->hardirq_enable_ip = 0;
1204 	p->hardirq_enable_event = 0;
1205 	p->hardirq_disable_ip = _THIS_IP_;
1206 	p->hardirq_disable_event = 0;
1207 	p->softirqs_enabled = 1;
1208 	p->softirq_enable_ip = _THIS_IP_;
1209 	p->softirq_enable_event = 0;
1210 	p->softirq_disable_ip = 0;
1211 	p->softirq_disable_event = 0;
1212 	p->hardirq_context = 0;
1213 	p->softirq_context = 0;
1214 #endif
1215 #ifdef CONFIG_LOCKDEP
1216 	p->lockdep_depth = 0; /* no locks held yet */
1217 	p->curr_chain_key = 0;
1218 	p->lockdep_recursion = 0;
1219 #endif
1220 
1221 #ifdef CONFIG_DEBUG_MUTEXES
1222 	p->blocked_on = NULL; /* not blocked yet */
1223 #endif
1224 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
1225 	p->memcg_batch.do_batch = 0;
1226 	p->memcg_batch.memcg = NULL;
1227 #endif
1228 
1229 	/* Perform scheduler related setup. Assign this task to a CPU. */
1230 	sched_fork(p);
1231 
1232 	retval = perf_event_init_task(p);
1233 	if (retval)
1234 		goto bad_fork_cleanup_policy;
1235 	retval = audit_alloc(p);
1236 	if (retval)
1237 		goto bad_fork_cleanup_policy;
1238 	/* copy all the process information */
1239 	retval = copy_semundo(clone_flags, p);
1240 	if (retval)
1241 		goto bad_fork_cleanup_audit;
1242 	retval = copy_files(clone_flags, p);
1243 	if (retval)
1244 		goto bad_fork_cleanup_semundo;
1245 	retval = copy_fs(clone_flags, p);
1246 	if (retval)
1247 		goto bad_fork_cleanup_files;
1248 	retval = copy_sighand(clone_flags, p);
1249 	if (retval)
1250 		goto bad_fork_cleanup_fs;
1251 	retval = copy_signal(clone_flags, p);
1252 	if (retval)
1253 		goto bad_fork_cleanup_sighand;
1254 	retval = copy_mm(clone_flags, p);
1255 	if (retval)
1256 		goto bad_fork_cleanup_signal;
1257 	retval = copy_namespaces(clone_flags, p);
1258 	if (retval)
1259 		goto bad_fork_cleanup_mm;
1260 	retval = copy_io(clone_flags, p);
1261 	if (retval)
1262 		goto bad_fork_cleanup_namespaces;
1263 	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
1264 	if (retval)
1265 		goto bad_fork_cleanup_io;
1266 
1267 	if (pid != &init_struct_pid) {
1268 		retval = -ENOMEM;
1269 		pid = alloc_pid(p->nsproxy->pid_ns);
1270 		if (!pid)
1271 			goto bad_fork_cleanup_io;
1272 	}
1273 
1274 	p->pid = pid_nr(pid);
1275 	p->tgid = p->pid;
1276 	if (clone_flags & CLONE_THREAD)
1277 		p->tgid = current->tgid;
1278 
1279 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1280 	/*
1281 	 * Clear TID on mm_release()?
1282 	 */
1283 	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1284 #ifdef CONFIG_BLOCK
1285 	p->plug = NULL;
1286 #endif
1287 #ifdef CONFIG_FUTEX
1288 	p->robust_list = NULL;
1289 #ifdef CONFIG_COMPAT
1290 	p->compat_robust_list = NULL;
1291 #endif
1292 	INIT_LIST_HEAD(&p->pi_state_list);
1293 	p->pi_state_cache = NULL;
1294 #endif
1295 	/*
1296 	 * sigaltstack should be cleared when sharing the same VM
1297 	 */
1298 	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1299 		p->sas_ss_sp = p->sas_ss_size = 0;
1300 
1301 	/*
1302 	 * Syscall tracing and stepping should be turned off in the
1303 	 * child regardless of CLONE_PTRACE.
1304 	 */
1305 	user_disable_single_step(p);
1306 	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1307 #ifdef TIF_SYSCALL_EMU
1308 	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1309 #endif
1310 	clear_all_latency_tracing(p);
1311 
1312 	/* ok, now we should be set up. */
1313 	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
1314 	p->pdeath_signal = 0;
1315 	p->exit_state = 0;
1316 
1317 	p->nr_dirtied = 0;
1318 	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
1319 	p->dirty_paused_when = 0;
1320 
1321 	/*
1322 	 * Ok, make it visible to the rest of the system.
1323 	 * We don't wake it up yet.
1324 	 */
1325 	p->group_leader = p;
1326 	INIT_LIST_HEAD(&p->thread_group);
1327 
1328 	/* Now that the task is set up, run cgroup callbacks if
1329 	 * necessary. We need to run them before the task is visible
1330 	 * on the tasklist. */
1331 	cgroup_fork_callbacks(p);
1332 	cgroup_callbacks_done = 1;
1333 
1334 	/* Need tasklist lock for parent etc handling! */
1335 	write_lock_irq(&tasklist_lock);
1336 
1337 	/* CLONE_PARENT re-uses the old parent */
1338 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1339 		p->real_parent = current->real_parent;
1340 		p->parent_exec_id = current->parent_exec_id;
1341 	} else {
1342 		p->real_parent = current;
1343 		p->parent_exec_id = current->self_exec_id;
1344 	}
1345 
1346 	spin_lock(&current->sighand->siglock);
1347 
1348 	/*
1349 	 * Process group and session signals need to be delivered to just the
1350 	 * parent before the fork or both the parent and the child after the
1351 	 * fork. Restart if a signal comes in before we add the new process to
1352 	 * its process group.
1353 	 * A fatal signal pending means that current will exit, so the new
1354 	 * thread can't slip out of an OOM kill (or normal SIGKILL).
1355 	 */
1356 	recalc_sigpending();
1357 	if (signal_pending(current)) {
1358 		spin_unlock(&current->sighand->siglock);
1359 		write_unlock_irq(&tasklist_lock);
1360 		retval = -ERESTARTNOINTR;
1361 		goto bad_fork_free_pid;
1362 	}
1363 
1364 	if (clone_flags & CLONE_THREAD) {
1365 		current->signal->nr_threads++;
1366 		atomic_inc(&current->signal->live);
1367 		atomic_inc(&current->signal->sigcnt);
1368 		p->group_leader = current->group_leader;
1369 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1370 	}
1371 
1372 	if (likely(p->pid)) {
1373 		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
1374 
1375 		if (thread_group_leader(p)) {
1376 			if (is_child_reaper(pid))
1377 				p->nsproxy->pid_ns->child_reaper = p;
1378 
1379 			p->signal->leader_pid = pid;
1380 			p->signal->tty = tty_kref_get(current->signal->tty);
1381 			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1382 			attach_pid(p, PIDTYPE_SID, task_session(current));
1383 			list_add_tail(&p->sibling, &p->real_parent->children);
1384 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
1385 			__this_cpu_inc(process_counts);
1386 		}
1387 		attach_pid(p, PIDTYPE_PID, pid);
1388 		nr_threads++;
1389 	}
1390 
1391 	total_forks++;
1392 	spin_unlock(&current->sighand->siglock);
1393 	write_unlock_irq(&tasklist_lock);
1394 	proc_fork_connector(p);
1395 	cgroup_post_fork(p);
1396 	if (clone_flags & CLONE_THREAD)
1397 		threadgroup_change_end(current);
1398 	perf_event_fork(p);
1399 
1400 	trace_task_newtask(p, clone_flags);
1401 
1402 	return p;
1403 
1404 bad_fork_free_pid:
1405 	if (pid != &init_struct_pid)
1406 		free_pid(pid);
1407 bad_fork_cleanup_io:
1408 	if (p->io_context)
1409 		exit_io_context(p);
1410 bad_fork_cleanup_namespaces:
1411 	exit_task_namespaces(p);
1412 bad_fork_cleanup_mm:
1413 	if (p->mm)
1414 		mmput(p->mm);
1415 bad_fork_cleanup_signal:
1416 	if (!(clone_flags & CLONE_THREAD))
1417 		free_signal_struct(p->signal);
1418 bad_fork_cleanup_sighand:
1419 	__cleanup_sighand(p->sighand);
1420 bad_fork_cleanup_fs:
1421 	exit_fs(p); /* blocking */
1422 bad_fork_cleanup_files:
1423 	exit_files(p); /* blocking */
1424 bad_fork_cleanup_semundo:
1425 	exit_sem(p);
1426 bad_fork_cleanup_audit:
1427 	audit_free(p);
1428 bad_fork_cleanup_policy:
1429 	perf_event_free_task(p);
1430 #ifdef CONFIG_NUMA
1431 	mpol_put(p->mempolicy);
1432 bad_fork_cleanup_cgroup:
1433 #endif
1434 	if (clone_flags & CLONE_THREAD)
1435 		threadgroup_change_end(current);
1436 	cgroup_exit(p, cgroup_callbacks_done);
1437 	delayacct_tsk_free(p);
1438 	module_put(task_thread_info(p)->exec_domain->module);
1439 bad_fork_cleanup_count:
1440 	atomic_dec(&p->cred->user->processes);
1441 	exit_creds(p);
1442 bad_fork_free:
1443 	free_task(p);
1444 fork_out:
1445 	return ERR_PTR(retval);
1446 }
1447 
1448 noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
1449 {
1450 	memset(regs, 0, sizeof(struct pt_regs));
1451 	return regs;
1452 }
1453 
1454 static inline void init_idle_pids(struct pid_link *links)
1455 {
1456 	enum pid_type type;
1457 
1458 	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1459 		INIT_HLIST_NODE(&links[type].node); /* not really needed */
1460 		links[type].pid = &init_struct_pid;
1461 	}
1462 }
1463 
1464 struct task_struct * __cpuinit fork_idle(int cpu)
1465 {
1466 	struct task_struct *task;
1467 	struct pt_regs regs;
1468 
1469 	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1470 			    &init_struct_pid, 0);
1471 	if (!IS_ERR(task)) {
1472 		init_idle_pids(task->pids);
1473 		init_idle(task, cpu);
1474 	}
1475 
1476 	return task;
1477 }
1478 
1479 /*
1480  *  Ok, this is the main fork-routine.
1481  *
1482  * It copies the process, and if successful kick-starts
1483  * it and waits for it to finish using the VM if required.
1484  */
1485 long do_fork(unsigned long clone_flags,
1486 	      unsigned long stack_start,
1487 	      struct pt_regs *regs,
1488 	      unsigned long stack_size,
1489 	      int __user *parent_tidptr,
1490 	      int __user *child_tidptr)
1491 {
1492 	struct task_struct *p;
1493 	int trace = 0;
1494 	long nr;
1495 
1496 	/*
1497 	 * Do some preliminary argument and permissions checking before we
1498 	 * actually start allocating stuff
1499 	 */
1500 	if (clone_flags & CLONE_NEWUSER) {
1501 		if (clone_flags & CLONE_THREAD)
1502 			return -EINVAL;
1503 		/* hopefully this check will go away when userns support is
1504 		 * complete
1505 		 */
1506 		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
1507 				!capable(CAP_SETGID))
1508 			return -EPERM;
1509 	}
1510 
1511 	/*
1512 	 * Determine whether and which event to report to ptracer.  When
1513 	 * called from kernel_thread or CLONE_UNTRACED is explicitly
1514 	 * requested, no event is reported; otherwise, report if the event
1515 	 * for the type of forking is enabled.
1516 	 */
1517 	if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
1518 		if (clone_flags & CLONE_VFORK)
1519 			trace = PTRACE_EVENT_VFORK;
1520 		else if ((clone_flags & CSIGNAL) != SIGCHLD)
1521 			trace = PTRACE_EVENT_CLONE;
1522 		else
1523 			trace = PTRACE_EVENT_FORK;
1524 
1525 		if (likely(!ptrace_event_enabled(current, trace)))
1526 			trace = 0;
1527 	}
1528 
1529 	p = copy_process(clone_flags, stack_start, regs, stack_size,
1530 			 child_tidptr, NULL, trace);
1531 	/*
1532 	 * Do this prior to waking up the new thread - the thread pointer
1533 	 * might get invalid after that point, if the thread exits quickly.
1534 	 */
1535 	if (!IS_ERR(p)) {
1536 		struct completion vfork;
1537 
1538 		trace_sched_process_fork(current, p);
1539 
1540 		nr = task_pid_vnr(p);
1541 
1542 		if (clone_flags & CLONE_PARENT_SETTID)
1543 			put_user(nr, parent_tidptr);
1544 
1545 		if (clone_flags & CLONE_VFORK) {
1546 			p->vfork_done = &vfork;
1547 			init_completion(&vfork);
1548 		}
1549 
1550 		/*
1551 		 * We set PF_STARTING at creation in case tracing wants to
1552 		 * use this to distinguish a fully live task from one that
1553 		 * hasn't finished SIGSTOP raising yet.  Now we clear it
1554 		 * and set the child going.
1555 		 */
1556 		p->flags &= ~PF_STARTING;
1557 
1558 		wake_up_new_task(p);
1559 
1560 		/* forking complete and child started to run, tell ptracer */
1561 		if (unlikely(trace))
1562 			ptrace_event(trace, nr);
1563 
1564 		if (clone_flags & CLONE_VFORK) {
1565 			freezer_do_not_count();
1566 			wait_for_completion(&vfork);
1567 			freezer_count();
1568 			ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
1569 		}
1570 	} else {
1571 		nr = PTR_ERR(p);
1572 	}
1573 	return nr;
1574 }
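
/*
 * Editor's sketch (architecture wrappers differ; this mirrors the common
 * pattern of this era rather than any one arch's code):
 *
 *	sys_fork:	do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 *	sys_vfork:	do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
 *				regs->sp, regs, 0, NULL, NULL);
 *	sys_clone:	do_fork(clone_flags, newsp ? : regs->sp, regs, 0,
 *				parent_tidptr, child_tidptr);
 *	kernel_thread:	do_fork(flags | CLONE_VM | CLONE_UNTRACED, ...);
 */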
1575 
1576 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
1577 #define ARCH_MIN_MMSTRUCT_ALIGN 0
1578 #endif
1579 
1580 static void sighand_ctor(void *data)
1581 {
1582 	struct sighand_struct *sighand = data;
1583 
1584 	spin_lock_init(&sighand->siglock);
1585 	init_waitqueue_head(&sighand->signalfd_wqh);
1586 }
1587 
1588 void __init proc_caches_init(void)
1589 {
1590 	sighand_cachep = kmem_cache_create("sighand_cache",
1591 			sizeof(struct sighand_struct), 0,
1592 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
1593 			SLAB_NOTRACK, sighand_ctor);
1594 	signal_cachep = kmem_cache_create("signal_cache",
1595 			sizeof(struct signal_struct), 0,
1596 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1597 	files_cachep = kmem_cache_create("files_cache",
1598 			sizeof(struct files_struct), 0,
1599 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1600 	fs_cachep = kmem_cache_create("fs_cache",
1601 			sizeof(struct fs_struct), 0,
1602 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1603 	/*
1604 	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
1605 	 * whole struct cpumask for the OFFSTACK case. We could change
1606 	 * this to *only* allocate as much of it as required by the
1607 	 * maximum number of CPUs we can ever have.  The cpumask_allocation
1608 	 * is at the end of the structure, exactly for that reason.
1609 	 */
1610 	mm_cachep = kmem_cache_create("mm_struct",
1611 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1612 			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1613 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
1614 	mmap_init();
1615 	nsproxy_cache_init();
1616 }
1617 
1618 /*
1619  * Check constraints on flags passed to the unshare system call.
1620  */
1621 static int check_unshare_flags(unsigned long unshare_flags)
1622 {
1623 	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1624 				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1625 				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
1626 		return -EINVAL;
1627 	/*
1628 	 * Not implemented, but pretend it works if there is nothing to
1629 	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
1630 	 * also requires unsharing the VM.
1631 	 */
1632 	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1633 		/* FIXME: get_task_mm() increments ->mm_users */
1634 		if (atomic_read(&current->mm->mm_users) > 1)
1635 			return -EINVAL;
1636 	}
1637 
1638 	return 0;
1639 }
1640 
1641 /*
1642  * Unshare the filesystem structure if it is being shared
1643  */
1644 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1645 {
1646 	struct fs_struct *fs = current->fs;
1647 
1648 	if (!(unshare_flags & CLONE_FS) || !fs)
1649 		return 0;
1650 
1651 	/* don't need lock here; in the worst case we'll do a useless copy */
1652 	if (fs->users == 1)
1653 		return 0;
1654 
1655 	*new_fsp = copy_fs_struct(fs);
1656 	if (!*new_fsp)
1657 		return -ENOMEM;
1658 
1659 	return 0;
1660 }
1661 
1662 /*
1663  * Unshare file descriptor table if it is being shared
1664  */
1665 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1666 {
1667 	struct files_struct *fd = current->files;
1668 	int error = 0;
1669 
1670 	if ((unshare_flags & CLONE_FILES) &&
1671 	    (fd && atomic_read(&fd->count) > 1)) {
1672 		*new_fdp = dup_fd(fd, &error);
1673 		if (!*new_fdp)
1674 			return error;
1675 	}
1676 
1677 	return 0;
1678 }
1679 
1680 /*
1681  * unshare allows a process to 'unshare' part of the process
1682  * context which was originally shared using clone.  copy_*
1683  * functions used by do_fork() cannot be used here directly
1684  * because they modify an inactive task_struct that is being
1685  * constructed. Here we are modifying the current, active,
1686  * task_struct.
1687  */
1688 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1689 {
1690 	struct fs_struct *fs, *new_fs = NULL;
1691 	struct files_struct *fd, *new_fd = NULL;
1692 	struct nsproxy *new_nsproxy = NULL;
1693 	int do_sysvsem = 0;
1694 	int err;
1695 
1696 	err = check_unshare_flags(unshare_flags);
1697 	if (err)
1698 		goto bad_unshare_out;
1699 
1700 	/*
1701 	 * If unsharing namespace, must also unshare filesystem information.
1702 	 */
1703 	if (unshare_flags & CLONE_NEWNS)
1704 		unshare_flags |= CLONE_FS;
1705 	/*
1706 	 * CLONE_NEWIPC must also detach from the undolist: after switching
1707 	 * to a new ipc namespace, the semaphore arrays from the old
1708 	 * namespace are unreachable.
1709 	 */
1710 	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
1711 		do_sysvsem = 1;
1712 	err = unshare_fs(unshare_flags, &new_fs);
1713 	if (err)
1714 		goto bad_unshare_out;
1715 	err = unshare_fd(unshare_flags, &new_fd);
1716 	if (err)
1717 		goto bad_unshare_cleanup_fs;
1718 	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
1719 	if (err)
1720 		goto bad_unshare_cleanup_fd;
1721 
1722 	if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
1723 		if (do_sysvsem) {
1724 			/*
1725 			 * CLONE_SYSVSEM is equivalent to sys_exit().
1726 			 */
1727 			exit_sem(current);
1728 		}
1729 
1730 		if (new_nsproxy) {
1731 			switch_task_namespaces(current, new_nsproxy);
1732 			new_nsproxy = NULL;
1733 		}
1734 
1735 		task_lock(current);
1736 
1737 		if (new_fs) {
1738 			fs = current->fs;
1739 			spin_lock(&fs->lock);
1740 			current->fs = new_fs;
1741 			if (--fs->users)
1742 				new_fs = NULL;
1743 			else
1744 				new_fs = fs;
1745 			spin_unlock(&fs->lock);
1746 		}
1747 
1748 		if (new_fd) {
1749 			fd = current->files;
1750 			current->files = new_fd;
1751 			new_fd = fd;
1752 		}
1753 
1754 		task_unlock(current);
1755 	}
1756 
1757 	if (new_nsproxy)
1758 		put_nsproxy(new_nsproxy);
1759 
1760 bad_unshare_cleanup_fd:
1761 	if (new_fd)
1762 		put_files_struct(new_fd);
1763 
1764 bad_unshare_cleanup_fs:
1765 	if (new_fs)
1766 		free_fs_struct(new_fs);
1767 
1768 bad_unshare_out:
1769 	return err;
1770 }
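
/*
 * Editor's usage sketch (hypothetical userspace snippet, not part of this
 * file): a privileged process that wants a private mount namespace without
 * forking can call
 *
 *	if (unshare(CLONE_NEWNS) == -1)
 *		perror("unshare");
 *
 * after which the flag handling above also forces CLONE_FS, so subsequent
 * mounts are no longer shared with the rest of the system; CLONE_NEWNS
 * requires CAP_SYS_ADMIN.
 */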
1771 
1772 /*
1773  *	Helper to unshare the files of the current task.
1774  *	We don't want to expose copy_files internals to
1775  *	the exec layer of the kernel.
1776  */
1777 
1778 int unshare_files(struct files_struct **displaced)
1779 {
1780 	struct task_struct *task = current;
1781 	struct files_struct *copy = NULL;
1782 	int error;
1783 
1784 	error = unshare_fd(CLONE_FILES, &copy);
1785 	if (error || !copy) {
1786 		*displaced = NULL;
1787 		return error;
1788 	}
1789 	*displaced = task->files;
1790 	task_lock(task);
1791 	task->files = copy;
1792 	task_unlock(task);
1793 	return 0;
1794 }
1795