xref: /openbmc/linux/mm/mmap.c (revision 93d90ad7)
1 /*
2  * mm/mmap.c
3  *
4  * Written by obz.
5  *
6  * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
7  */
8 
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10 
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/backing-dev.h>
14 #include <linux/mm.h>
15 #include <linux/vmacache.h>
16 #include <linux/shm.h>
17 #include <linux/mman.h>
18 #include <linux/pagemap.h>
19 #include <linux/swap.h>
20 #include <linux/syscalls.h>
21 #include <linux/capability.h>
22 #include <linux/init.h>
23 #include <linux/file.h>
24 #include <linux/fs.h>
25 #include <linux/personality.h>
26 #include <linux/security.h>
27 #include <linux/hugetlb.h>
28 #include <linux/profile.h>
29 #include <linux/export.h>
30 #include <linux/mount.h>
31 #include <linux/mempolicy.h>
32 #include <linux/rmap.h>
33 #include <linux/mmu_notifier.h>
34 #include <linux/mmdebug.h>
35 #include <linux/perf_event.h>
36 #include <linux/audit.h>
37 #include <linux/khugepaged.h>
38 #include <linux/uprobes.h>
39 #include <linux/rbtree_augmented.h>
40 #include <linux/sched/sysctl.h>
41 #include <linux/notifier.h>
42 #include <linux/memory.h>
43 #include <linux/printk.h>
44 
45 #include <asm/uaccess.h>
46 #include <asm/cacheflush.h>
47 #include <asm/tlb.h>
48 #include <asm/mmu_context.h>
49 
50 #include "internal.h"
51 
52 #ifndef arch_mmap_check
53 #define arch_mmap_check(addr, len, flags)	(0)
54 #endif
55 
56 #ifndef arch_rebalance_pgtables
57 #define arch_rebalance_pgtables(addr, len)		(addr)
58 #endif
59 
60 static void unmap_region(struct mm_struct *mm,
61 		struct vm_area_struct *vma, struct vm_area_struct *prev,
62 		unsigned long start, unsigned long end);
63 
64 /* description of effects of mapping type and prot in current implementation.
65  * this is due to the limited x86 page protection hardware.  The expected
66  * behavior is in parens:
67  *
68  * map_type	prot
69  *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
70  * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
71  *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
72  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
73  *
74  * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
75  *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
76  *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
77  *
78  */
79 pgprot_t protection_map[16] = {
80 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
81 	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
82 };
83 
84 pgprot_t vm_get_page_prot(unsigned long vm_flags)
85 {
86 	return __pgprot(pgprot_val(protection_map[vm_flags &
87 				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
88 			pgprot_val(arch_vm_get_page_prot(vm_flags)));
89 }
90 EXPORT_SYMBOL(vm_get_page_prot);
91 
92 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
93 {
94 	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
95 }
96 
97 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
98 void vma_set_page_prot(struct vm_area_struct *vma)
99 {
100 	unsigned long vm_flags = vma->vm_flags;
101 
102 	vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
103 	if (vma_wants_writenotify(vma)) {
104 		vm_flags &= ~VM_SHARED;
105 		vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
106 						     vm_flags);
107 	}
108 }
109 
110 
111 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
112 int sysctl_overcommit_ratio __read_mostly = 50;	/* default is 50% */
113 unsigned long sysctl_overcommit_kbytes __read_mostly;
114 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
115 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
116 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
117 /*
118  * Make sure vm_committed_as in one cacheline and not cacheline shared with
119  * other variables. It can be updated by several CPUs frequently.
120  */
121 struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
122 
123 /*
124  * The global memory commitment made in the system can be a metric
125  * that can be used to drive ballooning decisions when Linux is hosted
126  * as a guest. On Hyper-V, the host implements a policy engine for dynamically
127  * balancing memory across competing virtual machines that are hosted.
128  * Several metrics drive this policy engine including the guest reported
129  * memory commitment.
130  */
131 unsigned long vm_memory_committed(void)
132 {
133 	return percpu_counter_read_positive(&vm_committed_as);
134 }
135 EXPORT_SYMBOL_GPL(vm_memory_committed);
136 
137 /*
138  * Check that a process has enough memory to allocate a new virtual
139  * mapping. 0 means there is enough memory for the allocation to
140  * succeed and -ENOMEM implies there is not.
141  *
142  * We currently support three overcommit policies, which are set via the
143  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
144  *
145  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
146  * Additional code 2002 Jul 20 by Robert Love.
147  *
148  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
149  *
150  * Note this is a helper function intended to be used by LSMs which
151  * wish to use this logic.
152  */
153 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
154 {
155 	unsigned long free, allowed, reserve;
156 
157 	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
158 			-(s64)vm_committed_as_batch * num_online_cpus(),
159 			"memory commitment underflow");
160 
161 	vm_acct_memory(pages);
162 
163 	/*
164 	 * Sometimes we want to use more memory than we have
165 	 */
166 	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
167 		return 0;
168 
169 	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
170 		free = global_page_state(NR_FREE_PAGES);
171 		free += global_page_state(NR_FILE_PAGES);
172 
173 		/*
174 		 * shmem pages shouldn't be counted as free in this
175 		 * case, they can't be purged, only swapped out, and
176 		 * that won't affect the overall amount of available
177 		 * memory in the system.
178 		 */
179 		free -= global_page_state(NR_SHMEM);
180 
181 		free += get_nr_swap_pages();
182 
183 		/*
184 		 * Any slabs which are created with the
185 		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
186 		 * which are reclaimable, under pressure.  The dentry
187 		 * cache and most inode caches should fall into this
188 		 */
189 		free += global_page_state(NR_SLAB_RECLAIMABLE);
190 
191 		/*
192 		 * Leave reserved pages. The pages are not for anonymous pages.
193 		 */
194 		if (free <= totalreserve_pages)
195 			goto error;
196 		else
197 			free -= totalreserve_pages;
198 
199 		/*
200 		 * Reserve some for root
201 		 */
202 		if (!cap_sys_admin)
203 			free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
204 
205 		if (free > pages)
206 			return 0;
207 
208 		goto error;
209 	}
210 
211 	allowed = vm_commit_limit();
212 	/*
213 	 * Reserve some for root
214 	 */
215 	if (!cap_sys_admin)
216 		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
217 
218 	/*
219 	 * Don't let a single process grow so big a user can't recover
220 	 */
221 	if (mm) {
222 		reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
223 		allowed -= min(mm->total_vm / 32, reserve);
224 	}
225 
226 	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
227 		return 0;
228 error:
229 	vm_unacct_memory(pages);
230 
231 	return -ENOMEM;
232 }
233 
234 /*
235  * Requires inode->i_mapping->i_mmap_rwsem
236  */
237 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
238 		struct file *file, struct address_space *mapping)
239 {
240 	if (vma->vm_flags & VM_DENYWRITE)
241 		atomic_inc(&file_inode(file)->i_writecount);
242 	if (vma->vm_flags & VM_SHARED)
243 		mapping_unmap_writable(mapping);
244 
245 	flush_dcache_mmap_lock(mapping);
246 	if (unlikely(vma->vm_flags & VM_NONLINEAR))
247 		list_del_init(&vma->shared.nonlinear);
248 	else
249 		vma_interval_tree_remove(vma, &mapping->i_mmap);
250 	flush_dcache_mmap_unlock(mapping);
251 }
252 
253 /*
254  * Unlink a file-based vm structure from its interval tree, to hide
255  * vma from rmap and vmtruncate before freeing its page tables.
256  */
257 void unlink_file_vma(struct vm_area_struct *vma)
258 {
259 	struct file *file = vma->vm_file;
260 
261 	if (file) {
262 		struct address_space *mapping = file->f_mapping;
263 		i_mmap_lock_write(mapping);
264 		__remove_shared_vm_struct(vma, file, mapping);
265 		i_mmap_unlock_write(mapping);
266 	}
267 }
268 
269 /*
270  * Close a vm structure and free it, returning the next.
271  */
272 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
273 {
274 	struct vm_area_struct *next = vma->vm_next;
275 
276 	might_sleep();
277 	if (vma->vm_ops && vma->vm_ops->close)
278 		vma->vm_ops->close(vma);
279 	if (vma->vm_file)
280 		fput(vma->vm_file);
281 	mpol_put(vma_policy(vma));
282 	kmem_cache_free(vm_area_cachep, vma);
283 	return next;
284 }
285 
286 static unsigned long do_brk(unsigned long addr, unsigned long len);
287 
288 SYSCALL_DEFINE1(brk, unsigned long, brk)
289 {
290 	unsigned long retval;
291 	unsigned long newbrk, oldbrk;
292 	struct mm_struct *mm = current->mm;
293 	unsigned long min_brk;
294 	bool populate;
295 
296 	down_write(&mm->mmap_sem);
297 
298 #ifdef CONFIG_COMPAT_BRK
299 	/*
300 	 * CONFIG_COMPAT_BRK can still be overridden by setting
301 	 * randomize_va_space to 2, which will still cause mm->start_brk
302 	 * to be arbitrarily shifted
303 	 */
304 	if (current->brk_randomized)
305 		min_brk = mm->start_brk;
306 	else
307 		min_brk = mm->end_data;
308 #else
309 	min_brk = mm->start_brk;
310 #endif
311 	if (brk < min_brk)
312 		goto out;
313 
314 	/*
315 	 * Check against rlimit here. If this check is done later after the test
316 	 * of oldbrk with newbrk then it can escape the test and let the data
317 	 * segment grow beyond its set limit the in case where the limit is
318 	 * not page aligned -Ram Gupta
319 	 */
320 	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
321 			      mm->end_data, mm->start_data))
322 		goto out;
323 
324 	newbrk = PAGE_ALIGN(brk);
325 	oldbrk = PAGE_ALIGN(mm->brk);
326 	if (oldbrk == newbrk)
327 		goto set_brk;
328 
329 	/* Always allow shrinking brk. */
330 	if (brk <= mm->brk) {
331 		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
332 			goto set_brk;
333 		goto out;
334 	}
335 
336 	/* Check against existing mmap mappings. */
337 	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
338 		goto out;
339 
340 	/* Ok, looks good - let it rip. */
341 	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
342 		goto out;
343 
344 set_brk:
345 	mm->brk = brk;
346 	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
347 	up_write(&mm->mmap_sem);
348 	if (populate)
349 		mm_populate(oldbrk, newbrk - oldbrk);
350 	return brk;
351 
352 out:
353 	retval = mm->brk;
354 	up_write(&mm->mmap_sem);
355 	return retval;
356 }
357 
358 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
359 {
360 	unsigned long max, subtree_gap;
361 	max = vma->vm_start;
362 	if (vma->vm_prev)
363 		max -= vma->vm_prev->vm_end;
364 	if (vma->vm_rb.rb_left) {
365 		subtree_gap = rb_entry(vma->vm_rb.rb_left,
366 				struct vm_area_struct, vm_rb)->rb_subtree_gap;
367 		if (subtree_gap > max)
368 			max = subtree_gap;
369 	}
370 	if (vma->vm_rb.rb_right) {
371 		subtree_gap = rb_entry(vma->vm_rb.rb_right,
372 				struct vm_area_struct, vm_rb)->rb_subtree_gap;
373 		if (subtree_gap > max)
374 			max = subtree_gap;
375 	}
376 	return max;
377 }
378 
379 #ifdef CONFIG_DEBUG_VM_RB
380 static int browse_rb(struct rb_root *root)
381 {
382 	int i = 0, j, bug = 0;
383 	struct rb_node *nd, *pn = NULL;
384 	unsigned long prev = 0, pend = 0;
385 
386 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
387 		struct vm_area_struct *vma;
388 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
389 		if (vma->vm_start < prev) {
390 			pr_emerg("vm_start %lx < prev %lx\n",
391 				  vma->vm_start, prev);
392 			bug = 1;
393 		}
394 		if (vma->vm_start < pend) {
395 			pr_emerg("vm_start %lx < pend %lx\n",
396 				  vma->vm_start, pend);
397 			bug = 1;
398 		}
399 		if (vma->vm_start > vma->vm_end) {
400 			pr_emerg("vm_start %lx > vm_end %lx\n",
401 				  vma->vm_start, vma->vm_end);
402 			bug = 1;
403 		}
404 		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
405 			pr_emerg("free gap %lx, correct %lx\n",
406 			       vma->rb_subtree_gap,
407 			       vma_compute_subtree_gap(vma));
408 			bug = 1;
409 		}
410 		i++;
411 		pn = nd;
412 		prev = vma->vm_start;
413 		pend = vma->vm_end;
414 	}
415 	j = 0;
416 	for (nd = pn; nd; nd = rb_prev(nd))
417 		j++;
418 	if (i != j) {
419 		pr_emerg("backwards %d, forwards %d\n", j, i);
420 		bug = 1;
421 	}
422 	return bug ? -1 : i;
423 }
424 
425 static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
426 {
427 	struct rb_node *nd;
428 
429 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
430 		struct vm_area_struct *vma;
431 		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
432 		VM_BUG_ON_VMA(vma != ignore &&
433 			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
434 			vma);
435 	}
436 }
437 
438 static void validate_mm(struct mm_struct *mm)
439 {
440 	int bug = 0;
441 	int i = 0;
442 	unsigned long highest_address = 0;
443 	struct vm_area_struct *vma = mm->mmap;
444 
445 	while (vma) {
446 		struct anon_vma_chain *avc;
447 
448 		vma_lock_anon_vma(vma);
449 		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
450 			anon_vma_interval_tree_verify(avc);
451 		vma_unlock_anon_vma(vma);
452 		highest_address = vma->vm_end;
453 		vma = vma->vm_next;
454 		i++;
455 	}
456 	if (i != mm->map_count) {
457 		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
458 		bug = 1;
459 	}
460 	if (highest_address != mm->highest_vm_end) {
461 		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
462 			  mm->highest_vm_end, highest_address);
463 		bug = 1;
464 	}
465 	i = browse_rb(&mm->mm_rb);
466 	if (i != mm->map_count) {
467 		if (i != -1)
468 			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
469 		bug = 1;
470 	}
471 	VM_BUG_ON_MM(bug, mm);
472 }
473 #else
474 #define validate_mm_rb(root, ignore) do { } while (0)
475 #define validate_mm(mm) do { } while (0)
476 #endif
477 
478 RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
479 		     unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
480 
481 /*
482  * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
483  * vma->vm_prev->vm_end values changed, without modifying the vma's position
484  * in the rbtree.
485  */
486 static void vma_gap_update(struct vm_area_struct *vma)
487 {
488 	/*
489 	 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
490 	 * function that does exacltly what we want.
491 	 */
492 	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
493 }
494 
495 static inline void vma_rb_insert(struct vm_area_struct *vma,
496 				 struct rb_root *root)
497 {
498 	/* All rb_subtree_gap values must be consistent prior to insertion */
499 	validate_mm_rb(root, NULL);
500 
501 	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
502 }
503 
504 static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
505 {
506 	/*
507 	 * All rb_subtree_gap values must be consistent prior to erase,
508 	 * with the possible exception of the vma being erased.
509 	 */
510 	validate_mm_rb(root, vma);
511 
512 	/*
513 	 * Note rb_erase_augmented is a fairly large inline function,
514 	 * so make sure we instantiate it only once with our desired
515 	 * augmented rbtree callbacks.
516 	 */
517 	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
518 }
519 
520 /*
521  * vma has some anon_vma assigned, and is already inserted on that
522  * anon_vma's interval trees.
523  *
524  * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
525  * vma must be removed from the anon_vma's interval trees using
526  * anon_vma_interval_tree_pre_update_vma().
527  *
528  * After the update, the vma will be reinserted using
529  * anon_vma_interval_tree_post_update_vma().
530  *
531  * The entire update must be protected by exclusive mmap_sem and by
532  * the root anon_vma's mutex.
533  */
534 static inline void
535 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
536 {
537 	struct anon_vma_chain *avc;
538 
539 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
540 		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
541 }
542 
543 static inline void
544 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
545 {
546 	struct anon_vma_chain *avc;
547 
548 	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
549 		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
550 }
551 
552 static int find_vma_links(struct mm_struct *mm, unsigned long addr,
553 		unsigned long end, struct vm_area_struct **pprev,
554 		struct rb_node ***rb_link, struct rb_node **rb_parent)
555 {
556 	struct rb_node **__rb_link, *__rb_parent, *rb_prev;
557 
558 	__rb_link = &mm->mm_rb.rb_node;
559 	rb_prev = __rb_parent = NULL;
560 
561 	while (*__rb_link) {
562 		struct vm_area_struct *vma_tmp;
563 
564 		__rb_parent = *__rb_link;
565 		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
566 
567 		if (vma_tmp->vm_end > addr) {
568 			/* Fail if an existing vma overlaps the area */
569 			if (vma_tmp->vm_start < end)
570 				return -ENOMEM;
571 			__rb_link = &__rb_parent->rb_left;
572 		} else {
573 			rb_prev = __rb_parent;
574 			__rb_link = &__rb_parent->rb_right;
575 		}
576 	}
577 
578 	*pprev = NULL;
579 	if (rb_prev)
580 		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
581 	*rb_link = __rb_link;
582 	*rb_parent = __rb_parent;
583 	return 0;
584 }
585 
586 static unsigned long count_vma_pages_range(struct mm_struct *mm,
587 		unsigned long addr, unsigned long end)
588 {
589 	unsigned long nr_pages = 0;
590 	struct vm_area_struct *vma;
591 
592 	/* Find first overlaping mapping */
593 	vma = find_vma_intersection(mm, addr, end);
594 	if (!vma)
595 		return 0;
596 
597 	nr_pages = (min(end, vma->vm_end) -
598 		max(addr, vma->vm_start)) >> PAGE_SHIFT;
599 
600 	/* Iterate over the rest of the overlaps */
601 	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
602 		unsigned long overlap_len;
603 
604 		if (vma->vm_start > end)
605 			break;
606 
607 		overlap_len = min(end, vma->vm_end) - vma->vm_start;
608 		nr_pages += overlap_len >> PAGE_SHIFT;
609 	}
610 
611 	return nr_pages;
612 }
613 
614 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
615 		struct rb_node **rb_link, struct rb_node *rb_parent)
616 {
617 	/* Update tracking information for the gap following the new vma. */
618 	if (vma->vm_next)
619 		vma_gap_update(vma->vm_next);
620 	else
621 		mm->highest_vm_end = vma->vm_end;
622 
623 	/*
624 	 * vma->vm_prev wasn't known when we followed the rbtree to find the
625 	 * correct insertion point for that vma. As a result, we could not
626 	 * update the vma vm_rb parents rb_subtree_gap values on the way down.
627 	 * So, we first insert the vma with a zero rb_subtree_gap value
628 	 * (to be consistent with what we did on the way down), and then
629 	 * immediately update the gap to the correct value. Finally we
630 	 * rebalance the rbtree after all augmented values have been set.
631 	 */
632 	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
633 	vma->rb_subtree_gap = 0;
634 	vma_gap_update(vma);
635 	vma_rb_insert(vma, &mm->mm_rb);
636 }
637 
638 static void __vma_link_file(struct vm_area_struct *vma)
639 {
640 	struct file *file;
641 
642 	file = vma->vm_file;
643 	if (file) {
644 		struct address_space *mapping = file->f_mapping;
645 
646 		if (vma->vm_flags & VM_DENYWRITE)
647 			atomic_dec(&file_inode(file)->i_writecount);
648 		if (vma->vm_flags & VM_SHARED)
649 			atomic_inc(&mapping->i_mmap_writable);
650 
651 		flush_dcache_mmap_lock(mapping);
652 		if (unlikely(vma->vm_flags & VM_NONLINEAR))
653 			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
654 		else
655 			vma_interval_tree_insert(vma, &mapping->i_mmap);
656 		flush_dcache_mmap_unlock(mapping);
657 	}
658 }
659 
660 static void
661 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
662 	struct vm_area_struct *prev, struct rb_node **rb_link,
663 	struct rb_node *rb_parent)
664 {
665 	__vma_link_list(mm, vma, prev, rb_parent);
666 	__vma_link_rb(mm, vma, rb_link, rb_parent);
667 }
668 
669 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
670 			struct vm_area_struct *prev, struct rb_node **rb_link,
671 			struct rb_node *rb_parent)
672 {
673 	struct address_space *mapping = NULL;
674 
675 	if (vma->vm_file) {
676 		mapping = vma->vm_file->f_mapping;
677 		i_mmap_lock_write(mapping);
678 	}
679 
680 	__vma_link(mm, vma, prev, rb_link, rb_parent);
681 	__vma_link_file(vma);
682 
683 	if (mapping)
684 		i_mmap_unlock_write(mapping);
685 
686 	mm->map_count++;
687 	validate_mm(mm);
688 }
689 
690 /*
691  * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
692  * mm's list and rbtree.  It has already been inserted into the interval tree.
693  */
694 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
695 {
696 	struct vm_area_struct *prev;
697 	struct rb_node **rb_link, *rb_parent;
698 
699 	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
700 			   &prev, &rb_link, &rb_parent))
701 		BUG();
702 	__vma_link(mm, vma, prev, rb_link, rb_parent);
703 	mm->map_count++;
704 }
705 
706 static inline void
707 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
708 		struct vm_area_struct *prev)
709 {
710 	struct vm_area_struct *next;
711 
712 	vma_rb_erase(vma, &mm->mm_rb);
713 	prev->vm_next = next = vma->vm_next;
714 	if (next)
715 		next->vm_prev = prev;
716 
717 	/* Kill the cache */
718 	vmacache_invalidate(mm);
719 }
720 
721 /*
722  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
723  * is already present in an i_mmap tree without adjusting the tree.
724  * The following helper function should be used when such adjustments
725  * are necessary.  The "insert" vma (if any) is to be inserted
726  * before we drop the necessary locks.
727  */
728 int vma_adjust(struct vm_area_struct *vma, unsigned long start,
729 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
730 {
731 	struct mm_struct *mm = vma->vm_mm;
732 	struct vm_area_struct *next = vma->vm_next;
733 	struct vm_area_struct *importer = NULL;
734 	struct address_space *mapping = NULL;
735 	struct rb_root *root = NULL;
736 	struct anon_vma *anon_vma = NULL;
737 	struct file *file = vma->vm_file;
738 	bool start_changed = false, end_changed = false;
739 	long adjust_next = 0;
740 	int remove_next = 0;
741 
742 	if (next && !insert) {
743 		struct vm_area_struct *exporter = NULL;
744 
745 		if (end >= next->vm_end) {
746 			/*
747 			 * vma expands, overlapping all the next, and
748 			 * perhaps the one after too (mprotect case 6).
749 			 */
750 again:			remove_next = 1 + (end > next->vm_end);
751 			end = next->vm_end;
752 			exporter = next;
753 			importer = vma;
754 		} else if (end > next->vm_start) {
755 			/*
756 			 * vma expands, overlapping part of the next:
757 			 * mprotect case 5 shifting the boundary up.
758 			 */
759 			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
760 			exporter = next;
761 			importer = vma;
762 		} else if (end < vma->vm_end) {
763 			/*
764 			 * vma shrinks, and !insert tells it's not
765 			 * split_vma inserting another: so it must be
766 			 * mprotect case 4 shifting the boundary down.
767 			 */
768 			adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
769 			exporter = vma;
770 			importer = next;
771 		}
772 
773 		/*
774 		 * Easily overlooked: when mprotect shifts the boundary,
775 		 * make sure the expanding vma has anon_vma set if the
776 		 * shrinking vma had, to cover any anon pages imported.
777 		 */
778 		if (exporter && exporter->anon_vma && !importer->anon_vma) {
779 			int error;
780 
781 			importer->anon_vma = exporter->anon_vma;
782 			error = anon_vma_clone(importer, exporter);
783 			if (error) {
784 				importer->anon_vma = NULL;
785 				return error;
786 			}
787 		}
788 	}
789 
790 	if (file) {
791 		mapping = file->f_mapping;
792 		if (!(vma->vm_flags & VM_NONLINEAR)) {
793 			root = &mapping->i_mmap;
794 			uprobe_munmap(vma, vma->vm_start, vma->vm_end);
795 
796 			if (adjust_next)
797 				uprobe_munmap(next, next->vm_start,
798 							next->vm_end);
799 		}
800 
801 		i_mmap_lock_write(mapping);
802 		if (insert) {
803 			/*
804 			 * Put into interval tree now, so instantiated pages
805 			 * are visible to arm/parisc __flush_dcache_page
806 			 * throughout; but we cannot insert into address
807 			 * space until vma start or end is updated.
808 			 */
809 			__vma_link_file(insert);
810 		}
811 	}
812 
813 	vma_adjust_trans_huge(vma, start, end, adjust_next);
814 
815 	anon_vma = vma->anon_vma;
816 	if (!anon_vma && adjust_next)
817 		anon_vma = next->anon_vma;
818 	if (anon_vma) {
819 		VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
820 			  anon_vma != next->anon_vma, next);
821 		anon_vma_lock_write(anon_vma);
822 		anon_vma_interval_tree_pre_update_vma(vma);
823 		if (adjust_next)
824 			anon_vma_interval_tree_pre_update_vma(next);
825 	}
826 
827 	if (root) {
828 		flush_dcache_mmap_lock(mapping);
829 		vma_interval_tree_remove(vma, root);
830 		if (adjust_next)
831 			vma_interval_tree_remove(next, root);
832 	}
833 
834 	if (start != vma->vm_start) {
835 		vma->vm_start = start;
836 		start_changed = true;
837 	}
838 	if (end != vma->vm_end) {
839 		vma->vm_end = end;
840 		end_changed = true;
841 	}
842 	vma->vm_pgoff = pgoff;
843 	if (adjust_next) {
844 		next->vm_start += adjust_next << PAGE_SHIFT;
845 		next->vm_pgoff += adjust_next;
846 	}
847 
848 	if (root) {
849 		if (adjust_next)
850 			vma_interval_tree_insert(next, root);
851 		vma_interval_tree_insert(vma, root);
852 		flush_dcache_mmap_unlock(mapping);
853 	}
854 
855 	if (remove_next) {
856 		/*
857 		 * vma_merge has merged next into vma, and needs
858 		 * us to remove next before dropping the locks.
859 		 */
860 		__vma_unlink(mm, next, vma);
861 		if (file)
862 			__remove_shared_vm_struct(next, file, mapping);
863 	} else if (insert) {
864 		/*
865 		 * split_vma has split insert from vma, and needs
866 		 * us to insert it before dropping the locks
867 		 * (it may either follow vma or precede it).
868 		 */
869 		__insert_vm_struct(mm, insert);
870 	} else {
871 		if (start_changed)
872 			vma_gap_update(vma);
873 		if (end_changed) {
874 			if (!next)
875 				mm->highest_vm_end = end;
876 			else if (!adjust_next)
877 				vma_gap_update(next);
878 		}
879 	}
880 
881 	if (anon_vma) {
882 		anon_vma_interval_tree_post_update_vma(vma);
883 		if (adjust_next)
884 			anon_vma_interval_tree_post_update_vma(next);
885 		anon_vma_unlock_write(anon_vma);
886 	}
887 	if (mapping)
888 		i_mmap_unlock_write(mapping);
889 
890 	if (root) {
891 		uprobe_mmap(vma);
892 
893 		if (adjust_next)
894 			uprobe_mmap(next);
895 	}
896 
897 	if (remove_next) {
898 		if (file) {
899 			uprobe_munmap(next, next->vm_start, next->vm_end);
900 			fput(file);
901 		}
902 		if (next->anon_vma)
903 			anon_vma_merge(vma, next);
904 		mm->map_count--;
905 		mpol_put(vma_policy(next));
906 		kmem_cache_free(vm_area_cachep, next);
907 		/*
908 		 * In mprotect's case 6 (see comments on vma_merge),
909 		 * we must remove another next too. It would clutter
910 		 * up the code too much to do both in one go.
911 		 */
912 		next = vma->vm_next;
913 		if (remove_next == 2)
914 			goto again;
915 		else if (next)
916 			vma_gap_update(next);
917 		else
918 			mm->highest_vm_end = end;
919 	}
920 	if (insert && file)
921 		uprobe_mmap(insert);
922 
923 	validate_mm(mm);
924 
925 	return 0;
926 }
927 
928 /*
929  * If the vma has a ->close operation then the driver probably needs to release
930  * per-vma resources, so we don't attempt to merge those.
931  */
932 static inline int is_mergeable_vma(struct vm_area_struct *vma,
933 			struct file *file, unsigned long vm_flags)
934 {
935 	/*
936 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
937 	 * match the flags but dirty bit -- the caller should mark
938 	 * merged VMA as dirty. If dirty bit won't be excluded from
939 	 * comparison, we increase pressue on the memory system forcing
940 	 * the kernel to generate new VMAs when old one could be
941 	 * extended instead.
942 	 */
943 	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
944 		return 0;
945 	if (vma->vm_file != file)
946 		return 0;
947 	if (vma->vm_ops && vma->vm_ops->close)
948 		return 0;
949 	return 1;
950 }
951 
952 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
953 					struct anon_vma *anon_vma2,
954 					struct vm_area_struct *vma)
955 {
956 	/*
957 	 * The list_is_singular() test is to avoid merging VMA cloned from
958 	 * parents. This can improve scalability caused by anon_vma lock.
959 	 */
960 	if ((!anon_vma1 || !anon_vma2) && (!vma ||
961 		list_is_singular(&vma->anon_vma_chain)))
962 		return 1;
963 	return anon_vma1 == anon_vma2;
964 }
965 
966 /*
967  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
968  * in front of (at a lower virtual address and file offset than) the vma.
969  *
970  * We cannot merge two vmas if they have differently assigned (non-NULL)
971  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
972  *
973  * We don't check here for the merged mmap wrapping around the end of pagecache
974  * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
975  * wrap, nor mmaps which cover the final page at index -1UL.
976  */
977 static int
978 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
979 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
980 {
981 	if (is_mergeable_vma(vma, file, vm_flags) &&
982 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
983 		if (vma->vm_pgoff == vm_pgoff)
984 			return 1;
985 	}
986 	return 0;
987 }
988 
989 /*
990  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
991  * beyond (at a higher virtual address and file offset than) the vma.
992  *
993  * We cannot merge two vmas if they have differently assigned (non-NULL)
994  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
995  */
996 static int
997 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
998 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
999 {
1000 	if (is_mergeable_vma(vma, file, vm_flags) &&
1001 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1002 		pgoff_t vm_pglen;
1003 		vm_pglen = vma_pages(vma);
1004 		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
1005 			return 1;
1006 	}
1007 	return 0;
1008 }
1009 
1010 /*
1011  * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
1012  * whether that can be merged with its predecessor or its successor.
1013  * Or both (it neatly fills a hole).
1014  *
1015  * In most cases - when called for mmap, brk or mremap - [addr,end) is
1016  * certain not to be mapped by the time vma_merge is called; but when
1017  * called for mprotect, it is certain to be already mapped (either at
1018  * an offset within prev, or at the start of next), and the flags of
1019  * this area are about to be changed to vm_flags - and the no-change
1020  * case has already been eliminated.
1021  *
1022  * The following mprotect cases have to be considered, where AAAA is
1023  * the area passed down from mprotect_fixup, never extending beyond one
1024  * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
1025  *
1026  *     AAAA             AAAA                AAAA          AAAA
1027  *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
1028  *    cannot merge    might become    might become    might become
1029  *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
1030  *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
1031  *    mremap move:                                    PPPPNNNNNNNN 8
1032  *        AAAA
1033  *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
1034  *    might become    case 1 below    case 2 below    case 3 below
1035  *
1036  * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
1037  * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
1038  */
1039 struct vm_area_struct *vma_merge(struct mm_struct *mm,
1040 			struct vm_area_struct *prev, unsigned long addr,
1041 			unsigned long end, unsigned long vm_flags,
1042 			struct anon_vma *anon_vma, struct file *file,
1043 			pgoff_t pgoff, struct mempolicy *policy)
1044 {
1045 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
1046 	struct vm_area_struct *area, *next;
1047 	int err;
1048 
1049 	/*
1050 	 * We later require that vma->vm_flags == vm_flags,
1051 	 * so this tests vma->vm_flags & VM_SPECIAL, too.
1052 	 */
1053 	if (vm_flags & VM_SPECIAL)
1054 		return NULL;
1055 
1056 	if (prev)
1057 		next = prev->vm_next;
1058 	else
1059 		next = mm->mmap;
1060 	area = next;
1061 	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
1062 		next = next->vm_next;
1063 
1064 	/*
1065 	 * Can it merge with the predecessor?
1066 	 */
1067 	if (prev && prev->vm_end == addr &&
1068 			mpol_equal(vma_policy(prev), policy) &&
1069 			can_vma_merge_after(prev, vm_flags,
1070 						anon_vma, file, pgoff)) {
1071 		/*
1072 		 * OK, it can.  Can we now merge in the successor as well?
1073 		 */
1074 		if (next && end == next->vm_start &&
1075 				mpol_equal(policy, vma_policy(next)) &&
1076 				can_vma_merge_before(next, vm_flags,
1077 					anon_vma, file, pgoff+pglen) &&
1078 				is_mergeable_anon_vma(prev->anon_vma,
1079 						      next->anon_vma, NULL)) {
1080 							/* cases 1, 6 */
1081 			err = vma_adjust(prev, prev->vm_start,
1082 				next->vm_end, prev->vm_pgoff, NULL);
1083 		} else					/* cases 2, 5, 7 */
1084 			err = vma_adjust(prev, prev->vm_start,
1085 				end, prev->vm_pgoff, NULL);
1086 		if (err)
1087 			return NULL;
1088 		khugepaged_enter_vma_merge(prev, vm_flags);
1089 		return prev;
1090 	}
1091 
1092 	/*
1093 	 * Can this new request be merged in front of next?
1094 	 */
1095 	if (next && end == next->vm_start &&
1096 			mpol_equal(policy, vma_policy(next)) &&
1097 			can_vma_merge_before(next, vm_flags,
1098 					anon_vma, file, pgoff+pglen)) {
1099 		if (prev && addr < prev->vm_end)	/* case 4 */
1100 			err = vma_adjust(prev, prev->vm_start,
1101 				addr, prev->vm_pgoff, NULL);
1102 		else					/* cases 3, 8 */
1103 			err = vma_adjust(area, addr, next->vm_end,
1104 				next->vm_pgoff - pglen, NULL);
1105 		if (err)
1106 			return NULL;
1107 		khugepaged_enter_vma_merge(area, vm_flags);
1108 		return area;
1109 	}
1110 
1111 	return NULL;
1112 }
1113 
1114 /*
1115  * Rough compatbility check to quickly see if it's even worth looking
1116  * at sharing an anon_vma.
1117  *
1118  * They need to have the same vm_file, and the flags can only differ
1119  * in things that mprotect may change.
1120  *
1121  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1122  * we can merge the two vma's. For example, we refuse to merge a vma if
1123  * there is a vm_ops->close() function, because that indicates that the
1124  * driver is doing some kind of reference counting. But that doesn't
1125  * really matter for the anon_vma sharing case.
1126  */
1127 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1128 {
1129 	return a->vm_end == b->vm_start &&
1130 		mpol_equal(vma_policy(a), vma_policy(b)) &&
1131 		a->vm_file == b->vm_file &&
1132 		!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
1133 		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1134 }
1135 
1136 /*
1137  * Do some basic sanity checking to see if we can re-use the anon_vma
1138  * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1139  * the same as 'old', the other will be the new one that is trying
1140  * to share the anon_vma.
1141  *
1142  * NOTE! This runs with mm_sem held for reading, so it is possible that
1143  * the anon_vma of 'old' is concurrently in the process of being set up
1144  * by another page fault trying to merge _that_. But that's ok: if it
1145  * is being set up, that automatically means that it will be a singleton
1146  * acceptable for merging, so we can do all of this optimistically. But
1147  * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
1148  *
1149  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1150  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1151  * is to return an anon_vma that is "complex" due to having gone through
1152  * a fork).
1153  *
1154  * We also make sure that the two vma's are compatible (adjacent,
1155  * and with the same memory policies). That's all stable, even with just
1156  * a read lock on the mm_sem.
1157  */
1158 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1159 {
1160 	if (anon_vma_compatible(a, b)) {
1161 		struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
1162 
1163 		if (anon_vma && list_is_singular(&old->anon_vma_chain))
1164 			return anon_vma;
1165 	}
1166 	return NULL;
1167 }
1168 
1169 /*
1170  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1171  * neighbouring vmas for a suitable anon_vma, before it goes off
1172  * to allocate a new anon_vma.  It checks because a repetitive
1173  * sequence of mprotects and faults may otherwise lead to distinct
1174  * anon_vmas being allocated, preventing vma merge in subsequent
1175  * mprotect.
1176  */
1177 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1178 {
1179 	struct anon_vma *anon_vma;
1180 	struct vm_area_struct *near;
1181 
1182 	near = vma->vm_next;
1183 	if (!near)
1184 		goto try_prev;
1185 
1186 	anon_vma = reusable_anon_vma(near, vma, near);
1187 	if (anon_vma)
1188 		return anon_vma;
1189 try_prev:
1190 	near = vma->vm_prev;
1191 	if (!near)
1192 		goto none;
1193 
1194 	anon_vma = reusable_anon_vma(near, near, vma);
1195 	if (anon_vma)
1196 		return anon_vma;
1197 none:
1198 	/*
1199 	 * There's no absolute need to look only at touching neighbours:
1200 	 * we could search further afield for "compatible" anon_vmas.
1201 	 * But it would probably just be a waste of time searching,
1202 	 * or lead to too many vmas hanging off the same anon_vma.
1203 	 * We're trying to allow mprotect remerging later on,
1204 	 * not trying to minimize memory used for anon_vmas.
1205 	 */
1206 	return NULL;
1207 }
1208 
1209 #ifdef CONFIG_PROC_FS
1210 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
1211 						struct file *file, long pages)
1212 {
1213 	const unsigned long stack_flags
1214 		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
1215 
1216 	mm->total_vm += pages;
1217 
1218 	if (file) {
1219 		mm->shared_vm += pages;
1220 		if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
1221 			mm->exec_vm += pages;
1222 	} else if (flags & stack_flags)
1223 		mm->stack_vm += pages;
1224 }
1225 #endif /* CONFIG_PROC_FS */
1226 
1227 /*
1228  * If a hint addr is less than mmap_min_addr change hint to be as
1229  * low as possible but still greater than mmap_min_addr
1230  */
1231 static inline unsigned long round_hint_to_min(unsigned long hint)
1232 {
1233 	hint &= PAGE_MASK;
1234 	if (((void *)hint != NULL) &&
1235 	    (hint < mmap_min_addr))
1236 		return PAGE_ALIGN(mmap_min_addr);
1237 	return hint;
1238 }
1239 
1240 static inline int mlock_future_check(struct mm_struct *mm,
1241 				     unsigned long flags,
1242 				     unsigned long len)
1243 {
1244 	unsigned long locked, lock_limit;
1245 
1246 	/*  mlock MCL_FUTURE? */
1247 	if (flags & VM_LOCKED) {
1248 		locked = len >> PAGE_SHIFT;
1249 		locked += mm->locked_vm;
1250 		lock_limit = rlimit(RLIMIT_MEMLOCK);
1251 		lock_limit >>= PAGE_SHIFT;
1252 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1253 			return -EAGAIN;
1254 	}
1255 	return 0;
1256 }
1257 
1258 /*
1259  * The caller must hold down_write(&current->mm->mmap_sem).
1260  */
1261 
1262 unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1263 			unsigned long len, unsigned long prot,
1264 			unsigned long flags, unsigned long pgoff,
1265 			unsigned long *populate)
1266 {
1267 	struct mm_struct *mm = current->mm;
1268 	vm_flags_t vm_flags;
1269 
1270 	*populate = 0;
1271 
1272 	/*
1273 	 * Does the application expect PROT_READ to imply PROT_EXEC?
1274 	 *
1275 	 * (the exception is when the underlying filesystem is noexec
1276 	 *  mounted, in which case we dont add PROT_EXEC.)
1277 	 */
1278 	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1279 		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
1280 			prot |= PROT_EXEC;
1281 
1282 	if (!len)
1283 		return -EINVAL;
1284 
1285 	if (!(flags & MAP_FIXED))
1286 		addr = round_hint_to_min(addr);
1287 
1288 	/* Careful about overflows.. */
1289 	len = PAGE_ALIGN(len);
1290 	if (!len)
1291 		return -ENOMEM;
1292 
1293 	/* offset overflow? */
1294 	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1295 		return -EOVERFLOW;
1296 
1297 	/* Too many mappings? */
1298 	if (mm->map_count > sysctl_max_map_count)
1299 		return -ENOMEM;
1300 
1301 	/* Obtain the address to map to. we verify (or select) it and ensure
1302 	 * that it represents a valid section of the address space.
1303 	 */
1304 	addr = get_unmapped_area(file, addr, len, pgoff, flags);
1305 	if (addr & ~PAGE_MASK)
1306 		return addr;
1307 
1308 	/* Do simple checking here so the lower-level routines won't have
1309 	 * to. we assume access permissions have been handled by the open
1310 	 * of the memory object, so we don't do any here.
1311 	 */
1312 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
1313 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1314 
1315 	if (flags & MAP_LOCKED)
1316 		if (!can_do_mlock())
1317 			return -EPERM;
1318 
1319 	if (mlock_future_check(mm, vm_flags, len))
1320 		return -EAGAIN;
1321 
1322 	if (file) {
1323 		struct inode *inode = file_inode(file);
1324 
1325 		switch (flags & MAP_TYPE) {
1326 		case MAP_SHARED:
1327 			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1328 				return -EACCES;
1329 
1330 			/*
1331 			 * Make sure we don't allow writing to an append-only
1332 			 * file..
1333 			 */
1334 			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1335 				return -EACCES;
1336 
1337 			/*
1338 			 * Make sure there are no mandatory locks on the file.
1339 			 */
1340 			if (locks_verify_locked(file))
1341 				return -EAGAIN;
1342 
1343 			vm_flags |= VM_SHARED | VM_MAYSHARE;
1344 			if (!(file->f_mode & FMODE_WRITE))
1345 				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1346 
1347 			/* fall through */
1348 		case MAP_PRIVATE:
1349 			if (!(file->f_mode & FMODE_READ))
1350 				return -EACCES;
1351 			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1352 				if (vm_flags & VM_EXEC)
1353 					return -EPERM;
1354 				vm_flags &= ~VM_MAYEXEC;
1355 			}
1356 
1357 			if (!file->f_op->mmap)
1358 				return -ENODEV;
1359 			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1360 				return -EINVAL;
1361 			break;
1362 
1363 		default:
1364 			return -EINVAL;
1365 		}
1366 	} else {
1367 		switch (flags & MAP_TYPE) {
1368 		case MAP_SHARED:
1369 			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1370 				return -EINVAL;
1371 			/*
1372 			 * Ignore pgoff.
1373 			 */
1374 			pgoff = 0;
1375 			vm_flags |= VM_SHARED | VM_MAYSHARE;
1376 			break;
1377 		case MAP_PRIVATE:
1378 			/*
1379 			 * Set pgoff according to addr for anon_vma.
1380 			 */
1381 			pgoff = addr >> PAGE_SHIFT;
1382 			break;
1383 		default:
1384 			return -EINVAL;
1385 		}
1386 	}
1387 
1388 	/*
1389 	 * Set 'VM_NORESERVE' if we should not account for the
1390 	 * memory use of this mapping.
1391 	 */
1392 	if (flags & MAP_NORESERVE) {
1393 		/* We honor MAP_NORESERVE if allowed to overcommit */
1394 		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1395 			vm_flags |= VM_NORESERVE;
1396 
1397 		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
1398 		if (file && is_file_hugepages(file))
1399 			vm_flags |= VM_NORESERVE;
1400 	}
1401 
1402 	addr = mmap_region(file, addr, len, vm_flags, pgoff);
1403 	if (!IS_ERR_VALUE(addr) &&
1404 	    ((vm_flags & VM_LOCKED) ||
1405 	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1406 		*populate = len;
1407 	return addr;
1408 }
1409 
1410 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1411 		unsigned long, prot, unsigned long, flags,
1412 		unsigned long, fd, unsigned long, pgoff)
1413 {
1414 	struct file *file = NULL;
1415 	unsigned long retval = -EBADF;
1416 
1417 	if (!(flags & MAP_ANONYMOUS)) {
1418 		audit_mmap_fd(fd, flags);
1419 		file = fget(fd);
1420 		if (!file)
1421 			goto out;
1422 		if (is_file_hugepages(file))
1423 			len = ALIGN(len, huge_page_size(hstate_file(file)));
1424 		retval = -EINVAL;
1425 		if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
1426 			goto out_fput;
1427 	} else if (flags & MAP_HUGETLB) {
1428 		struct user_struct *user = NULL;
1429 		struct hstate *hs;
1430 
1431 		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & SHM_HUGE_MASK);
1432 		if (!hs)
1433 			return -EINVAL;
1434 
1435 		len = ALIGN(len, huge_page_size(hs));
1436 		/*
1437 		 * VM_NORESERVE is used because the reservations will be
1438 		 * taken when vm_ops->mmap() is called
1439 		 * A dummy user value is used because we are not locking
1440 		 * memory so no accounting is necessary
1441 		 */
1442 		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1443 				VM_NORESERVE,
1444 				&user, HUGETLB_ANONHUGE_INODE,
1445 				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1446 		if (IS_ERR(file))
1447 			return PTR_ERR(file);
1448 	}
1449 
1450 	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1451 
1452 	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1453 out_fput:
1454 	if (file)
1455 		fput(file);
1456 out:
1457 	return retval;
1458 }
1459 
1460 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1461 struct mmap_arg_struct {
1462 	unsigned long addr;
1463 	unsigned long len;
1464 	unsigned long prot;
1465 	unsigned long flags;
1466 	unsigned long fd;
1467 	unsigned long offset;
1468 };
1469 
1470 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1471 {
1472 	struct mmap_arg_struct a;
1473 
1474 	if (copy_from_user(&a, arg, sizeof(a)))
1475 		return -EFAULT;
1476 	if (a.offset & ~PAGE_MASK)
1477 		return -EINVAL;
1478 
1479 	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1480 			      a.offset >> PAGE_SHIFT);
1481 }
1482 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1483 
1484 /*
1485  * Some shared mappigns will want the pages marked read-only
1486  * to track write events. If so, we'll downgrade vm_page_prot
1487  * to the private version (using protection_map[] without the
1488  * VM_SHARED bit).
1489  */
1490 int vma_wants_writenotify(struct vm_area_struct *vma)
1491 {
1492 	vm_flags_t vm_flags = vma->vm_flags;
1493 
1494 	/* If it was private or non-writable, the write bit is already clear */
1495 	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1496 		return 0;
1497 
1498 	/* The backer wishes to know when pages are first written to? */
1499 	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
1500 		return 1;
1501 
1502 	/* The open routine did something to the protections that pgprot_modify
1503 	 * won't preserve? */
1504 	if (pgprot_val(vma->vm_page_prot) !=
1505 	    pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
1506 		return 0;
1507 
1508 	/* Do we need to track softdirty? */
1509 	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
1510 		return 1;
1511 
1512 	/* Specialty mapping? */
1513 	if (vm_flags & VM_PFNMAP)
1514 		return 0;
1515 
1516 	/* Can the mapping track the dirty pages? */
1517 	return vma->vm_file && vma->vm_file->f_mapping &&
1518 		mapping_cap_account_dirty(vma->vm_file->f_mapping);
1519 }
1520 
1521 /*
1522  * We account for memory if it's a private writeable mapping,
1523  * not hugepages and VM_NORESERVE wasn't set.
1524  */
1525 static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1526 {
1527 	/*
1528 	 * hugetlb has its own accounting separate from the core VM
1529 	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1530 	 */
1531 	if (file && is_file_hugepages(file))
1532 		return 0;
1533 
1534 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1535 }
1536 
1537 unsigned long mmap_region(struct file *file, unsigned long addr,
1538 		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
1539 {
1540 	struct mm_struct *mm = current->mm;
1541 	struct vm_area_struct *vma, *prev;
1542 	int error;
1543 	struct rb_node **rb_link, *rb_parent;
1544 	unsigned long charged = 0;
1545 
1546 	/* Check against address space limit. */
1547 	if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
1548 		unsigned long nr_pages;
1549 
1550 		/*
1551 		 * MAP_FIXED may remove pages of mappings that intersects with
1552 		 * requested mapping. Account for the pages it would unmap.
1553 		 */
1554 		if (!(vm_flags & MAP_FIXED))
1555 			return -ENOMEM;
1556 
1557 		nr_pages = count_vma_pages_range(mm, addr, addr + len);
1558 
1559 		if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
1560 			return -ENOMEM;
1561 	}
1562 
1563 	/* Clear old maps */
1564 	error = -ENOMEM;
1565 munmap_back:
1566 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
1567 		if (do_munmap(mm, addr, len))
1568 			return -ENOMEM;
1569 		goto munmap_back;
1570 	}
1571 
1572 	/*
1573 	 * Private writable mapping: check memory availability
1574 	 */
1575 	if (accountable_mapping(file, vm_flags)) {
1576 		charged = len >> PAGE_SHIFT;
1577 		if (security_vm_enough_memory_mm(mm, charged))
1578 			return -ENOMEM;
1579 		vm_flags |= VM_ACCOUNT;
1580 	}
1581 
1582 	/*
1583 	 * Can we just expand an old mapping?
1584 	 */
1585 	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
1586 	if (vma)
1587 		goto out;
1588 
1589 	/*
1590 	 * Determine the object being mapped and call the appropriate
1591 	 * specific mapper. the address has already been validated, but
1592 	 * not unmapped, but the maps are removed from the list.
1593 	 */
1594 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1595 	if (!vma) {
1596 		error = -ENOMEM;
1597 		goto unacct_error;
1598 	}
1599 
1600 	vma->vm_mm = mm;
1601 	vma->vm_start = addr;
1602 	vma->vm_end = addr + len;
1603 	vma->vm_flags = vm_flags;
1604 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
1605 	vma->vm_pgoff = pgoff;
1606 	INIT_LIST_HEAD(&vma->anon_vma_chain);
1607 
1608 	if (file) {
1609 		if (vm_flags & VM_DENYWRITE) {
1610 			error = deny_write_access(file);
1611 			if (error)
1612 				goto free_vma;
1613 		}
1614 		if (vm_flags & VM_SHARED) {
1615 			error = mapping_map_writable(file->f_mapping);
1616 			if (error)
1617 				goto allow_write_and_free_vma;
1618 		}
1619 
1620 		/* ->mmap() can change vma->vm_file, but must guarantee that
1621 		 * vma_link() below can deny write-access if VM_DENYWRITE is set
1622 		 * and map writably if VM_SHARED is set. This usually means the
1623 		 * new file must not have been exposed to user-space, yet.
1624 		 */
1625 		vma->vm_file = get_file(file);
1626 		error = file->f_op->mmap(file, vma);
1627 		if (error)
1628 			goto unmap_and_free_vma;
1629 
1630 		/* Can addr have changed??
1631 		 *
1632 		 * Answer: Yes, several device drivers can do it in their
1633 		 *         f_op->mmap method. -DaveM
1634 		 * Bug: If addr is changed, prev, rb_link, rb_parent should
1635 		 *      be updated for vma_link()
1636 		 */
1637 		WARN_ON_ONCE(addr != vma->vm_start);
1638 
1639 		addr = vma->vm_start;
1640 		vm_flags = vma->vm_flags;
1641 	} else if (vm_flags & VM_SHARED) {
1642 		error = shmem_zero_setup(vma);
1643 		if (error)
1644 			goto free_vma;
1645 	}
1646 
1647 	vma_link(mm, vma, prev, rb_link, rb_parent);
1648 	/* Once vma denies write, undo our temporary denial count */
1649 	if (file) {
1650 		if (vm_flags & VM_SHARED)
1651 			mapping_unmap_writable(file->f_mapping);
1652 		if (vm_flags & VM_DENYWRITE)
1653 			allow_write_access(file);
1654 	}
1655 	file = vma->vm_file;
1656 out:
1657 	perf_event_mmap(vma);
1658 
1659 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1660 	if (vm_flags & VM_LOCKED) {
1661 		if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
1662 					vma == get_gate_vma(current->mm)))
1663 			mm->locked_vm += (len >> PAGE_SHIFT);
1664 		else
1665 			vma->vm_flags &= ~VM_LOCKED;
1666 	}
1667 
1668 	if (file)
1669 		uprobe_mmap(vma);
1670 
1671 	/*
1672 	 * New (or expanded) vma always get soft dirty status.
1673 	 * Otherwise user-space soft-dirty page tracker won't
1674 	 * be able to distinguish situation when vma area unmapped,
1675 	 * then new mapped in-place (which must be aimed as
1676 	 * a completely new data area).
1677 	 */
1678 	vma->vm_flags |= VM_SOFTDIRTY;
1679 
1680 	vma_set_page_prot(vma);
1681 
1682 	return addr;
1683 
1684 unmap_and_free_vma:
1685 	vma->vm_file = NULL;
1686 	fput(file);
1687 
1688 	/* Undo any partial mapping done by a device driver. */
1689 	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1690 	charged = 0;
1691 	if (vm_flags & VM_SHARED)
1692 		mapping_unmap_writable(file->f_mapping);
1693 allow_write_and_free_vma:
1694 	if (vm_flags & VM_DENYWRITE)
1695 		allow_write_access(file);
1696 free_vma:
1697 	kmem_cache_free(vm_area_cachep, vma);
1698 unacct_error:
1699 	if (charged)
1700 		vm_unacct_memory(charged);
1701 	return error;
1702 }
1703 
1704 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1705 {
1706 	/*
1707 	 * We implement the search by looking for an rbtree node that
1708 	 * immediately follows a suitable gap. That is,
1709 	 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
1710 	 * - gap_end   = vma->vm_start        >= info->low_limit  + length;
1711 	 * - gap_end - gap_start >= length
1712 	 */
1713 
1714 	struct mm_struct *mm = current->mm;
1715 	struct vm_area_struct *vma;
1716 	unsigned long length, low_limit, high_limit, gap_start, gap_end;
1717 
1718 	/* Adjust search length to account for worst case alignment overhead */
1719 	length = info->length + info->align_mask;
1720 	if (length < info->length)
1721 		return -ENOMEM;
1722 
1723 	/* Adjust search limits by the desired length */
1724 	if (info->high_limit < length)
1725 		return -ENOMEM;
1726 	high_limit = info->high_limit - length;
1727 
1728 	if (info->low_limit > high_limit)
1729 		return -ENOMEM;
1730 	low_limit = info->low_limit + length;
1731 
1732 	/* Check if rbtree root looks promising */
1733 	if (RB_EMPTY_ROOT(&mm->mm_rb))
1734 		goto check_highest;
1735 	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1736 	if (vma->rb_subtree_gap < length)
1737 		goto check_highest;
1738 
1739 	while (true) {
1740 		/* Visit left subtree if it looks promising */
1741 		gap_end = vma->vm_start;
1742 		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
1743 			struct vm_area_struct *left =
1744 				rb_entry(vma->vm_rb.rb_left,
1745 					 struct vm_area_struct, vm_rb);
1746 			if (left->rb_subtree_gap >= length) {
1747 				vma = left;
1748 				continue;
1749 			}
1750 		}
1751 
1752 		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1753 check_current:
1754 		/* Check if current node has a suitable gap */
1755 		if (gap_start > high_limit)
1756 			return -ENOMEM;
1757 		if (gap_end >= low_limit && gap_end - gap_start >= length)
1758 			goto found;
1759 
1760 		/* Visit right subtree if it looks promising */
1761 		if (vma->vm_rb.rb_right) {
1762 			struct vm_area_struct *right =
1763 				rb_entry(vma->vm_rb.rb_right,
1764 					 struct vm_area_struct, vm_rb);
1765 			if (right->rb_subtree_gap >= length) {
1766 				vma = right;
1767 				continue;
1768 			}
1769 		}
1770 
1771 		/* Go back up the rbtree to find next candidate node */
1772 		while (true) {
1773 			struct rb_node *prev = &vma->vm_rb;
1774 			if (!rb_parent(prev))
1775 				goto check_highest;
1776 			vma = rb_entry(rb_parent(prev),
1777 				       struct vm_area_struct, vm_rb);
1778 			if (prev == vma->vm_rb.rb_left) {
1779 				gap_start = vma->vm_prev->vm_end;
1780 				gap_end = vma->vm_start;
1781 				goto check_current;
1782 			}
1783 		}
1784 	}
1785 
1786 check_highest:
1787 	/* Check highest gap, which does not precede any rbtree node */
1788 	gap_start = mm->highest_vm_end;
1789 	gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
1790 	if (gap_start > high_limit)
1791 		return -ENOMEM;
1792 
1793 found:
1794 	/* We found a suitable gap. Clip it with the original low_limit. */
1795 	if (gap_start < info->low_limit)
1796 		gap_start = info->low_limit;
1797 
1798 	/* Adjust gap address to the desired alignment */
1799 	gap_start += (info->align_offset - gap_start) & info->align_mask;
1800 
1801 	VM_BUG_ON(gap_start + info->length > info->high_limit);
1802 	VM_BUG_ON(gap_start + info->length > gap_end);
1803 	return gap_start;
1804 }
1805 
1806 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1807 {
1808 	struct mm_struct *mm = current->mm;
1809 	struct vm_area_struct *vma;
1810 	unsigned long length, low_limit, high_limit, gap_start, gap_end;
1811 
1812 	/* Adjust search length to account for worst case alignment overhead */
1813 	length = info->length + info->align_mask;
1814 	if (length < info->length)
1815 		return -ENOMEM;
1816 
1817 	/*
1818 	 * Adjust search limits by the desired length.
1819 	 * See implementation comment at top of unmapped_area().
1820 	 */
1821 	gap_end = info->high_limit;
1822 	if (gap_end < length)
1823 		return -ENOMEM;
1824 	high_limit = gap_end - length;
1825 
1826 	if (info->low_limit > high_limit)
1827 		return -ENOMEM;
1828 	low_limit = info->low_limit + length;
1829 
1830 	/* Check highest gap, which does not precede any rbtree node */
1831 	gap_start = mm->highest_vm_end;
1832 	if (gap_start <= high_limit)
1833 		goto found_highest;
1834 
1835 	/* Check if rbtree root looks promising */
1836 	if (RB_EMPTY_ROOT(&mm->mm_rb))
1837 		return -ENOMEM;
1838 	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1839 	if (vma->rb_subtree_gap < length)
1840 		return -ENOMEM;
1841 
1842 	while (true) {
1843 		/* Visit right subtree if it looks promising */
1844 		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1845 		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1846 			struct vm_area_struct *right =
1847 				rb_entry(vma->vm_rb.rb_right,
1848 					 struct vm_area_struct, vm_rb);
1849 			if (right->rb_subtree_gap >= length) {
1850 				vma = right;
1851 				continue;
1852 			}
1853 		}
1854 
1855 check_current:
1856 		/* Check if current node has a suitable gap */
1857 		gap_end = vma->vm_start;
1858 		if (gap_end < low_limit)
1859 			return -ENOMEM;
1860 		if (gap_start <= high_limit && gap_end - gap_start >= length)
1861 			goto found;
1862 
1863 		/* Visit left subtree if it looks promising */
1864 		if (vma->vm_rb.rb_left) {
1865 			struct vm_area_struct *left =
1866 				rb_entry(vma->vm_rb.rb_left,
1867 					 struct vm_area_struct, vm_rb);
1868 			if (left->rb_subtree_gap >= length) {
1869 				vma = left;
1870 				continue;
1871 			}
1872 		}
1873 
1874 		/* Go back up the rbtree to find next candidate node */
1875 		while (true) {
1876 			struct rb_node *prev = &vma->vm_rb;
1877 			if (!rb_parent(prev))
1878 				return -ENOMEM;
1879 			vma = rb_entry(rb_parent(prev),
1880 				       struct vm_area_struct, vm_rb);
1881 			if (prev == vma->vm_rb.rb_right) {
1882 				gap_start = vma->vm_prev ?
1883 					vma->vm_prev->vm_end : 0;
1884 				goto check_current;
1885 			}
1886 		}
1887 	}
1888 
1889 found:
1890 	/* We found a suitable gap. Clip it with the original high_limit. */
1891 	if (gap_end > info->high_limit)
1892 		gap_end = info->high_limit;
1893 
1894 found_highest:
1895 	/* Compute highest gap address at the desired alignment */
1896 	gap_end -= info->length;
1897 	gap_end -= (gap_end - info->align_offset) & info->align_mask;
1898 
1899 	VM_BUG_ON(gap_end < info->low_limit);
1900 	VM_BUG_ON(gap_end < gap_start);
1901 	return gap_end;
1902 }
1903 
1904 /* Get an address range which is currently unmapped.
1905  * For shmat() with addr=0.
1906  *
1907  * Ugly calling convention alert:
1908  * Return value with the low bits set means error value,
1909  * ie
1910  *	if (ret & ~PAGE_MASK)
1911  *		error = ret;
1912  *
1913  * This function "knows" that -ENOMEM has the bits set.
1914  */
1915 #ifndef HAVE_ARCH_UNMAPPED_AREA
1916 unsigned long
1917 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1918 		unsigned long len, unsigned long pgoff, unsigned long flags)
1919 {
1920 	struct mm_struct *mm = current->mm;
1921 	struct vm_area_struct *vma;
1922 	struct vm_unmapped_area_info info;
1923 
1924 	if (len > TASK_SIZE - mmap_min_addr)
1925 		return -ENOMEM;
1926 
1927 	if (flags & MAP_FIXED)
1928 		return addr;
1929 
1930 	if (addr) {
1931 		addr = PAGE_ALIGN(addr);
1932 		vma = find_vma(mm, addr);
1933 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
1934 		    (!vma || addr + len <= vma->vm_start))
1935 			return addr;
1936 	}
1937 
1938 	info.flags = 0;
1939 	info.length = len;
1940 	info.low_limit = mm->mmap_base;
1941 	info.high_limit = TASK_SIZE;
1942 	info.align_mask = 0;
1943 	return vm_unmapped_area(&info);
1944 }
1945 #endif
1946 
1947 /*
1948  * This mmap-allocator allocates new areas top-down from below the
1949  * stack's low limit (the base):
1950  */
1951 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1952 unsigned long
1953 arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1954 			  const unsigned long len, const unsigned long pgoff,
1955 			  const unsigned long flags)
1956 {
1957 	struct vm_area_struct *vma;
1958 	struct mm_struct *mm = current->mm;
1959 	unsigned long addr = addr0;
1960 	struct vm_unmapped_area_info info;
1961 
1962 	/* requested length too big for entire address space */
1963 	if (len > TASK_SIZE - mmap_min_addr)
1964 		return -ENOMEM;
1965 
1966 	if (flags & MAP_FIXED)
1967 		return addr;
1968 
1969 	/* requesting a specific address */
1970 	if (addr) {
1971 		addr = PAGE_ALIGN(addr);
1972 		vma = find_vma(mm, addr);
1973 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
1974 				(!vma || addr + len <= vma->vm_start))
1975 			return addr;
1976 	}
1977 
1978 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1979 	info.length = len;
1980 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
1981 	info.high_limit = mm->mmap_base;
1982 	info.align_mask = 0;
1983 	addr = vm_unmapped_area(&info);
1984 
1985 	/*
1986 	 * A failed mmap() very likely causes application failure,
1987 	 * so fall back to the bottom-up function here. This scenario
1988 	 * can happen with large stack limits and large mmap()
1989 	 * allocations.
1990 	 */
1991 	if (addr & ~PAGE_MASK) {
1992 		VM_BUG_ON(addr != -ENOMEM);
1993 		info.flags = 0;
1994 		info.low_limit = TASK_UNMAPPED_BASE;
1995 		info.high_limit = TASK_SIZE;
1996 		addr = vm_unmapped_area(&info);
1997 	}
1998 
1999 	return addr;
2000 }
2001 #endif
2002 
2003 unsigned long
2004 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
2005 		unsigned long pgoff, unsigned long flags)
2006 {
2007 	unsigned long (*get_area)(struct file *, unsigned long,
2008 				  unsigned long, unsigned long, unsigned long);
2009 
2010 	unsigned long error = arch_mmap_check(addr, len, flags);
2011 	if (error)
2012 		return error;
2013 
2014 	/* Careful about overflows.. */
2015 	if (len > TASK_SIZE)
2016 		return -ENOMEM;
2017 
2018 	get_area = current->mm->get_unmapped_area;
2019 	if (file && file->f_op->get_unmapped_area)
2020 		get_area = file->f_op->get_unmapped_area;
2021 	addr = get_area(file, addr, len, pgoff, flags);
2022 	if (IS_ERR_VALUE(addr))
2023 		return addr;
2024 
2025 	if (addr > TASK_SIZE - len)
2026 		return -ENOMEM;
2027 	if (addr & ~PAGE_MASK)
2028 		return -EINVAL;
2029 
2030 	addr = arch_rebalance_pgtables(addr, len);
2031 	error = security_mmap_addr(addr);
2032 	return error ? error : addr;
2033 }
2034 
2035 EXPORT_SYMBOL(get_unmapped_area);
2036 
2037 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
2038 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
2039 {
2040 	struct rb_node *rb_node;
2041 	struct vm_area_struct *vma;
2042 
2043 	/* Check the cache first. */
2044 	vma = vmacache_find(mm, addr);
2045 	if (likely(vma))
2046 		return vma;
2047 
2048 	rb_node = mm->mm_rb.rb_node;
2049 	vma = NULL;
2050 
2051 	while (rb_node) {
2052 		struct vm_area_struct *tmp;
2053 
2054 		tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2055 
2056 		if (tmp->vm_end > addr) {
2057 			vma = tmp;
2058 			if (tmp->vm_start <= addr)
2059 				break;
2060 			rb_node = rb_node->rb_left;
2061 		} else
2062 			rb_node = rb_node->rb_right;
2063 	}
2064 
2065 	if (vma)
2066 		vmacache_update(addr, vma);
2067 	return vma;
2068 }
2069 
2070 EXPORT_SYMBOL(find_vma);
2071 
2072 /*
2073  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
2074  */
2075 struct vm_area_struct *
2076 find_vma_prev(struct mm_struct *mm, unsigned long addr,
2077 			struct vm_area_struct **pprev)
2078 {
2079 	struct vm_area_struct *vma;
2080 
2081 	vma = find_vma(mm, addr);
2082 	if (vma) {
2083 		*pprev = vma->vm_prev;
2084 	} else {
2085 		struct rb_node *rb_node = mm->mm_rb.rb_node;
2086 		*pprev = NULL;
2087 		while (rb_node) {
2088 			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2089 			rb_node = rb_node->rb_right;
2090 		}
2091 	}
2092 	return vma;
2093 }
2094 
2095 /*
2096  * Verify that the stack growth is acceptable and
2097  * update accounting. This is shared with both the
2098  * grow-up and grow-down cases.
2099  */
2100 static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
2101 {
2102 	struct mm_struct *mm = vma->vm_mm;
2103 	struct rlimit *rlim = current->signal->rlim;
2104 	unsigned long new_start, actual_size;
2105 
2106 	/* address space limit tests */
2107 	if (!may_expand_vm(mm, grow))
2108 		return -ENOMEM;
2109 
2110 	/* Stack limit test */
2111 	actual_size = size;
2112 	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
2113 		actual_size -= PAGE_SIZE;
2114 	if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
2115 		return -ENOMEM;
2116 
2117 	/* mlock limit tests */
2118 	if (vma->vm_flags & VM_LOCKED) {
2119 		unsigned long locked;
2120 		unsigned long limit;
2121 		locked = mm->locked_vm + grow;
2122 		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
2123 		limit >>= PAGE_SHIFT;
2124 		if (locked > limit && !capable(CAP_IPC_LOCK))
2125 			return -ENOMEM;
2126 	}
2127 
2128 	/* Check to ensure the stack will not grow into a hugetlb-only region */
2129 	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2130 			vma->vm_end - size;
2131 	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2132 		return -EFAULT;
2133 
2134 	/*
2135 	 * Overcommit..  This must be the final test, as it will
2136 	 * update security statistics.
2137 	 */
2138 	if (security_vm_enough_memory_mm(mm, grow))
2139 		return -ENOMEM;
2140 
2141 	/* Ok, everything looks good - let it rip */
2142 	if (vma->vm_flags & VM_LOCKED)
2143 		mm->locked_vm += grow;
2144 	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
2145 	return 0;
2146 }
2147 
2148 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
2149 /*
2150  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
2151  * vma is the last one with address > vma->vm_end.  Have to extend vma.
2152  */
2153 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2154 {
2155 	int error;
2156 
2157 	if (!(vma->vm_flags & VM_GROWSUP))
2158 		return -EFAULT;
2159 
2160 	/*
2161 	 * We must make sure the anon_vma is allocated
2162 	 * so that the anon_vma locking is not a noop.
2163 	 */
2164 	if (unlikely(anon_vma_prepare(vma)))
2165 		return -ENOMEM;
2166 	vma_lock_anon_vma(vma);
2167 
2168 	/*
2169 	 * vma->vm_start/vm_end cannot change under us because the caller
2170 	 * is required to hold the mmap_sem in read mode.  We need the
2171 	 * anon_vma lock to serialize against concurrent expand_stacks.
2172 	 * Also guard against wrapping around to address 0.
2173 	 */
2174 	if (address < PAGE_ALIGN(address+4))
2175 		address = PAGE_ALIGN(address+4);
2176 	else {
2177 		vma_unlock_anon_vma(vma);
2178 		return -ENOMEM;
2179 	}
2180 	error = 0;
2181 
2182 	/* Somebody else might have raced and expanded it already */
2183 	if (address > vma->vm_end) {
2184 		unsigned long size, grow;
2185 
2186 		size = address - vma->vm_start;
2187 		grow = (address - vma->vm_end) >> PAGE_SHIFT;
2188 
2189 		error = -ENOMEM;
2190 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2191 			error = acct_stack_growth(vma, size, grow);
2192 			if (!error) {
2193 				/*
2194 				 * vma_gap_update() doesn't support concurrent
2195 				 * updates, but we only hold a shared mmap_sem
2196 				 * lock here, so we need to protect against
2197 				 * concurrent vma expansions.
2198 				 * vma_lock_anon_vma() doesn't help here, as
2199 				 * we don't guarantee that all growable vmas
2200 				 * in a mm share the same root anon vma.
2201 				 * So, we reuse mm->page_table_lock to guard
2202 				 * against concurrent vma expansions.
2203 				 */
2204 				spin_lock(&vma->vm_mm->page_table_lock);
2205 				anon_vma_interval_tree_pre_update_vma(vma);
2206 				vma->vm_end = address;
2207 				anon_vma_interval_tree_post_update_vma(vma);
2208 				if (vma->vm_next)
2209 					vma_gap_update(vma->vm_next);
2210 				else
2211 					vma->vm_mm->highest_vm_end = address;
2212 				spin_unlock(&vma->vm_mm->page_table_lock);
2213 
2214 				perf_event_mmap(vma);
2215 			}
2216 		}
2217 	}
2218 	vma_unlock_anon_vma(vma);
2219 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
2220 	validate_mm(vma->vm_mm);
2221 	return error;
2222 }
2223 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2224 
2225 /*
2226  * vma is the first one with address < vma->vm_start.  Have to extend vma.
2227  */
2228 int expand_downwards(struct vm_area_struct *vma,
2229 				   unsigned long address)
2230 {
2231 	int error;
2232 
2233 	/*
2234 	 * We must make sure the anon_vma is allocated
2235 	 * so that the anon_vma locking is not a noop.
2236 	 */
2237 	if (unlikely(anon_vma_prepare(vma)))
2238 		return -ENOMEM;
2239 
2240 	address &= PAGE_MASK;
2241 	error = security_mmap_addr(address);
2242 	if (error)
2243 		return error;
2244 
2245 	vma_lock_anon_vma(vma);
2246 
2247 	/*
2248 	 * vma->vm_start/vm_end cannot change under us because the caller
2249 	 * is required to hold the mmap_sem in read mode.  We need the
2250 	 * anon_vma lock to serialize against concurrent expand_stacks.
2251 	 */
2252 
2253 	/* Somebody else might have raced and expanded it already */
2254 	if (address < vma->vm_start) {
2255 		unsigned long size, grow;
2256 
2257 		size = vma->vm_end - address;
2258 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
2259 
2260 		error = -ENOMEM;
2261 		if (grow <= vma->vm_pgoff) {
2262 			error = acct_stack_growth(vma, size, grow);
2263 			if (!error) {
2264 				/*
2265 				 * vma_gap_update() doesn't support concurrent
2266 				 * updates, but we only hold a shared mmap_sem
2267 				 * lock here, so we need to protect against
2268 				 * concurrent vma expansions.
2269 				 * vma_lock_anon_vma() doesn't help here, as
2270 				 * we don't guarantee that all growable vmas
2271 				 * in a mm share the same root anon vma.
2272 				 * So, we reuse mm->page_table_lock to guard
2273 				 * against concurrent vma expansions.
2274 				 */
2275 				spin_lock(&vma->vm_mm->page_table_lock);
2276 				anon_vma_interval_tree_pre_update_vma(vma);
2277 				vma->vm_start = address;
2278 				vma->vm_pgoff -= grow;
2279 				anon_vma_interval_tree_post_update_vma(vma);
2280 				vma_gap_update(vma);
2281 				spin_unlock(&vma->vm_mm->page_table_lock);
2282 
2283 				perf_event_mmap(vma);
2284 			}
2285 		}
2286 	}
2287 	vma_unlock_anon_vma(vma);
2288 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
2289 	validate_mm(vma->vm_mm);
2290 	return error;
2291 }
2292 
2293 /*
2294  * Note how expand_stack() refuses to expand the stack all the way to
2295  * abut the next virtual mapping, *unless* that mapping itself is also
2296  * a stack mapping. We want to leave room for a guard page, after all
2297  * (the guard page itself is not added here, that is done by the
2298  * actual page faulting logic)
2299  *
2300  * This matches the behavior of the guard page logic (see mm/memory.c:
2301  * check_stack_guard_page()), which only allows the guard page to be
2302  * removed under these circumstances.
2303  */
2304 #ifdef CONFIG_STACK_GROWSUP
2305 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2306 {
2307 	struct vm_area_struct *next;
2308 
2309 	address &= PAGE_MASK;
2310 	next = vma->vm_next;
2311 	if (next && next->vm_start == address + PAGE_SIZE) {
2312 		if (!(next->vm_flags & VM_GROWSUP))
2313 			return -ENOMEM;
2314 	}
2315 	return expand_upwards(vma, address);
2316 }
2317 
2318 struct vm_area_struct *
2319 find_extend_vma(struct mm_struct *mm, unsigned long addr)
2320 {
2321 	struct vm_area_struct *vma, *prev;
2322 
2323 	addr &= PAGE_MASK;
2324 	vma = find_vma_prev(mm, addr, &prev);
2325 	if (vma && (vma->vm_start <= addr))
2326 		return vma;
2327 	if (!prev || expand_stack(prev, addr))
2328 		return NULL;
2329 	if (prev->vm_flags & VM_LOCKED)
2330 		__mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
2331 	return prev;
2332 }
2333 #else
2334 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2335 {
2336 	struct vm_area_struct *prev;
2337 
2338 	address &= PAGE_MASK;
2339 	prev = vma->vm_prev;
2340 	if (prev && prev->vm_end == address) {
2341 		if (!(prev->vm_flags & VM_GROWSDOWN))
2342 			return -ENOMEM;
2343 	}
2344 	return expand_downwards(vma, address);
2345 }
2346 
2347 struct vm_area_struct *
2348 find_extend_vma(struct mm_struct *mm, unsigned long addr)
2349 {
2350 	struct vm_area_struct *vma;
2351 	unsigned long start;
2352 
2353 	addr &= PAGE_MASK;
2354 	vma = find_vma(mm, addr);
2355 	if (!vma)
2356 		return NULL;
2357 	if (vma->vm_start <= addr)
2358 		return vma;
2359 	if (!(vma->vm_flags & VM_GROWSDOWN))
2360 		return NULL;
2361 	start = vma->vm_start;
2362 	if (expand_stack(vma, addr))
2363 		return NULL;
2364 	if (vma->vm_flags & VM_LOCKED)
2365 		__mlock_vma_pages_range(vma, addr, start, NULL);
2366 	return vma;
2367 }
2368 #endif
2369 
2370 EXPORT_SYMBOL_GPL(find_extend_vma);
2371 
2372 /*
2373  * Ok - we have the memory areas we should free on the vma list,
2374  * so release them, and do the vma updates.
2375  *
2376  * Called with the mm semaphore held.
2377  */
2378 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
2379 {
2380 	unsigned long nr_accounted = 0;
2381 
2382 	/* Update high watermark before we lower total_vm */
2383 	update_hiwater_vm(mm);
2384 	do {
2385 		long nrpages = vma_pages(vma);
2386 
2387 		if (vma->vm_flags & VM_ACCOUNT)
2388 			nr_accounted += nrpages;
2389 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
2390 		vma = remove_vma(vma);
2391 	} while (vma);
2392 	vm_unacct_memory(nr_accounted);
2393 	validate_mm(mm);
2394 }
2395 
2396 /*
2397  * Get rid of page table information in the indicated region.
2398  *
2399  * Called with the mm semaphore held.
2400  */
2401 static void unmap_region(struct mm_struct *mm,
2402 		struct vm_area_struct *vma, struct vm_area_struct *prev,
2403 		unsigned long start, unsigned long end)
2404 {
2405 	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
2406 	struct mmu_gather tlb;
2407 
2408 	lru_add_drain();
2409 	tlb_gather_mmu(&tlb, mm, start, end);
2410 	update_hiwater_rss(mm);
2411 	unmap_vmas(&tlb, vma, start, end);
2412 	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2413 				 next ? next->vm_start : USER_PGTABLES_CEILING);
2414 	tlb_finish_mmu(&tlb, start, end);
2415 }
2416 
2417 /*
2418  * Create a list of vma's touched by the unmap, removing them from the mm's
2419  * vma list as we go..
2420  */
2421 static void
2422 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2423 	struct vm_area_struct *prev, unsigned long end)
2424 {
2425 	struct vm_area_struct **insertion_point;
2426 	struct vm_area_struct *tail_vma = NULL;
2427 
2428 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
2429 	vma->vm_prev = NULL;
2430 	do {
2431 		vma_rb_erase(vma, &mm->mm_rb);
2432 		mm->map_count--;
2433 		tail_vma = vma;
2434 		vma = vma->vm_next;
2435 	} while (vma && vma->vm_start < end);
2436 	*insertion_point = vma;
2437 	if (vma) {
2438 		vma->vm_prev = prev;
2439 		vma_gap_update(vma);
2440 	} else
2441 		mm->highest_vm_end = prev ? prev->vm_end : 0;
2442 	tail_vma->vm_next = NULL;
2443 
2444 	/* Kill the cache */
2445 	vmacache_invalidate(mm);
2446 }
2447 
2448 /*
2449  * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
2450  * munmap path where it doesn't make sense to fail.
2451  */
2452 static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2453 	      unsigned long addr, int new_below)
2454 {
2455 	struct vm_area_struct *new;
2456 	int err = -ENOMEM;
2457 
2458 	if (is_vm_hugetlb_page(vma) && (addr &
2459 					~(huge_page_mask(hstate_vma(vma)))))
2460 		return -EINVAL;
2461 
2462 	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2463 	if (!new)
2464 		goto out_err;
2465 
2466 	/* most fields are the same, copy all, and then fixup */
2467 	*new = *vma;
2468 
2469 	INIT_LIST_HEAD(&new->anon_vma_chain);
2470 
2471 	if (new_below)
2472 		new->vm_end = addr;
2473 	else {
2474 		new->vm_start = addr;
2475 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2476 	}
2477 
2478 	err = vma_dup_policy(vma, new);
2479 	if (err)
2480 		goto out_free_vma;
2481 
2482 	err = anon_vma_clone(new, vma);
2483 	if (err)
2484 		goto out_free_mpol;
2485 
2486 	if (new->vm_file)
2487 		get_file(new->vm_file);
2488 
2489 	if (new->vm_ops && new->vm_ops->open)
2490 		new->vm_ops->open(new);
2491 
2492 	if (new_below)
2493 		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
2494 			((addr - new->vm_start) >> PAGE_SHIFT), new);
2495 	else
2496 		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
2497 
2498 	/* Success. */
2499 	if (!err)
2500 		return 0;
2501 
2502 	/* Clean everything up if vma_adjust failed. */
2503 	if (new->vm_ops && new->vm_ops->close)
2504 		new->vm_ops->close(new);
2505 	if (new->vm_file)
2506 		fput(new->vm_file);
2507 	unlink_anon_vmas(new);
2508  out_free_mpol:
2509 	mpol_put(vma_policy(new));
2510  out_free_vma:
2511 	kmem_cache_free(vm_area_cachep, new);
2512  out_err:
2513 	return err;
2514 }
2515 
2516 /*
2517  * Split a vma into two pieces at address 'addr', a new vma is allocated
2518  * either for the first part or the tail.
2519  */
2520 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2521 	      unsigned long addr, int new_below)
2522 {
2523 	if (mm->map_count >= sysctl_max_map_count)
2524 		return -ENOMEM;
2525 
2526 	return __split_vma(mm, vma, addr, new_below);
2527 }
2528 
2529 /* Munmap is split into 2 main parts -- this part which finds
2530  * what needs doing, and the areas themselves, which do the
2531  * work.  This now handles partial unmappings.
2532  * Jeremy Fitzhardinge <jeremy@goop.org>
2533  */
2534 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2535 {
2536 	unsigned long end;
2537 	struct vm_area_struct *vma, *prev, *last;
2538 
2539 	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
2540 		return -EINVAL;
2541 
2542 	len = PAGE_ALIGN(len);
2543 	if (len == 0)
2544 		return -EINVAL;
2545 
2546 	/* Find the first overlapping VMA */
2547 	vma = find_vma(mm, start);
2548 	if (!vma)
2549 		return 0;
2550 	prev = vma->vm_prev;
2551 	/* we have  start < vma->vm_end  */
2552 
2553 	/* if it doesn't overlap, we have nothing.. */
2554 	end = start + len;
2555 	if (vma->vm_start >= end)
2556 		return 0;
2557 
2558 	/*
2559 	 * If we need to split any vma, do it now to save pain later.
2560 	 *
2561 	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2562 	 * unmapped vm_area_struct will remain in use: so lower split_vma
2563 	 * places tmp vma above, and higher split_vma places tmp vma below.
2564 	 */
2565 	if (start > vma->vm_start) {
2566 		int error;
2567 
2568 		/*
2569 		 * Make sure that map_count on return from munmap() will
2570 		 * not exceed its limit; but let map_count go just above
2571 		 * its limit temporarily, to help free resources as expected.
2572 		 */
2573 		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2574 			return -ENOMEM;
2575 
2576 		error = __split_vma(mm, vma, start, 0);
2577 		if (error)
2578 			return error;
2579 		prev = vma;
2580 	}
2581 
2582 	/* Does it split the last one? */
2583 	last = find_vma(mm, end);
2584 	if (last && end > last->vm_start) {
2585 		int error = __split_vma(mm, last, end, 1);
2586 		if (error)
2587 			return error;
2588 	}
2589 	vma = prev ? prev->vm_next : mm->mmap;
2590 
2591 	/*
2592 	 * unlock any mlock()ed ranges before detaching vmas
2593 	 */
2594 	if (mm->locked_vm) {
2595 		struct vm_area_struct *tmp = vma;
2596 		while (tmp && tmp->vm_start < end) {
2597 			if (tmp->vm_flags & VM_LOCKED) {
2598 				mm->locked_vm -= vma_pages(tmp);
2599 				munlock_vma_pages_all(tmp);
2600 			}
2601 			tmp = tmp->vm_next;
2602 		}
2603 	}
2604 
2605 	/*
2606 	 * Remove the vma's, and unmap the actual pages
2607 	 */
2608 	detach_vmas_to_be_unmapped(mm, vma, prev, end);
2609 	unmap_region(mm, vma, prev, start, end);
2610 
2611 	arch_unmap(mm, vma, start, end);
2612 
2613 	/* Fix up all other VM information */
2614 	remove_vma_list(mm, vma);
2615 
2616 	return 0;
2617 }
2618 
2619 int vm_munmap(unsigned long start, size_t len)
2620 {
2621 	int ret;
2622 	struct mm_struct *mm = current->mm;
2623 
2624 	down_write(&mm->mmap_sem);
2625 	ret = do_munmap(mm, start, len);
2626 	up_write(&mm->mmap_sem);
2627 	return ret;
2628 }
2629 EXPORT_SYMBOL(vm_munmap);
2630 
2631 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2632 {
2633 	profile_munmap(addr);
2634 	return vm_munmap(addr, len);
2635 }
2636 
2637 static inline void verify_mm_writelocked(struct mm_struct *mm)
2638 {
2639 #ifdef CONFIG_DEBUG_VM
2640 	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2641 		WARN_ON(1);
2642 		up_read(&mm->mmap_sem);
2643 	}
2644 #endif
2645 }
2646 
2647 /*
2648  *  this is really a simplified "do_mmap".  it only handles
2649  *  anonymous maps.  eventually we may be able to do some
2650  *  brk-specific accounting here.
2651  */
2652 static unsigned long do_brk(unsigned long addr, unsigned long len)
2653 {
2654 	struct mm_struct *mm = current->mm;
2655 	struct vm_area_struct *vma, *prev;
2656 	unsigned long flags;
2657 	struct rb_node **rb_link, *rb_parent;
2658 	pgoff_t pgoff = addr >> PAGE_SHIFT;
2659 	int error;
2660 
2661 	len = PAGE_ALIGN(len);
2662 	if (!len)
2663 		return addr;
2664 
2665 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2666 
2667 	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2668 	if (error & ~PAGE_MASK)
2669 		return error;
2670 
2671 	error = mlock_future_check(mm, mm->def_flags, len);
2672 	if (error)
2673 		return error;
2674 
2675 	/*
2676 	 * mm->mmap_sem is required to protect against another thread
2677 	 * changing the mappings in case we sleep.
2678 	 */
2679 	verify_mm_writelocked(mm);
2680 
2681 	/*
2682 	 * Clear old maps.  this also does some error checking for us
2683 	 */
2684  munmap_back:
2685 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
2686 		if (do_munmap(mm, addr, len))
2687 			return -ENOMEM;
2688 		goto munmap_back;
2689 	}
2690 
2691 	/* Check against address space limits *after* clearing old maps... */
2692 	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
2693 		return -ENOMEM;
2694 
2695 	if (mm->map_count > sysctl_max_map_count)
2696 		return -ENOMEM;
2697 
2698 	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2699 		return -ENOMEM;
2700 
2701 	/* Can we just expand an old private anonymous mapping? */
2702 	vma = vma_merge(mm, prev, addr, addr + len, flags,
2703 					NULL, NULL, pgoff, NULL);
2704 	if (vma)
2705 		goto out;
2706 
2707 	/*
2708 	 * create a vma struct for an anonymous mapping
2709 	 */
2710 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2711 	if (!vma) {
2712 		vm_unacct_memory(len >> PAGE_SHIFT);
2713 		return -ENOMEM;
2714 	}
2715 
2716 	INIT_LIST_HEAD(&vma->anon_vma_chain);
2717 	vma->vm_mm = mm;
2718 	vma->vm_start = addr;
2719 	vma->vm_end = addr + len;
2720 	vma->vm_pgoff = pgoff;
2721 	vma->vm_flags = flags;
2722 	vma->vm_page_prot = vm_get_page_prot(flags);
2723 	vma_link(mm, vma, prev, rb_link, rb_parent);
2724 out:
2725 	perf_event_mmap(vma);
2726 	mm->total_vm += len >> PAGE_SHIFT;
2727 	if (flags & VM_LOCKED)
2728 		mm->locked_vm += (len >> PAGE_SHIFT);
2729 	vma->vm_flags |= VM_SOFTDIRTY;
2730 	return addr;
2731 }
2732 
2733 unsigned long vm_brk(unsigned long addr, unsigned long len)
2734 {
2735 	struct mm_struct *mm = current->mm;
2736 	unsigned long ret;
2737 	bool populate;
2738 
2739 	down_write(&mm->mmap_sem);
2740 	ret = do_brk(addr, len);
2741 	populate = ((mm->def_flags & VM_LOCKED) != 0);
2742 	up_write(&mm->mmap_sem);
2743 	if (populate)
2744 		mm_populate(addr, len);
2745 	return ret;
2746 }
2747 EXPORT_SYMBOL(vm_brk);
2748 
2749 /* Release all mmaps. */
2750 void exit_mmap(struct mm_struct *mm)
2751 {
2752 	struct mmu_gather tlb;
2753 	struct vm_area_struct *vma;
2754 	unsigned long nr_accounted = 0;
2755 
2756 	/* mm's last user has gone, and its about to be pulled down */
2757 	mmu_notifier_release(mm);
2758 
2759 	if (mm->locked_vm) {
2760 		vma = mm->mmap;
2761 		while (vma) {
2762 			if (vma->vm_flags & VM_LOCKED)
2763 				munlock_vma_pages_all(vma);
2764 			vma = vma->vm_next;
2765 		}
2766 	}
2767 
2768 	arch_exit_mmap(mm);
2769 
2770 	vma = mm->mmap;
2771 	if (!vma)	/* Can happen if dup_mmap() received an OOM */
2772 		return;
2773 
2774 	lru_add_drain();
2775 	flush_cache_mm(mm);
2776 	tlb_gather_mmu(&tlb, mm, 0, -1);
2777 	/* update_hiwater_rss(mm) here? but nobody should be looking */
2778 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
2779 	unmap_vmas(&tlb, vma, 0, -1);
2780 
2781 	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
2782 	tlb_finish_mmu(&tlb, 0, -1);
2783 
2784 	/*
2785 	 * Walk the list again, actually closing and freeing it,
2786 	 * with preemption enabled, without holding any MM locks.
2787 	 */
2788 	while (vma) {
2789 		if (vma->vm_flags & VM_ACCOUNT)
2790 			nr_accounted += vma_pages(vma);
2791 		vma = remove_vma(vma);
2792 	}
2793 	vm_unacct_memory(nr_accounted);
2794 
2795 	WARN_ON(atomic_long_read(&mm->nr_ptes) >
2796 			(FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2797 }
2798 
2799 /* Insert vm structure into process list sorted by address
2800  * and into the inode's i_mmap tree.  If vm_file is non-NULL
2801  * then i_mmap_rwsem is taken here.
2802  */
2803 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
2804 {
2805 	struct vm_area_struct *prev;
2806 	struct rb_node **rb_link, *rb_parent;
2807 
2808 	/*
2809 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
2810 	 * until its first write fault, when page's anon_vma and index
2811 	 * are set.  But now set the vm_pgoff it will almost certainly
2812 	 * end up with (unless mremap moves it elsewhere before that
2813 	 * first wfault), so /proc/pid/maps tells a consistent story.
2814 	 *
2815 	 * By setting it to reflect the virtual start address of the
2816 	 * vma, merges and splits can happen in a seamless way, just
2817 	 * using the existing file pgoff checks and manipulations.
2818 	 * Similarly in do_mmap_pgoff and in do_brk.
2819 	 */
2820 	if (!vma->vm_file) {
2821 		BUG_ON(vma->anon_vma);
2822 		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2823 	}
2824 	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
2825 			   &prev, &rb_link, &rb_parent))
2826 		return -ENOMEM;
2827 	if ((vma->vm_flags & VM_ACCOUNT) &&
2828 	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
2829 		return -ENOMEM;
2830 
2831 	vma_link(mm, vma, prev, rb_link, rb_parent);
2832 	return 0;
2833 }
2834 
2835 /*
2836  * Copy the vma structure to a new location in the same mm,
2837  * prior to moving page table entries, to effect an mremap move.
2838  */
2839 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2840 	unsigned long addr, unsigned long len, pgoff_t pgoff,
2841 	bool *need_rmap_locks)
2842 {
2843 	struct vm_area_struct *vma = *vmap;
2844 	unsigned long vma_start = vma->vm_start;
2845 	struct mm_struct *mm = vma->vm_mm;
2846 	struct vm_area_struct *new_vma, *prev;
2847 	struct rb_node **rb_link, *rb_parent;
2848 	bool faulted_in_anon_vma = true;
2849 
2850 	/*
2851 	 * If anonymous vma has not yet been faulted, update new pgoff
2852 	 * to match new location, to increase its chance of merging.
2853 	 */
2854 	if (unlikely(!vma->vm_file && !vma->anon_vma)) {
2855 		pgoff = addr >> PAGE_SHIFT;
2856 		faulted_in_anon_vma = false;
2857 	}
2858 
2859 	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
2860 		return NULL;	/* should never get here */
2861 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2862 			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
2863 	if (new_vma) {
2864 		/*
2865 		 * Source vma may have been merged into new_vma
2866 		 */
2867 		if (unlikely(vma_start >= new_vma->vm_start &&
2868 			     vma_start < new_vma->vm_end)) {
2869 			/*
2870 			 * The only way we can get a vma_merge with
2871 			 * self during an mremap is if the vma hasn't
2872 			 * been faulted in yet and we were allowed to
2873 			 * reset the dst vma->vm_pgoff to the
2874 			 * destination address of the mremap to allow
2875 			 * the merge to happen. mremap must change the
2876 			 * vm_pgoff linearity between src and dst vmas
2877 			 * (in turn preventing a vma_merge) to be
2878 			 * safe. It is only safe to keep the vm_pgoff
2879 			 * linear if there are no pages mapped yet.
2880 			 */
2881 			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
2882 			*vmap = vma = new_vma;
2883 		}
2884 		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
2885 	} else {
2886 		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2887 		if (new_vma) {
2888 			*new_vma = *vma;
2889 			new_vma->vm_start = addr;
2890 			new_vma->vm_end = addr + len;
2891 			new_vma->vm_pgoff = pgoff;
2892 			if (vma_dup_policy(vma, new_vma))
2893 				goto out_free_vma;
2894 			INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2895 			if (anon_vma_clone(new_vma, vma))
2896 				goto out_free_mempol;
2897 			if (new_vma->vm_file)
2898 				get_file(new_vma->vm_file);
2899 			if (new_vma->vm_ops && new_vma->vm_ops->open)
2900 				new_vma->vm_ops->open(new_vma);
2901 			vma_link(mm, new_vma, prev, rb_link, rb_parent);
2902 			*need_rmap_locks = false;
2903 		}
2904 	}
2905 	return new_vma;
2906 
2907  out_free_mempol:
2908 	mpol_put(vma_policy(new_vma));
2909  out_free_vma:
2910 	kmem_cache_free(vm_area_cachep, new_vma);
2911 	return NULL;
2912 }
2913 
2914 /*
2915  * Return true if the calling process may expand its vm space by the passed
2916  * number of pages
2917  */
2918 int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2919 {
2920 	unsigned long cur = mm->total_vm;	/* pages */
2921 	unsigned long lim;
2922 
2923 	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
2924 
2925 	if (cur + npages > lim)
2926 		return 0;
2927 	return 1;
2928 }
2929 
2930 static int special_mapping_fault(struct vm_area_struct *vma,
2931 				 struct vm_fault *vmf);
2932 
2933 /*
2934  * Having a close hook prevents vma merging regardless of flags.
2935  */
2936 static void special_mapping_close(struct vm_area_struct *vma)
2937 {
2938 }
2939 
2940 static const char *special_mapping_name(struct vm_area_struct *vma)
2941 {
2942 	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
2943 }
2944 
2945 static const struct vm_operations_struct special_mapping_vmops = {
2946 	.close = special_mapping_close,
2947 	.fault = special_mapping_fault,
2948 	.name = special_mapping_name,
2949 };
2950 
2951 static const struct vm_operations_struct legacy_special_mapping_vmops = {
2952 	.close = special_mapping_close,
2953 	.fault = special_mapping_fault,
2954 };
2955 
2956 static int special_mapping_fault(struct vm_area_struct *vma,
2957 				struct vm_fault *vmf)
2958 {
2959 	pgoff_t pgoff;
2960 	struct page **pages;
2961 
2962 	/*
2963 	 * special mappings have no vm_file, and in that case, the mm
2964 	 * uses vm_pgoff internally. So we have to subtract it from here.
2965 	 * We are allowed to do this because we are the mm; do not copy
2966 	 * this code into drivers!
2967 	 */
2968 	pgoff = vmf->pgoff - vma->vm_pgoff;
2969 
2970 	if (vma->vm_ops == &legacy_special_mapping_vmops)
2971 		pages = vma->vm_private_data;
2972 	else
2973 		pages = ((struct vm_special_mapping *)vma->vm_private_data)->
2974 			pages;
2975 
2976 	for (; pgoff && *pages; ++pages)
2977 		pgoff--;
2978 
2979 	if (*pages) {
2980 		struct page *page = *pages;
2981 		get_page(page);
2982 		vmf->page = page;
2983 		return 0;
2984 	}
2985 
2986 	return VM_FAULT_SIGBUS;
2987 }
2988 
2989 static struct vm_area_struct *__install_special_mapping(
2990 	struct mm_struct *mm,
2991 	unsigned long addr, unsigned long len,
2992 	unsigned long vm_flags, const struct vm_operations_struct *ops,
2993 	void *priv)
2994 {
2995 	int ret;
2996 	struct vm_area_struct *vma;
2997 
2998 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2999 	if (unlikely(vma == NULL))
3000 		return ERR_PTR(-ENOMEM);
3001 
3002 	INIT_LIST_HEAD(&vma->anon_vma_chain);
3003 	vma->vm_mm = mm;
3004 	vma->vm_start = addr;
3005 	vma->vm_end = addr + len;
3006 
3007 	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
3008 	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3009 
3010 	vma->vm_ops = ops;
3011 	vma->vm_private_data = priv;
3012 
3013 	ret = insert_vm_struct(mm, vma);
3014 	if (ret)
3015 		goto out;
3016 
3017 	mm->total_vm += len >> PAGE_SHIFT;
3018 
3019 	perf_event_mmap(vma);
3020 
3021 	return vma;
3022 
3023 out:
3024 	kmem_cache_free(vm_area_cachep, vma);
3025 	return ERR_PTR(ret);
3026 }
3027 
3028 /*
3029  * Called with mm->mmap_sem held for writing.
3030  * Insert a new vma covering the given region, with the given flags.
3031  * Its pages are supplied by the given array of struct page *.
3032  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3033  * The region past the last page supplied will always produce SIGBUS.
3034  * The array pointer and the pages it points to are assumed to stay alive
3035  * for as long as this mapping might exist.
3036  */
3037 struct vm_area_struct *_install_special_mapping(
3038 	struct mm_struct *mm,
3039 	unsigned long addr, unsigned long len,
3040 	unsigned long vm_flags, const struct vm_special_mapping *spec)
3041 {
3042 	return __install_special_mapping(mm, addr, len, vm_flags,
3043 					 &special_mapping_vmops, (void *)spec);
3044 }
3045 
3046 int install_special_mapping(struct mm_struct *mm,
3047 			    unsigned long addr, unsigned long len,
3048 			    unsigned long vm_flags, struct page **pages)
3049 {
3050 	struct vm_area_struct *vma = __install_special_mapping(
3051 		mm, addr, len, vm_flags, &legacy_special_mapping_vmops,
3052 		(void *)pages);
3053 
3054 	return PTR_ERR_OR_ZERO(vma);
3055 }
3056 
3057 static DEFINE_MUTEX(mm_all_locks_mutex);
3058 
3059 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3060 {
3061 	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
3062 		/*
3063 		 * The LSB of head.next can't change from under us
3064 		 * because we hold the mm_all_locks_mutex.
3065 		 */
3066 		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
3067 		/*
3068 		 * We can safely modify head.next after taking the
3069 		 * anon_vma->root->rwsem. If some other vma in this mm shares
3070 		 * the same anon_vma we won't take it again.
3071 		 *
3072 		 * No need of atomic instructions here, head.next
3073 		 * can't change from under us thanks to the
3074 		 * anon_vma->root->rwsem.
3075 		 */
3076 		if (__test_and_set_bit(0, (unsigned long *)
3077 				       &anon_vma->root->rb_root.rb_node))
3078 			BUG();
3079 	}
3080 }
3081 
3082 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3083 {
3084 	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3085 		/*
3086 		 * AS_MM_ALL_LOCKS can't change from under us because
3087 		 * we hold the mm_all_locks_mutex.
3088 		 *
3089 		 * Operations on ->flags have to be atomic because
3090 		 * even if AS_MM_ALL_LOCKS is stable thanks to the
3091 		 * mm_all_locks_mutex, there may be other cpus
3092 		 * changing other bitflags in parallel to us.
3093 		 */
3094 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3095 			BUG();
3096 		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
3097 	}
3098 }
3099 
3100 /*
3101  * This operation locks against the VM for all pte/vma/mm related
3102  * operations that could ever happen on a certain mm. This includes
3103  * vmtruncate, try_to_unmap, and all page faults.
3104  *
3105  * The caller must take the mmap_sem in write mode before calling
3106  * mm_take_all_locks(). The caller isn't allowed to release the
3107  * mmap_sem until mm_drop_all_locks() returns.
3108  *
3109  * mmap_sem in write mode is required in order to block all operations
3110  * that could modify pagetables and free pages without need of
3111  * altering the vma layout (for example populate_range() with
3112  * nonlinear vmas). It's also needed in write mode to avoid new
3113  * anon_vmas to be associated with existing vmas.
3114  *
3115  * A single task can't take more than one mm_take_all_locks() in a row
3116  * or it would deadlock.
3117  *
3118  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3119  * mapping->flags avoid to take the same lock twice, if more than one
3120  * vma in this mm is backed by the same anon_vma or address_space.
3121  *
3122  * We can take all the locks in random order because the VM code
3123  * taking i_mmap_rwsem or anon_vma->rwsem outside the mmap_sem never
3124  * takes more than one of them in a row. Secondly we're protected
3125  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
3126  *
3127  * mm_take_all_locks() and mm_drop_all_locks are expensive operations
3128  * that may have to take thousand of locks.
3129  *
3130  * mm_take_all_locks() can fail if it's interrupted by signals.
3131  */
3132 int mm_take_all_locks(struct mm_struct *mm)
3133 {
3134 	struct vm_area_struct *vma;
3135 	struct anon_vma_chain *avc;
3136 
3137 	BUG_ON(down_read_trylock(&mm->mmap_sem));
3138 
3139 	mutex_lock(&mm_all_locks_mutex);
3140 
3141 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
3142 		if (signal_pending(current))
3143 			goto out_unlock;
3144 		if (vma->vm_file && vma->vm_file->f_mapping)
3145 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
3146 	}
3147 
3148 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
3149 		if (signal_pending(current))
3150 			goto out_unlock;
3151 		if (vma->anon_vma)
3152 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3153 				vm_lock_anon_vma(mm, avc->anon_vma);
3154 	}
3155 
3156 	return 0;
3157 
3158 out_unlock:
3159 	mm_drop_all_locks(mm);
3160 	return -EINTR;
3161 }
3162 
3163 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3164 {
3165 	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
3166 		/*
3167 		 * The LSB of head.next can't change to 0 from under
3168 		 * us because we hold the mm_all_locks_mutex.
3169 		 *
3170 		 * We must however clear the bitflag before unlocking
3171 		 * the vma so the users using the anon_vma->rb_root will
3172 		 * never see our bitflag.
3173 		 *
3174 		 * No need of atomic instructions here, head.next
3175 		 * can't change from under us until we release the
3176 		 * anon_vma->root->rwsem.
3177 		 */
3178 		if (!__test_and_clear_bit(0, (unsigned long *)
3179 					  &anon_vma->root->rb_root.rb_node))
3180 			BUG();
3181 		anon_vma_unlock_write(anon_vma);
3182 	}
3183 }
3184 
3185 static void vm_unlock_mapping(struct address_space *mapping)
3186 {
3187 	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3188 		/*
3189 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
3190 		 * because we hold the mm_all_locks_mutex.
3191 		 */
3192 		i_mmap_unlock_write(mapping);
3193 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3194 					&mapping->flags))
3195 			BUG();
3196 	}
3197 }
3198 
3199 /*
3200  * The mmap_sem cannot be released by the caller until
3201  * mm_drop_all_locks() returns.
3202  */
3203 void mm_drop_all_locks(struct mm_struct *mm)
3204 {
3205 	struct vm_area_struct *vma;
3206 	struct anon_vma_chain *avc;
3207 
3208 	BUG_ON(down_read_trylock(&mm->mmap_sem));
3209 	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3210 
3211 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
3212 		if (vma->anon_vma)
3213 			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3214 				vm_unlock_anon_vma(avc->anon_vma);
3215 		if (vma->vm_file && vma->vm_file->f_mapping)
3216 			vm_unlock_mapping(vma->vm_file->f_mapping);
3217 	}
3218 
3219 	mutex_unlock(&mm_all_locks_mutex);
3220 }
3221 
3222 /*
3223  * initialise the VMA slab
3224  */
3225 void __init mmap_init(void)
3226 {
3227 	int ret;
3228 
3229 	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3230 	VM_BUG_ON(ret);
3231 }
3232 
3233 /*
3234  * Initialise sysctl_user_reserve_kbytes.
3235  *
3236  * This is intended to prevent a user from starting a single memory hogging
3237  * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
3238  * mode.
3239  *
3240  * The default value is min(3% of free memory, 128MB)
3241  * 128MB is enough to recover with sshd/login, bash, and top/kill.
3242  */
3243 static int init_user_reserve(void)
3244 {
3245 	unsigned long free_kbytes;
3246 
3247 	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3248 
3249 	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3250 	return 0;
3251 }
3252 subsys_initcall(init_user_reserve);
3253 
3254 /*
3255  * Initialise sysctl_admin_reserve_kbytes.
3256  *
3257  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3258  * to log in and kill a memory hogging process.
3259  *
3260  * Systems with more than 256MB will reserve 8MB, enough to recover
3261  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3262  * only reserve 3% of free pages by default.
3263  */
3264 static int init_admin_reserve(void)
3265 {
3266 	unsigned long free_kbytes;
3267 
3268 	free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3269 
3270 	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3271 	return 0;
3272 }
3273 subsys_initcall(init_admin_reserve);
3274 
3275 /*
3276  * Reinititalise user and admin reserves if memory is added or removed.
3277  *
3278  * The default user reserve max is 128MB, and the default max for the
3279  * admin reserve is 8MB. These are usually, but not always, enough to
3280  * enable recovery from a memory hogging process using login/sshd, a shell,
3281  * and tools like top. It may make sense to increase or even disable the
3282  * reserve depending on the existence of swap or variations in the recovery
3283  * tools. So, the admin may have changed them.
3284  *
3285  * If memory is added and the reserves have been eliminated or increased above
3286  * the default max, then we'll trust the admin.
3287  *
3288  * If memory is removed and there isn't enough free memory, then we
3289  * need to reset the reserves.
3290  *
3291  * Otherwise keep the reserve set by the admin.
3292  */
3293 static int reserve_mem_notifier(struct notifier_block *nb,
3294 			     unsigned long action, void *data)
3295 {
3296 	unsigned long tmp, free_kbytes;
3297 
3298 	switch (action) {
3299 	case MEM_ONLINE:
3300 		/* Default max is 128MB. Leave alone if modified by operator. */
3301 		tmp = sysctl_user_reserve_kbytes;
3302 		if (0 < tmp && tmp < (1UL << 17))
3303 			init_user_reserve();
3304 
3305 		/* Default max is 8MB.  Leave alone if modified by operator. */
3306 		tmp = sysctl_admin_reserve_kbytes;
3307 		if (0 < tmp && tmp < (1UL << 13))
3308 			init_admin_reserve();
3309 
3310 		break;
3311 	case MEM_OFFLINE:
3312 		free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3313 
3314 		if (sysctl_user_reserve_kbytes > free_kbytes) {
3315 			init_user_reserve();
3316 			pr_info("vm.user_reserve_kbytes reset to %lu\n",
3317 				sysctl_user_reserve_kbytes);
3318 		}
3319 
3320 		if (sysctl_admin_reserve_kbytes > free_kbytes) {
3321 			init_admin_reserve();
3322 			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3323 				sysctl_admin_reserve_kbytes);
3324 		}
3325 		break;
3326 	default:
3327 		break;
3328 	}
3329 	return NOTIFY_OK;
3330 }
3331 
3332 static struct notifier_block reserve_mem_nb = {
3333 	.notifier_call = reserve_mem_notifier,
3334 };
3335 
3336 static int __meminit init_reserve_notifier(void)
3337 {
3338 	if (register_hotmemory_notifier(&reserve_mem_nb))
3339 		pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3340 
3341 	return 0;
3342 }
3343 subsys_initcall(init_reserve_notifier);
3344