1  /*
2   * mm/mmap.c
3   *
4   * Written by obz.
5   *
6   * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
7   */
8  
9  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10  
11  #include <linux/kernel.h>
12  #include <linux/slab.h>
13  #include <linux/backing-dev.h>
14  #include <linux/mm.h>
15  #include <linux/vmacache.h>
16  #include <linux/shm.h>
17  #include <linux/mman.h>
18  #include <linux/pagemap.h>
19  #include <linux/swap.h>
20  #include <linux/syscalls.h>
21  #include <linux/capability.h>
22  #include <linux/init.h>
23  #include <linux/file.h>
24  #include <linux/fs.h>
25  #include <linux/personality.h>
26  #include <linux/security.h>
27  #include <linux/hugetlb.h>
28  #include <linux/shmem_fs.h>
29  #include <linux/profile.h>
30  #include <linux/export.h>
31  #include <linux/mount.h>
32  #include <linux/mempolicy.h>
33  #include <linux/rmap.h>
34  #include <linux/mmu_notifier.h>
35  #include <linux/mmdebug.h>
36  #include <linux/perf_event.h>
37  #include <linux/audit.h>
38  #include <linux/khugepaged.h>
39  #include <linux/uprobes.h>
40  #include <linux/rbtree_augmented.h>
41  #include <linux/notifier.h>
42  #include <linux/memory.h>
43  #include <linux/printk.h>
44  #include <linux/userfaultfd_k.h>
45  #include <linux/moduleparam.h>
46  #include <linux/pkeys.h>
47  #include <linux/oom.h>
48  
49  #include <linux/uaccess.h>
50  #include <asm/cacheflush.h>
51  #include <asm/tlb.h>
52  #include <asm/mmu_context.h>
53  
54  #include "internal.h"
55  
56  #ifndef arch_mmap_check
57  #define arch_mmap_check(addr, len, flags)	(0)
58  #endif
59  
60  #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
61  const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
62  const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
63  int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
64  #endif
65  #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
66  const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
67  const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
68  int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
69  #endif
70  
71  static bool ignore_rlimit_data;
72  core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
73  
74  static void unmap_region(struct mm_struct *mm,
75  		struct vm_area_struct *vma, struct vm_area_struct *prev,
76  		unsigned long start, unsigned long end);
77  
78  /* Description of the effects of mapping type and prot in the current
79   * implementation. This is due to the limited x86 page protection
80   * hardware.  The expected behavior is in parens:
81   *
82   * map_type	prot
83   *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
84   * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
85   *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
86   *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
87   *
88   * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
89   *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
90   *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
91   *
92   * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
93   * MAP_PRIVATE:
94   *								r: (no) no
95   *								w: (no) no
96   *								x: (yes) yes
97   */
98  pgprot_t protection_map[16] __ro_after_init = {
99  	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
100  	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
101  };
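/*
 * Illustrative userspace sketch (not part of this file): the
 * "w: (copy) copy" entry in the MAP_PRIVATE row above means that a
 * write to a private file mapping lands in a private copy-on-write
 * page and never reaches the file. Assumes a non-empty, readable
 * file at the example path "/etc/hostname".
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static int cow_demo(void)
{
	int fd = open("/etc/hostname", O_RDONLY);
	char *p;

	if (fd < 0)
		return -1;
	/* PROT_WRITE is allowed on MAP_PRIVATE even for an O_RDONLY fd */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return -1;
	p[0] = '#';	/* faults in a CoW copy; the file is unmodified */
	printf("private copy now starts with '%c'\n", p[0]);
	munmap(p, 4096);
	close(fd);
	return 0;
}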
102  
103  #ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
104  static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
105  {
106  	return prot;
107  }
108  #endif
109  
110  pgprot_t vm_get_page_prot(unsigned long vm_flags)
111  {
112  	pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
113  				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
114  			pgprot_val(arch_vm_get_page_prot(vm_flags)));
115  
116  	return arch_filter_pgprot(ret);
117  }
118  EXPORT_SYMBOL(vm_get_page_prot);
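/*
 * Worked example for the lookup above (illustrative sketch, not
 * compiled): VM_READ (0x1), VM_WRITE (0x2), VM_EXEC (0x4) and
 * VM_SHARED (0x8) form a 4-bit index, so the 16 table slots map
 * one-to-one onto __P000..__P111 and __S000..__S111.
 */
#if 0
	pgprot_t prot;

	/* private read/write mapping: index 0x3 -> __P011 (copy-on-write) */
	prot = vm_get_page_prot(VM_READ | VM_WRITE);

	/* shared read/write mapping: index 0xb -> __S011 (shared writable) */
	prot = vm_get_page_prot(VM_READ | VM_WRITE | VM_SHARED);
#endif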
119  
120  static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
121  {
122  	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
123  }
124  
125  /* Update vma->vm_page_prot to reflect vma->vm_flags. */
126  void vma_set_page_prot(struct vm_area_struct *vma)
127  {
128  	unsigned long vm_flags = vma->vm_flags;
129  	pgprot_t vm_page_prot;
130  
131  	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
132  	if (vma_wants_writenotify(vma, vm_page_prot)) {
133  		vm_flags &= ~VM_SHARED;
134  		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
135  	}
136  	/* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */
137  	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
138  }
139  
140  /*
141   * Requires inode->i_mapping->i_mmap_rwsem
142   */
143  static void __remove_shared_vm_struct(struct vm_area_struct *vma,
144  		struct file *file, struct address_space *mapping)
145  {
146  	if (vma->vm_flags & VM_DENYWRITE)
147  		atomic_inc(&file_inode(file)->i_writecount);
148  	if (vma->vm_flags & VM_SHARED)
149  		mapping_unmap_writable(mapping);
150  
151  	flush_dcache_mmap_lock(mapping);
152  	vma_interval_tree_remove(vma, &mapping->i_mmap);
153  	flush_dcache_mmap_unlock(mapping);
154  }
155  
156  /*
157   * Unlink a file-based vm structure from its interval tree, to hide
158   * vma from rmap and vmtruncate before freeing its page tables.
159   */
160  void unlink_file_vma(struct vm_area_struct *vma)
161  {
162  	struct file *file = vma->vm_file;
163  
164  	if (file) {
165  		struct address_space *mapping = file->f_mapping;
166  		i_mmap_lock_write(mapping);
167  		__remove_shared_vm_struct(vma, file, mapping);
168  		i_mmap_unlock_write(mapping);
169  	}
170  }
171  
172  /*
173   * Close a vm structure and free it, returning the next.
174   */
175  static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
176  {
177  	struct vm_area_struct *next = vma->vm_next;
178  
179  	might_sleep();
180  	if (vma->vm_ops && vma->vm_ops->close)
181  		vma->vm_ops->close(vma);
182  	if (vma->vm_file)
183  		fput(vma->vm_file);
184  	mpol_put(vma_policy(vma));
185  	vm_area_free(vma);
186  	return next;
187  }
188  
189  static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
190  		struct list_head *uf);
191  SYSCALL_DEFINE1(brk, unsigned long, brk)
192  {
193  	unsigned long retval;
194  	unsigned long newbrk, oldbrk, origbrk;
195  	struct mm_struct *mm = current->mm;
196  	struct vm_area_struct *next;
197  	unsigned long min_brk;
198  	bool populate;
199  	bool downgraded = false;
200  	LIST_HEAD(uf);
201  
202  	if (down_write_killable(&mm->mmap_sem))
203  		return -EINTR;
204  
205  	origbrk = mm->brk;
206  
207  #ifdef CONFIG_COMPAT_BRK
208  	/*
209  	 * CONFIG_COMPAT_BRK can still be overridden by setting
210  	 * randomize_va_space to 2, which will still cause mm->start_brk
211  	 * to be arbitrarily shifted
212  	 */
213  	if (current->brk_randomized)
214  		min_brk = mm->start_brk;
215  	else
216  		min_brk = mm->end_data;
217  #else
218  	min_brk = mm->start_brk;
219  #endif
220  	if (brk < min_brk)
221  		goto out;
222  
223  	/*
224  	 * Check against rlimit here. If this check is done later, after the
225  	 * test of oldbrk against newbrk, then it can escape the test and let
226  	 * the data segment grow beyond its set limit in the case where the
227  	 * limit is not page aligned. -Ram Gupta
228  	 */
229  	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
230  			      mm->end_data, mm->start_data))
231  		goto out;
232  
233  	newbrk = PAGE_ALIGN(brk);
234  	oldbrk = PAGE_ALIGN(mm->brk);
235  	if (oldbrk == newbrk) {
236  		mm->brk = brk;
237  		goto success;
238  	}
239  
240  	/*
241  	 * Always allow shrinking brk.
242  	 * __do_munmap() may downgrade mmap_sem to read.
243  	 */
244  	if (brk <= mm->brk) {
245  		int ret;
246  
247  		/*
248  		 * mm->brk must be protected by write mmap_sem so update it
249  		 * before downgrading mmap_sem. When __do_munmap() fails,
250  		 * mm->brk will be restored from origbrk.
251  		 */
252  		mm->brk = brk;
253  		ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
254  		if (ret < 0) {
255  			mm->brk = origbrk;
256  			goto out;
257  		} else if (ret == 1) {
258  			downgraded = true;
259  		}
260  		goto success;
261  	}
262  
263  	/* Check against existing mmap mappings. */
264  	next = find_vma(mm, oldbrk);
265  	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
266  		goto out;
267  
268  	/* Ok, looks good - let it rip. */
269  	if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
270  		goto out;
271  	mm->brk = brk;
272  
273  success:
274  	populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
275  	if (downgraded)
276  		up_read(&mm->mmap_sem);
277  	else
278  		up_write(&mm->mmap_sem);
279  	userfaultfd_unmap_complete(mm, &uf);
280  	if (populate)
281  		mm_populate(oldbrk, newbrk - oldbrk);
282  	return brk;
283  
284  out:
285  	retval = origbrk;
286  	up_write(&mm->mmap_sem);
287  	return retval;
288  }
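/*
 * Illustrative userspace sketch of the syscall above, via glibc's
 * sbrk() wrapper (not part of this file): growing the break takes the
 * do_brk_flags() path, shrinking it takes the __do_munmap() path.
 */
#include <stdio.h>
#include <unistd.h>

static int brk_demo(void)
{
	void *start = sbrk(0);		/* current program break */

	if (sbrk(4096) == (void *)-1)	/* grow the data segment one page */
		return -1;
	printf("break moved from %p to %p\n", start, sbrk(0));
	sbrk(-4096);			/* shrink it back */
	return 0;
}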
289  
290  static long vma_compute_subtree_gap(struct vm_area_struct *vma)
291  {
292  	unsigned long max, prev_end, subtree_gap;
293  
294  	/*
295  	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
296  	 * allow two stack_guard_gaps between them here, and when choosing
297  	 * an unmapped area; whereas when expanding we only require one.
298  	 * That's a little inconsistent, but keeps the code here simpler.
299  	 */
300  	max = vm_start_gap(vma);
301  	if (vma->vm_prev) {
302  		prev_end = vm_end_gap(vma->vm_prev);
303  		if (max > prev_end)
304  			max -= prev_end;
305  		else
306  			max = 0;
307  	}
308  	if (vma->vm_rb.rb_left) {
309  		subtree_gap = rb_entry(vma->vm_rb.rb_left,
310  				struct vm_area_struct, vm_rb)->rb_subtree_gap;
311  		if (subtree_gap > max)
312  			max = subtree_gap;
313  	}
314  	if (vma->vm_rb.rb_right) {
315  		subtree_gap = rb_entry(vma->vm_rb.rb_right,
316  				struct vm_area_struct, vm_rb)->rb_subtree_gap;
317  		if (subtree_gap > max)
318  			max = subtree_gap;
319  	}
320  	return max;
321  }
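/*
 * Worked example for the computation above, assuming three vmas at
 * [0x1000,0x2000), [0x5000,0x6000) and [0x9000,0xa000): the middle
 * vma's own gap is 0x5000 - 0x2000 = 0x3000, and its rb_subtree_gap
 * is the maximum of that and its children's rb_subtree_gap values.
 * This lets the unmapped-area search skip whole subtrees that cannot
 * possibly contain a large enough gap.
 */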
322  
323  #ifdef CONFIG_DEBUG_VM_RB
324  static int browse_rb(struct mm_struct *mm)
325  {
326  	struct rb_root *root = &mm->mm_rb;
327  	int i = 0, j, bug = 0;
328  	struct rb_node *nd, *pn = NULL;
329  	unsigned long prev = 0, pend = 0;
330  
331  	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
332  		struct vm_area_struct *vma;
333  		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
334  		if (vma->vm_start < prev) {
335  			pr_emerg("vm_start %lx < prev %lx\n",
336  				  vma->vm_start, prev);
337  			bug = 1;
338  		}
339  		if (vma->vm_start < pend) {
340  			pr_emerg("vm_start %lx < pend %lx\n",
341  				  vma->vm_start, pend);
342  			bug = 1;
343  		}
344  		if (vma->vm_start > vma->vm_end) {
345  			pr_emerg("vm_start %lx > vm_end %lx\n",
346  				  vma->vm_start, vma->vm_end);
347  			bug = 1;
348  		}
349  		spin_lock(&mm->page_table_lock);
350  		if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
351  			pr_emerg("free gap %lx, correct %lx\n",
352  			       vma->rb_subtree_gap,
353  			       vma_compute_subtree_gap(vma));
354  			bug = 1;
355  		}
356  		spin_unlock(&mm->page_table_lock);
357  		i++;
358  		pn = nd;
359  		prev = vma->vm_start;
360  		pend = vma->vm_end;
361  	}
362  	j = 0;
363  	for (nd = pn; nd; nd = rb_prev(nd))
364  		j++;
365  	if (i != j) {
366  		pr_emerg("backwards %d, forwards %d\n", j, i);
367  		bug = 1;
368  	}
369  	return bug ? -1 : i;
370  }
371  
372  static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
373  {
374  	struct rb_node *nd;
375  
376  	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
377  		struct vm_area_struct *vma;
378  		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
379  		VM_BUG_ON_VMA(vma != ignore &&
380  			vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
381  			vma);
382  	}
383  }
384  
385  static void validate_mm(struct mm_struct *mm)
386  {
387  	int bug = 0;
388  	int i = 0;
389  	unsigned long highest_address = 0;
390  	struct vm_area_struct *vma = mm->mmap;
391  
392  	while (vma) {
393  		struct anon_vma *anon_vma = vma->anon_vma;
394  		struct anon_vma_chain *avc;
395  
396  		if (anon_vma) {
397  			anon_vma_lock_read(anon_vma);
398  			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
399  				anon_vma_interval_tree_verify(avc);
400  			anon_vma_unlock_read(anon_vma);
401  		}
402  
403  		highest_address = vm_end_gap(vma);
404  		vma = vma->vm_next;
405  		i++;
406  	}
407  	if (i != mm->map_count) {
408  		pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
409  		bug = 1;
410  	}
411  	if (highest_address != mm->highest_vm_end) {
412  		pr_emerg("mm->highest_vm_end %lx, found %lx\n",
413  			  mm->highest_vm_end, highest_address);
414  		bug = 1;
415  	}
416  	i = browse_rb(mm);
417  	if (i != mm->map_count) {
418  		if (i != -1)
419  			pr_emerg("map_count %d rb %d\n", mm->map_count, i);
420  		bug = 1;
421  	}
422  	VM_BUG_ON_MM(bug, mm);
423  }
424  #else
425  #define validate_mm_rb(root, ignore) do { } while (0)
426  #define validate_mm(mm) do { } while (0)
427  #endif
428  
429  RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
430  		     unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
431  
432  /*
433   * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
434   * vma->vm_prev->vm_end values changed, without modifying the vma's position
435   * in the rbtree.
436   */
437  static void vma_gap_update(struct vm_area_struct *vma)
438  {
439  	/*
440  	 * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
441  	 * function that does exactly what we want.
442  	 */
443  	vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
444  }
445  
446  static inline void vma_rb_insert(struct vm_area_struct *vma,
447  				 struct rb_root *root)
448  {
449  	/* All rb_subtree_gap values must be consistent prior to insertion */
450  	validate_mm_rb(root, NULL);
451  
452  	rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
453  }
454  
455  static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
456  {
457  	/*
458  	 * Note rb_erase_augmented is a fairly large inline function,
459  	 * so make sure we instantiate it only once with our desired
460  	 * augmented rbtree callbacks.
461  	 */
462  	rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
463  }
464  
465  static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
466  						struct rb_root *root,
467  						struct vm_area_struct *ignore)
468  {
469  	/*
470  	 * All rb_subtree_gap values must be consistent prior to erase,
471  	 * with the possible exception of the "next" vma being erased if
472  	 * next->vm_start was reduced.
473  	 */
474  	validate_mm_rb(root, ignore);
475  
476  	__vma_rb_erase(vma, root);
477  }
478  
479  static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
480  					 struct rb_root *root)
481  {
482  	/*
483  	 * All rb_subtree_gap values must be consistent prior to erase,
484  	 * with the possible exception of the vma being erased.
485  	 */
486  	validate_mm_rb(root, vma);
487  
488  	__vma_rb_erase(vma, root);
489  }
490  
491  /*
492   * vma has some anon_vma assigned, and is already inserted on that
493   * anon_vma's interval trees.
494   *
495   * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
496   * vma must be removed from the anon_vma's interval trees using
497   * anon_vma_interval_tree_pre_update_vma().
498   *
499   * After the update, the vma will be reinserted using
500   * anon_vma_interval_tree_post_update_vma().
501   *
502   * The entire update must be protected by exclusive mmap_sem and by
503   * the root anon_vma's mutex.
504   */
505  static inline void
506  anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
507  {
508  	struct anon_vma_chain *avc;
509  
510  	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
511  		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
512  }
513  
514  static inline void
515  anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
516  {
517  	struct anon_vma_chain *avc;
518  
519  	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
520  		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
521  }
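/*
 * Condensed sketch of the protocol described above (illustrative, not
 * compiled; new_start and new_end are placeholders). __vma_adjust()
 * below is the real user.
 */
#if 0
	anon_vma_lock_write(anon_vma);
	anon_vma_interval_tree_pre_update_vma(vma);	/* take vma out */
	vma->vm_start = new_start;			/* now safe to edit */
	vma->vm_end = new_end;
	anon_vma_interval_tree_post_update_vma(vma);	/* reinsert it */
	anon_vma_unlock_write(anon_vma);
#endif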
522  
523  static int find_vma_links(struct mm_struct *mm, unsigned long addr,
524  		unsigned long end, struct vm_area_struct **pprev,
525  		struct rb_node ***rb_link, struct rb_node **rb_parent)
526  {
527  	struct rb_node **__rb_link, *__rb_parent, *rb_prev;
528  
529  	__rb_link = &mm->mm_rb.rb_node;
530  	rb_prev = __rb_parent = NULL;
531  
532  	while (*__rb_link) {
533  		struct vm_area_struct *vma_tmp;
534  
535  		__rb_parent = *__rb_link;
536  		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
537  
538  		if (vma_tmp->vm_end > addr) {
539  			/* Fail if an existing vma overlaps the area */
540  			if (vma_tmp->vm_start < end)
541  				return -ENOMEM;
542  			__rb_link = &__rb_parent->rb_left;
543  		} else {
544  			rb_prev = __rb_parent;
545  			__rb_link = &__rb_parent->rb_right;
546  		}
547  	}
548  
549  	*pprev = NULL;
550  	if (rb_prev)
551  		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
552  	*rb_link = __rb_link;
553  	*rb_parent = __rb_parent;
554  	return 0;
555  }
556  
557  static unsigned long count_vma_pages_range(struct mm_struct *mm,
558  		unsigned long addr, unsigned long end)
559  {
560  	unsigned long nr_pages = 0;
561  	struct vm_area_struct *vma;
562  
563  	/* Find first overlapping mapping */
564  	vma = find_vma_intersection(mm, addr, end);
565  	if (!vma)
566  		return 0;
567  
568  	nr_pages = (min(end, vma->vm_end) -
569  		max(addr, vma->vm_start)) >> PAGE_SHIFT;
570  
571  	/* Iterate over the rest of the overlaps */
572  	for (vma = vma->vm_next; vma; vma = vma->vm_next) {
573  		unsigned long overlap_len;
574  
575  		if (vma->vm_start > end)
576  			break;
577  
578  		overlap_len = min(end, vma->vm_end) - vma->vm_start;
579  		nr_pages += overlap_len >> PAGE_SHIFT;
580  	}
581  
582  	return nr_pages;
583  }
584  
585  void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
586  		struct rb_node **rb_link, struct rb_node *rb_parent)
587  {
588  	/* Update tracking information for the gap following the new vma. */
589  	if (vma->vm_next)
590  		vma_gap_update(vma->vm_next);
591  	else
592  		mm->highest_vm_end = vm_end_gap(vma);
593  
594  	/*
595  	 * vma->vm_prev wasn't known when we followed the rbtree to find the
596  	 * correct insertion point for that vma. As a result, we could not
597  	 * update the rb_subtree_gap values of the vma's vm_rb parents on the way down.
598  	 * So, we first insert the vma with a zero rb_subtree_gap value
599  	 * (to be consistent with what we did on the way down), and then
600  	 * immediately update the gap to the correct value. Finally we
601  	 * rebalance the rbtree after all augmented values have been set.
602  	 */
603  	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
604  	vma->rb_subtree_gap = 0;
605  	vma_gap_update(vma);
606  	vma_rb_insert(vma, &mm->mm_rb);
607  }
608  
609  static void __vma_link_file(struct vm_area_struct *vma)
610  {
611  	struct file *file;
612  
613  	file = vma->vm_file;
614  	if (file) {
615  		struct address_space *mapping = file->f_mapping;
616  
617  		if (vma->vm_flags & VM_DENYWRITE)
618  			atomic_dec(&file_inode(file)->i_writecount);
619  		if (vma->vm_flags & VM_SHARED)
620  			atomic_inc(&mapping->i_mmap_writable);
621  
622  		flush_dcache_mmap_lock(mapping);
623  		vma_interval_tree_insert(vma, &mapping->i_mmap);
624  		flush_dcache_mmap_unlock(mapping);
625  	}
626  }
627  
628  static void
629  __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
630  	struct vm_area_struct *prev, struct rb_node **rb_link,
631  	struct rb_node *rb_parent)
632  {
633  	__vma_link_list(mm, vma, prev, rb_parent);
634  	__vma_link_rb(mm, vma, rb_link, rb_parent);
635  }
636  
637  static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
638  			struct vm_area_struct *prev, struct rb_node **rb_link,
639  			struct rb_node *rb_parent)
640  {
641  	struct address_space *mapping = NULL;
642  
643  	if (vma->vm_file) {
644  		mapping = vma->vm_file->f_mapping;
645  		i_mmap_lock_write(mapping);
646  	}
647  
648  	__vma_link(mm, vma, prev, rb_link, rb_parent);
649  	__vma_link_file(vma);
650  
651  	if (mapping)
652  		i_mmap_unlock_write(mapping);
653  
654  	mm->map_count++;
655  	validate_mm(mm);
656  }
657  
658  /*
659   * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
660   * mm's list and rbtree.  It has already been inserted into the interval tree.
661   */
662  static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
663  {
664  	struct vm_area_struct *prev;
665  	struct rb_node **rb_link, *rb_parent;
666  
667  	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
668  			   &prev, &rb_link, &rb_parent))
669  		BUG();
670  	__vma_link(mm, vma, prev, rb_link, rb_parent);
671  	mm->map_count++;
672  }
673  
674  static __always_inline void __vma_unlink_common(struct mm_struct *mm,
675  						struct vm_area_struct *vma,
676  						struct vm_area_struct *prev,
677  						bool has_prev,
678  						struct vm_area_struct *ignore)
679  {
680  	struct vm_area_struct *next;
681  
682  	vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
683  	next = vma->vm_next;
684  	if (has_prev)
685  		prev->vm_next = next;
686  	else {
687  		prev = vma->vm_prev;
688  		if (prev)
689  			prev->vm_next = next;
690  		else
691  			mm->mmap = next;
692  	}
693  	if (next)
694  		next->vm_prev = prev;
695  
696  	/* Kill the cache */
697  	vmacache_invalidate(mm);
698  }
699  
700  static inline void __vma_unlink_prev(struct mm_struct *mm,
701  				     struct vm_area_struct *vma,
702  				     struct vm_area_struct *prev)
703  {
704  	__vma_unlink_common(mm, vma, prev, true, vma);
705  }
706  
707  /*
708   * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
709   * is already present in an i_mmap tree without adjusting the tree.
710   * The following helper function should be used when such adjustments
711   * are necessary.  The "insert" vma (if any) is to be inserted
712   * before we drop the necessary locks.
713   */
714  int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
715  	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
716  	struct vm_area_struct *expand)
717  {
718  	struct mm_struct *mm = vma->vm_mm;
719  	struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
720  	struct address_space *mapping = NULL;
721  	struct rb_root_cached *root = NULL;
722  	struct anon_vma *anon_vma = NULL;
723  	struct file *file = vma->vm_file;
724  	bool start_changed = false, end_changed = false;
725  	long adjust_next = 0;
726  	int remove_next = 0;
727  
728  	if (next && !insert) {
729  		struct vm_area_struct *exporter = NULL, *importer = NULL;
730  
731  		if (end >= next->vm_end) {
732  			/*
733  			 * vma expands, overlapping all the next, and
734  			 * perhaps the one after too (mprotect case 6).
735  			 * The only other cases that get here are
736  			 * case 1, case 7 and case 8.
737  			 */
738  			if (next == expand) {
739  				/*
740  				 * The only case where we don't expand "vma"
741  				 * and we expand "next" instead is case 8.
742  				 */
743  				VM_WARN_ON(end != next->vm_end);
744  				/*
745  				 * remove_next == 3 means we're
746  				 * removing "vma" and that to do so we
747  				 * swapped "vma" and "next".
748  				 */
749  				remove_next = 3;
750  				VM_WARN_ON(file != next->vm_file);
751  				swap(vma, next);
752  			} else {
753  				VM_WARN_ON(expand != vma);
754  				/*
755  				 * case 1, 6, 7, remove_next == 2 is case 6,
756  				 * remove_next == 1 is case 1 or 7.
757  				 */
758  				remove_next = 1 + (end > next->vm_end);
759  				VM_WARN_ON(remove_next == 2 &&
760  					   end != next->vm_next->vm_end);
761  				VM_WARN_ON(remove_next == 1 &&
762  					   end != next->vm_end);
763  				/* trim end to next, for case 6 first pass */
764  				end = next->vm_end;
765  			}
766  
767  			exporter = next;
768  			importer = vma;
769  
770  			/*
771  			 * If next doesn't have anon_vma, import from vma after
772  			 * next, if the vma overlaps with it.
773  			 */
774  			if (remove_next == 2 && !next->anon_vma)
775  				exporter = next->vm_next;
776  
777  		} else if (end > next->vm_start) {
778  			/*
779  			 * vma expands, overlapping part of the next:
780  			 * mprotect case 5 shifting the boundary up.
781  			 */
782  			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
783  			exporter = next;
784  			importer = vma;
785  			VM_WARN_ON(expand != importer);
786  		} else if (end < vma->vm_end) {
787  			/*
788  			 * vma shrinks, and !insert tells us it's not
789  			 * split_vma inserting another: so it must be
790  			 * mprotect case 4 shifting the boundary down.
791  			 */
792  			adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
793  			exporter = vma;
794  			importer = next;
795  			VM_WARN_ON(expand != importer);
796  		}
797  
798  		/*
799  		 * Easily overlooked: when mprotect shifts the boundary,
800  		 * make sure the expanding vma has anon_vma set if the
801  		 * shrinking vma did, to cover any anon pages imported.
802  		 */
803  		if (exporter && exporter->anon_vma && !importer->anon_vma) {
804  			int error;
805  
806  			importer->anon_vma = exporter->anon_vma;
807  			error = anon_vma_clone(importer, exporter);
808  			if (error)
809  				return error;
810  		}
811  	}
812  again:
813  	vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
814  
815  	if (file) {
816  		mapping = file->f_mapping;
817  		root = &mapping->i_mmap;
818  		uprobe_munmap(vma, vma->vm_start, vma->vm_end);
819  
820  		if (adjust_next)
821  			uprobe_munmap(next, next->vm_start, next->vm_end);
822  
823  		i_mmap_lock_write(mapping);
824  		if (insert) {
825  			/*
826  			 * Put into interval tree now, so instantiated pages
827  			 * are visible to arm/parisc __flush_dcache_page
828  			 * throughout; but we cannot insert into address
829  			 * space until vma start or end is updated.
830  			 */
831  			__vma_link_file(insert);
832  		}
833  	}
834  
835  	anon_vma = vma->anon_vma;
836  	if (!anon_vma && adjust_next)
837  		anon_vma = next->anon_vma;
838  	if (anon_vma) {
839  		VM_WARN_ON(adjust_next && next->anon_vma &&
840  			   anon_vma != next->anon_vma);
841  		anon_vma_lock_write(anon_vma);
842  		anon_vma_interval_tree_pre_update_vma(vma);
843  		if (adjust_next)
844  			anon_vma_interval_tree_pre_update_vma(next);
845  	}
846  
847  	if (root) {
848  		flush_dcache_mmap_lock(mapping);
849  		vma_interval_tree_remove(vma, root);
850  		if (adjust_next)
851  			vma_interval_tree_remove(next, root);
852  	}
853  
854  	if (start != vma->vm_start) {
855  		vma->vm_start = start;
856  		start_changed = true;
857  	}
858  	if (end != vma->vm_end) {
859  		vma->vm_end = end;
860  		end_changed = true;
861  	}
862  	vma->vm_pgoff = pgoff;
863  	if (adjust_next) {
864  		next->vm_start += adjust_next << PAGE_SHIFT;
865  		next->vm_pgoff += adjust_next;
866  	}
867  
868  	if (root) {
869  		if (adjust_next)
870  			vma_interval_tree_insert(next, root);
871  		vma_interval_tree_insert(vma, root);
872  		flush_dcache_mmap_unlock(mapping);
873  	}
874  
875  	if (remove_next) {
876  		/*
877  		 * vma_merge has merged next into vma, and needs
878  		 * us to remove next before dropping the locks.
879  		 */
880  		if (remove_next != 3)
881  			__vma_unlink_prev(mm, next, vma);
882  		else
883  			/*
884  			 * vma is not before next if they've been
885  			 * swapped.
886  			 *
887  			 * pre-swap() next->vm_start was reduced so
888  			 * tell validate_mm_rb to ignore pre-swap()
889  			 * "next" (which is stored in post-swap()
890  			 * "vma").
891  			 */
892  			__vma_unlink_common(mm, next, NULL, false, vma);
893  		if (file)
894  			__remove_shared_vm_struct(next, file, mapping);
895  	} else if (insert) {
896  		/*
897  		 * split_vma has split insert from vma, and needs
898  		 * us to insert it before dropping the locks
899  		 * (it may either follow vma or precede it).
900  		 */
901  		__insert_vm_struct(mm, insert);
902  	} else {
903  		if (start_changed)
904  			vma_gap_update(vma);
905  		if (end_changed) {
906  			if (!next)
907  				mm->highest_vm_end = vm_end_gap(vma);
908  			else if (!adjust_next)
909  				vma_gap_update(next);
910  		}
911  	}
912  
913  	if (anon_vma) {
914  		anon_vma_interval_tree_post_update_vma(vma);
915  		if (adjust_next)
916  			anon_vma_interval_tree_post_update_vma(next);
917  		anon_vma_unlock_write(anon_vma);
918  	}
919  	if (mapping)
920  		i_mmap_unlock_write(mapping);
921  
922  	if (root) {
923  		uprobe_mmap(vma);
924  
925  		if (adjust_next)
926  			uprobe_mmap(next);
927  	}
928  
929  	if (remove_next) {
930  		if (file) {
931  			uprobe_munmap(next, next->vm_start, next->vm_end);
932  			fput(file);
933  		}
934  		if (next->anon_vma)
935  			anon_vma_merge(vma, next);
936  		mm->map_count--;
937  		mpol_put(vma_policy(next));
938  		vm_area_free(next);
939  		/*
940  		 * In mprotect's case 6 (see comments on vma_merge),
941  		 * we must remove another next too. It would clutter
942  		 * up the code too much to do both in one go.
943  		 */
944  		if (remove_next != 3) {
945  			/*
946  			 * If "next" was removed and vma->vm_end was
947  			 * expanded (up) over it, in turn
948  			 * "next->vm_prev->vm_end" changed and the
949  			 * "vma->vm_next" gap must be updated.
950  			 */
951  			next = vma->vm_next;
952  		} else {
953  			/*
954  			 * For the scope of the comment "next" and
955  			 * "vma" considered pre-swap(): if "vma" was
956  			 * removed, next->vm_start was expanded (down)
957  			 * over it and the "next" gap must be updated.
958  			 * Because of the swap() the post-swap() "vma"
959  			 * actually points to pre-swap() "next"
960  			 * (post-swap() "next", by contrast, is now a
961  			 * dangling pointer).
962  			 */
963  			next = vma;
964  		}
965  		if (remove_next == 2) {
966  			remove_next = 1;
967  			end = next->vm_end;
968  			goto again;
969  		}
970  		else if (next)
971  			vma_gap_update(next);
972  		else {
973  			/*
974  			 * If remove_next == 2 we obviously can't
975  			 * reach this path.
976  			 *
977  			 * If remove_next == 3 we can't reach this
978  			 * path because pre-swap() next is never
979  			 * NULL. pre-swap() "next" is not being
980  			 * removed and its next->vm_end is not altered
981  			 * (and furthermore "end" already matches
982  			 * next->vm_end in remove_next == 3).
983  			 *
984  			 * We reach this only in the remove_next == 1
985  			 * case if the "next" vma that was removed was
986  			 * the highest vma of the mm. However in such
987  			 * case next->vm_end == "end" and the extended
988  			 * "vma" has vma->vm_end == next->vm_end so
989  			 * mm->highest_vm_end doesn't need any update
990  			 * in remove_next == 1 case.
991  			 */
992  			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
993  		}
994  	}
995  	if (insert && file)
996  		uprobe_mmap(insert);
997  
998  	validate_mm(mm);
999  
1000  	return 0;
1001  }
1002  
1003  /*
1004   * If the vma has a ->close operation then the driver probably needs to release
1005   * per-vma resources, so we don't attempt to merge those.
1006   */
1007  static inline int is_mergeable_vma(struct vm_area_struct *vma,
1008  				struct file *file, unsigned long vm_flags,
1009  				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1010  {
1011  	/*
1012  	 * VM_SOFTDIRTY should not prevent VMA merging if we match the
1013  	 * flags except for the dirty bit -- the caller should mark the
1014  	 * merged VMA as dirty. If the dirty bit were not excluded from
1015  	 * the comparison, we would increase pressure on the memory system,
1016  	 * forcing the kernel to generate new VMAs where old ones could be
1017  	 * extended instead.
1018  	 */
1019  	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
1020  		return 0;
1021  	if (vma->vm_file != file)
1022  		return 0;
1023  	if (vma->vm_ops && vma->vm_ops->close)
1024  		return 0;
1025  	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
1026  		return 0;
1027  	return 1;
1028  }
1029  
1030  static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
1031  					struct anon_vma *anon_vma2,
1032  					struct vm_area_struct *vma)
1033  {
1034  	/*
1035  	 * The list_is_singular() test is to avoid merging VMAs cloned from
1036  	 * parents. This improves scalability by reducing anon_vma lock contention.
1037  	 */
1038  	if ((!anon_vma1 || !anon_vma2) && (!vma ||
1039  		list_is_singular(&vma->anon_vma_chain)))
1040  		return 1;
1041  	return anon_vma1 == anon_vma2;
1042  }
1043  
1044  /*
1045   * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
1046   * in front of (at a lower virtual address and file offset than) the vma.
1047   *
1048   * We cannot merge two vmas if they have differently assigned (non-NULL)
1049   * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
1050   *
1051   * We don't check here for the merged mmap wrapping around the end of pagecache
1052   * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmaps which
1053   * wrap, nor mmaps which cover the final page at index -1UL.
1054   */
1055  static int
1056  can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
1057  		     struct anon_vma *anon_vma, struct file *file,
1058  		     pgoff_t vm_pgoff,
1059  		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1060  {
1061  	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
1062  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1063  		if (vma->vm_pgoff == vm_pgoff)
1064  			return 1;
1065  	}
1066  	return 0;
1067  }
1068  
1069  /*
1070   * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
1071   * beyond (at a higher virtual address and file offset than) the vma.
1072   *
1073   * We cannot merge two vmas if they have differently assigned (non-NULL)
1074   * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
1075   */
1076  static int
1077  can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
1078  		    struct anon_vma *anon_vma, struct file *file,
1079  		    pgoff_t vm_pgoff,
1080  		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1081  {
1082  	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
1083  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1084  		pgoff_t vm_pglen;
1085  		vm_pglen = vma_pages(vma);
1086  		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
1087  			return 1;
1088  	}
1089  	return 0;
1090  }
1091  
1092  /*
1093   * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
1094   * whether that can be merged with its predecessor or its successor.
1095   * Or both (it neatly fills a hole).
1096   *
1097   * In most cases - when called for mmap, brk or mremap - [addr,end) is
1098   * certain not to be mapped by the time vma_merge is called; but when
1099   * called for mprotect, it is certain to be already mapped (either at
1100   * an offset within prev, or at the start of next), and the flags of
1101   * this area are about to be changed to vm_flags - and the no-change
1102   * case has already been eliminated.
1103   *
1104   * The following mprotect cases have to be considered, where AAAA is
1105   * the area passed down from mprotect_fixup, never extending beyond one
1106   * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
1107   *
1108   *     AAAA             AAAA                AAAA          AAAA
1109   *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
1110   *    cannot merge    might become    might become    might become
1111   *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
1112   *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
1113   *    mremap move:                                    PPPPXXXXXXXX 8
1114   *        AAAA
1115   *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
1116   *    might become    case 1 below    case 2 below    case 3 below
1117   *
1118   * It is important for case 8 that the vma NNNN overlapping the
1119   * region AAAA is never extended over XXXX. Instead XXXX must
1120   * be extended in region AAAA and NNNN must be removed. This way in
1121   * all cases where vma_merge succeeds, the moment vma_adjust drops the
1122   * rmap_locks, the properties of the merged vma will be already
1123   * correct for the whole merged range. Some of those properties like
1124   * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
1125   * be correct for the whole merged range immediately after the
1126   * rmap_locks are released. Otherwise if XXXX would be removed and
1127   * NNNN would be extended over the XXXX range, remove_migration_ptes
1128   * or other rmap walkers (if working on addresses beyond the "end"
1129   * parameter) may establish ptes with the wrong permissions of NNNN
1130   * instead of the right permissions of XXXX.
1131   */
1132  struct vm_area_struct *vma_merge(struct mm_struct *mm,
1133  			struct vm_area_struct *prev, unsigned long addr,
1134  			unsigned long end, unsigned long vm_flags,
1135  			struct anon_vma *anon_vma, struct file *file,
1136  			pgoff_t pgoff, struct mempolicy *policy,
1137  			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1138  {
1139  	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
1140  	struct vm_area_struct *area, *next;
1141  	int err;
1142  
1143  	/*
1144  	 * We later require that vma->vm_flags == vm_flags,
1145  	 * so this tests vma->vm_flags & VM_SPECIAL, too.
1146  	 */
1147  	if (vm_flags & VM_SPECIAL)
1148  		return NULL;
1149  
1150  	if (prev)
1151  		next = prev->vm_next;
1152  	else
1153  		next = mm->mmap;
1154  	area = next;
1155  	if (area && area->vm_end == end)		/* cases 6, 7, 8 */
1156  		next = next->vm_next;
1157  
1158  	/* verify some invariants that must be enforced by the caller */
1159  	VM_WARN_ON(prev && addr <= prev->vm_start);
1160  	VM_WARN_ON(area && end > area->vm_end);
1161  	VM_WARN_ON(addr >= end);
1162  
1163  	/*
1164  	 * Can it merge with the predecessor?
1165  	 */
1166  	if (prev && prev->vm_end == addr &&
1167  			mpol_equal(vma_policy(prev), policy) &&
1168  			can_vma_merge_after(prev, vm_flags,
1169  					    anon_vma, file, pgoff,
1170  					    vm_userfaultfd_ctx)) {
1171  		/*
1172  		 * OK, it can.  Can we now merge in the successor as well?
1173  		 */
1174  		if (next && end == next->vm_start &&
1175  				mpol_equal(policy, vma_policy(next)) &&
1176  				can_vma_merge_before(next, vm_flags,
1177  						     anon_vma, file,
1178  						     pgoff+pglen,
1179  						     vm_userfaultfd_ctx) &&
1180  				is_mergeable_anon_vma(prev->anon_vma,
1181  						      next->anon_vma, NULL)) {
1182  							/* cases 1, 6 */
1183  			err = __vma_adjust(prev, prev->vm_start,
1184  					 next->vm_end, prev->vm_pgoff, NULL,
1185  					 prev);
1186  		} else					/* cases 2, 5, 7 */
1187  			err = __vma_adjust(prev, prev->vm_start,
1188  					 end, prev->vm_pgoff, NULL, prev);
1189  		if (err)
1190  			return NULL;
1191  		khugepaged_enter_vma_merge(prev, vm_flags);
1192  		return prev;
1193  	}
1194  
1195  	/*
1196  	 * Can this new request be merged in front of next?
1197  	 */
1198  	if (next && end == next->vm_start &&
1199  			mpol_equal(policy, vma_policy(next)) &&
1200  			can_vma_merge_before(next, vm_flags,
1201  					     anon_vma, file, pgoff+pglen,
1202  					     vm_userfaultfd_ctx)) {
1203  		if (prev && addr < prev->vm_end)	/* case 4 */
1204  			err = __vma_adjust(prev, prev->vm_start,
1205  					 addr, prev->vm_pgoff, NULL, next);
1206  		else {					/* cases 3, 8 */
1207  			err = __vma_adjust(area, addr, next->vm_end,
1208  					 next->vm_pgoff - pglen, NULL, next);
1209  			/*
1210  			 * In case 3 area is already equal to next and
1211  			 * this is a noop, but in case 8 "area" has
1212  			 * been removed and next was expanded over it.
1213  			 */
1214  			area = next;
1215  		}
1216  		if (err)
1217  			return NULL;
1218  		khugepaged_enter_vma_merge(area, vm_flags);
1219  		return area;
1220  	}
1221  
1222  	return NULL;
1223  }
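/*
 * Illustrative userspace sketch (not part of this file): two adjacent
 * anonymous mappings with identical flags are merged into one vma, as
 * can be observed in /proc/self/maps. Assumes the MAP_FIXED request
 * below succeeds at the requested address.
 */
#include <stdio.h>
#include <sys/mman.h>

static int merge_demo(void)
{
	long pg = 4096;
	char *a, *b;

	a = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (a == MAP_FAILED)
		return -1;
	/* carve out the second page, then map it back with equal flags */
	munmap(a + pg, pg);
	b = mmap(a + pg, pg, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	/* /proc/self/maps again shows a single [a, a + 2*pg) vma */
	printf("a=%p b=%p\n", (void *)a, (void *)b);
	return 0;
}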
1224  
1225  /*
1226   * Rough compatibility check to quickly see if it's even worth looking
1227   * at sharing an anon_vma.
1228   *
1229   * They need to have the same vm_file, and the flags can only differ
1230   * in things that mprotect may change.
1231   *
1232   * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1233   * we can merge the two vmas. For example, we refuse to merge a vma if
1234   * there is a vm_ops->close() function, because that indicates that the
1235   * driver is doing some kind of reference counting. But that doesn't
1236   * really matter for the anon_vma sharing case.
1237   */
1238  static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1239  {
1240  	return a->vm_end == b->vm_start &&
1241  		mpol_equal(vma_policy(a), vma_policy(b)) &&
1242  		a->vm_file == b->vm_file &&
1243  		!((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
1244  		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1245  }
1246  
1247  /*
1248   * Do some basic sanity checking to see if we can re-use the anon_vma
1249   * from 'old'. The 'a'/'b' vmas are in VM order - one of them will be
1250   * the same as 'old', the other will be the new one that is trying
1251   * to share the anon_vma.
1252   *
1253   * NOTE! This runs with mmap_sem held for reading, so it is possible that
1254   * the anon_vma of 'old' is concurrently in the process of being set up
1255   * by another page fault trying to merge _that_. But that's ok: if it
1256   * is being set up, that automatically means that it will be a singleton
1257   * acceptable for merging, so we can do all of this optimistically. But
1258   * we do that READ_ONCE() to make sure that we never re-load the pointer.
1259   *
1260   * IOW, the "list_is_singular()" test on the anon_vma_chain only
1261   * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1262   * is to return an anon_vma that is "complex" due to having gone through
1263   * a fork).
1264   *
1265   * We also make sure that the two vmas are compatible (adjacent,
1266   * and with the same memory policies). That's all stable, even with just
1267   * a read lock on the mmap_sem.
1268   */
1269  static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1270  {
1271  	if (anon_vma_compatible(a, b)) {
1272  		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1273  
1274  		if (anon_vma && list_is_singular(&old->anon_vma_chain))
1275  			return anon_vma;
1276  	}
1277  	return NULL;
1278  }
1279  
1280  /*
1281   * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1282   * neighbouring vmas for a suitable anon_vma, before it goes off
1283   * to allocate a new anon_vma.  The check matters because a repetitive
1284   * sequence of mprotects and faults may otherwise lead to distinct
1285   * anon_vmas being allocated, preventing vma merge in subsequent
1286   * mprotect.
1287   */
1288  struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1289  {
1290  	struct anon_vma *anon_vma;
1291  	struct vm_area_struct *near;
1292  
1293  	near = vma->vm_next;
1294  	if (!near)
1295  		goto try_prev;
1296  
1297  	anon_vma = reusable_anon_vma(near, vma, near);
1298  	if (anon_vma)
1299  		return anon_vma;
1300  try_prev:
1301  	near = vma->vm_prev;
1302  	if (!near)
1303  		goto none;
1304  
1305  	anon_vma = reusable_anon_vma(near, near, vma);
1306  	if (anon_vma)
1307  		return anon_vma;
1308  none:
1309  	/*
1310  	 * There's no absolute need to look only at touching neighbours:
1311  	 * we could search further afield for "compatible" anon_vmas.
1312  	 * But it would probably just be a waste of time searching,
1313  	 * or lead to too many vmas hanging off the same anon_vma.
1314  	 * We're trying to allow mprotect remerging later on,
1315  	 * not trying to minimize memory used for anon_vmas.
1316  	 */
1317  	return NULL;
1318  }
1319  
1320  /*
1321   * If a hint addr is less than mmap_min_addr, change the hint to be as
1322   * low as possible but still greater than mmap_min_addr.
1323   */
1324  static inline unsigned long round_hint_to_min(unsigned long hint)
1325  {
1326  	hint &= PAGE_MASK;
1327  	if (((void *)hint != NULL) &&
1328  	    (hint < mmap_min_addr))
1329  		return PAGE_ALIGN(mmap_min_addr);
1330  	return hint;
1331  }
1332  
1333  static inline int mlock_future_check(struct mm_struct *mm,
1334  				     unsigned long flags,
1335  				     unsigned long len)
1336  {
1337  	unsigned long locked, lock_limit;
1338  
1339  	/*  mlock MCL_FUTURE? */
1340  	if (flags & VM_LOCKED) {
1341  		locked = len >> PAGE_SHIFT;
1342  		locked += mm->locked_vm;
1343  		lock_limit = rlimit(RLIMIT_MEMLOCK);
1344  		lock_limit >>= PAGE_SHIFT;
1345  		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1346  			return -EAGAIN;
1347  	}
1348  	return 0;
1349  }
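/*
 * Illustrative userspace sketch (not part of this file): the limit
 * consulted by mlock_future_check() is the caller's RLIMIT_MEMLOCK,
 * compared in pages against mm->locked_vm plus the new length.
 */
#include <stdio.h>
#include <sys/resource.h>

static int show_memlock_budget(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return -1;
	printf("VM_LOCKED budget: %llu bytes\n",
	       (unsigned long long)rl.rlim_cur);
	return 0;
}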
1350  
1351  static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1352  {
1353  	if (S_ISREG(inode->i_mode))
1354  		return MAX_LFS_FILESIZE;
1355  
1356  	if (S_ISBLK(inode->i_mode))
1357  		return MAX_LFS_FILESIZE;
1358  
1359  	/* Special "we do even unsigned file positions" case */
1360  	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1361  		return 0;
1362  
1363  	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
1364  	return ULONG_MAX;
1365  }
1366  
1367  static inline bool file_mmap_ok(struct file *file, struct inode *inode,
1368  				unsigned long pgoff, unsigned long len)
1369  {
1370  	u64 maxsize = file_mmap_size_max(file, inode);
1371  
1372  	if (maxsize && len > maxsize)
1373  		return false;
1374  	maxsize -= len;
1375  	if (pgoff > maxsize >> PAGE_SHIFT)
1376  		return false;
1377  	return true;
1378  }
1379  
1380  /*
1381   * The caller must hold down_write(&current->mm->mmap_sem).
1382   */
1383  unsigned long do_mmap(struct file *file, unsigned long addr,
1384  			unsigned long len, unsigned long prot,
1385  			unsigned long flags, vm_flags_t vm_flags,
1386  			unsigned long pgoff, unsigned long *populate,
1387  			struct list_head *uf)
1388  {
1389  	struct mm_struct *mm = current->mm;
1390  	int pkey = 0;
1391  
1392  	*populate = 0;
1393  
1394  	if (!len)
1395  		return -EINVAL;
1396  
1397  	/*
1398  	 * Does the application expect PROT_READ to imply PROT_EXEC?
1399  	 *
1400  	 * (the exception is when the underlying filesystem is mounted
1401  	 *  noexec, in which case we don't add PROT_EXEC.)
1402  	 */
1403  	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1404  		if (!(file && path_noexec(&file->f_path)))
1405  			prot |= PROT_EXEC;
1406  
1407  	/* force arch specific MAP_FIXED handling in get_unmapped_area */
1408  	if (flags & MAP_FIXED_NOREPLACE)
1409  		flags |= MAP_FIXED;
1410  
1411  	if (!(flags & MAP_FIXED))
1412  		addr = round_hint_to_min(addr);
1413  
1414  	/* Careful about overflows.. */
1415  	len = PAGE_ALIGN(len);
1416  	if (!len)
1417  		return -ENOMEM;
1418  
1419  	/* offset overflow? */
1420  	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1421  		return -EOVERFLOW;
1422  
1423  	/* Too many mappings? */
1424  	if (mm->map_count > sysctl_max_map_count)
1425  		return -ENOMEM;
1426  
1427  	/* Obtain the address to map to. We verify (or select) it and ensure
1428  	 * that it represents a valid section of the address space.
1429  	 */
1430  	addr = get_unmapped_area(file, addr, len, pgoff, flags);
1431  	if (offset_in_page(addr))
1432  		return addr;
1433  
1434  	if (flags & MAP_FIXED_NOREPLACE) {
1435  		struct vm_area_struct *vma = find_vma(mm, addr);
1436  
1437  		if (vma && vma->vm_start < addr + len)
1438  			return -EEXIST;
1439  	}
1440  
1441  	if (prot == PROT_EXEC) {
1442  		pkey = execute_only_pkey(mm);
1443  		if (pkey < 0)
1444  			pkey = 0;
1445  	}
1446  
1447  	/* Do simple checking here so the lower-level routines won't have
1448  	 * to. We assume access permissions have been handled by the open
1449  	 * of the memory object, so we don't do any here.
1450  	 */
1451  	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
1452  			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1453  
1454  	if (flags & MAP_LOCKED)
1455  		if (!can_do_mlock())
1456  			return -EPERM;
1457  
1458  	if (mlock_future_check(mm, vm_flags, len))
1459  		return -EAGAIN;
1460  
1461  	if (file) {
1462  		struct inode *inode = file_inode(file);
1463  		unsigned long flags_mask;
1464  
1465  		if (!file_mmap_ok(file, inode, pgoff, len))
1466  			return -EOVERFLOW;
1467  
1468  		flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
1469  
1470  		switch (flags & MAP_TYPE) {
1471  		case MAP_SHARED:
1472  			/*
1473  			 * Force use of MAP_SHARED_VALIDATE with non-legacy
1474  			 * flags. E.g. MAP_SYNC is dangerous to use with
1475  			 * MAP_SHARED as you don't know which consistency model
1476  			 * you will get. We silently ignore unsupported flags
1477  			 * with MAP_SHARED to preserve backward compatibility.
1478  			 */
1479  			flags &= LEGACY_MAP_MASK;
1480  			/* fall through */
1481  		case MAP_SHARED_VALIDATE:
1482  			if (flags & ~flags_mask)
1483  				return -EOPNOTSUPP;
1484  			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1485  				return -EACCES;
1486  
1487  			/*
1488  			 * Make sure we don't allow writing to an append-only
1489  			 * file..
1490  			 */
1491  			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1492  				return -EACCES;
1493  
1494  			/*
1495  			 * Make sure there are no mandatory locks on the file.
1496  			 */
1497  			if (locks_verify_locked(file))
1498  				return -EAGAIN;
1499  
1500  			vm_flags |= VM_SHARED | VM_MAYSHARE;
1501  			if (!(file->f_mode & FMODE_WRITE))
1502  				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1503  
1504  			/* fall through */
1505  		case MAP_PRIVATE:
1506  			if (!(file->f_mode & FMODE_READ))
1507  				return -EACCES;
1508  			if (path_noexec(&file->f_path)) {
1509  				if (vm_flags & VM_EXEC)
1510  					return -EPERM;
1511  				vm_flags &= ~VM_MAYEXEC;
1512  			}
1513  
1514  			if (!file->f_op->mmap)
1515  				return -ENODEV;
1516  			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1517  				return -EINVAL;
1518  			break;
1519  
1520  		default:
1521  			return -EINVAL;
1522  		}
1523  	} else {
1524  		switch (flags & MAP_TYPE) {
1525  		case MAP_SHARED:
1526  			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1527  				return -EINVAL;
1528  			/*
1529  			 * Ignore pgoff.
1530  			 */
1531  			pgoff = 0;
1532  			vm_flags |= VM_SHARED | VM_MAYSHARE;
1533  			break;
1534  		case MAP_PRIVATE:
1535  			/*
1536  			 * Set pgoff according to addr for anon_vma.
1537  			 */
1538  			pgoff = addr >> PAGE_SHIFT;
1539  			break;
1540  		default:
1541  			return -EINVAL;
1542  		}
1543  	}
1544  
1545  	/*
1546  	 * Set 'VM_NORESERVE' if we should not account for the
1547  	 * memory use of this mapping.
1548  	 */
1549  	if (flags & MAP_NORESERVE) {
1550  		/* We honor MAP_NORESERVE if allowed to overcommit */
1551  		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1552  			vm_flags |= VM_NORESERVE;
1553  
1554  		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
1555  		if (file && is_file_hugepages(file))
1556  			vm_flags |= VM_NORESERVE;
1557  	}
1558  
1559  	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
1560  	if (!IS_ERR_VALUE(addr) &&
1561  	    ((vm_flags & VM_LOCKED) ||
1562  	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1563  		*populate = len;
1564  	return addr;
1565  }
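/*
 * Illustrative userspace sketch of the MAP_FIXED_NOREPLACE check above
 * (not part of this file): asking for an already-mapped address fails
 * with EEXIST instead of silently clobbering the existing mapping.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000	/* in case libc headers lack it */
#endif

static int noreplace_demo(void)
{
	char *a = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *b;

	if (a == MAP_FAILED)
		return -1;
	b = mmap(a, 4096, PROT_READ,
		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
	if (b == MAP_FAILED && errno == EEXIST)
		printf("existing mapping at %p preserved\n", (void *)a);
	return 0;
}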
1566  
1567  unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1568  			      unsigned long prot, unsigned long flags,
1569  			      unsigned long fd, unsigned long pgoff)
1570  {
1571  	struct file *file = NULL;
1572  	unsigned long retval;
1573  
1574  	if (!(flags & MAP_ANONYMOUS)) {
1575  		audit_mmap_fd(fd, flags);
1576  		file = fget(fd);
1577  		if (!file)
1578  			return -EBADF;
1579  		if (is_file_hugepages(file))
1580  			len = ALIGN(len, huge_page_size(hstate_file(file)));
1581  		retval = -EINVAL;
1582  		if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
1583  			goto out_fput;
1584  	} else if (flags & MAP_HUGETLB) {
1585  		struct user_struct *user = NULL;
1586  		struct hstate *hs;
1587  
1588  		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1589  		if (!hs)
1590  			return -EINVAL;
1591  
1592  		len = ALIGN(len, huge_page_size(hs));
1593  		/*
1594  		 * VM_NORESERVE is used because the reservations will be
1595  		 * taken when vm_ops->mmap() is called.
1596  		 * A dummy user value is used because we are not locking
1597  		 * memory, so no accounting is necessary.
1598  		 */
1599  		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1600  				VM_NORESERVE,
1601  				&user, HUGETLB_ANONHUGE_INODE,
1602  				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1603  		if (IS_ERR(file))
1604  			return PTR_ERR(file);
1605  	}
1606  
1607  	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1608  
1609  	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1610  out_fput:
1611  	if (file)
1612  		fput(file);
1613  	return retval;
1614  }
1615  
1616  SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1617  		unsigned long, prot, unsigned long, flags,
1618  		unsigned long, fd, unsigned long, pgoff)
1619  {
1620  	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1621  }
1622  
1623  #ifdef __ARCH_WANT_SYS_OLD_MMAP
1624  struct mmap_arg_struct {
1625  	unsigned long addr;
1626  	unsigned long len;
1627  	unsigned long prot;
1628  	unsigned long flags;
1629  	unsigned long fd;
1630  	unsigned long offset;
1631  };
1632  
1633  SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1634  {
1635  	struct mmap_arg_struct a;
1636  
1637  	if (copy_from_user(&a, arg, sizeof(a)))
1638  		return -EFAULT;
1639  	if (offset_in_page(a.offset))
1640  		return -EINVAL;
1641  
1642  	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1643  			       a.offset >> PAGE_SHIFT);
1644  }
1645  #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1646  
1647  /*
1648   * Some shared mappings will want the pages marked read-only
1649   * to track write events. If so, we'll downgrade vm_page_prot
1650   * to the private version (using protection_map[] without the
1651   * VM_SHARED bit).
1652   */
1653  int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1654  {
1655  	vm_flags_t vm_flags = vma->vm_flags;
1656  	const struct vm_operations_struct *vm_ops = vma->vm_ops;
1657  
1658  	/* If it was private or non-writable, the write bit is already clear */
1659  	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1660  		return 0;
1661  
1662  	/* The backer wishes to know when pages are first written to? */
1663  	if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
1664  		return 1;
1665  
1666  	/* The open routine did something to the protections that pgprot_modify
1667  	 * won't preserve? */
1668  	if (pgprot_val(vm_page_prot) !=
1669  	    pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
1670  		return 0;
1671  
1672  	/* Do we need to track softdirty? */
1673  	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
1674  		return 1;
1675  
1676  	/* Specialty mapping? */
1677  	if (vm_flags & VM_PFNMAP)
1678  		return 0;
1679  
1680  	/* Can the mapping track the dirty pages? */
1681  	return vma->vm_file && vma->vm_file->f_mapping &&
1682  		mapping_cap_account_dirty(vma->vm_file->f_mapping);
1683  }
1684  
1685  /*
1686   * We account for memory if it's a private writable mapping
1687   * that is not hugepages and VM_NORESERVE wasn't set.
1688   */
1689  static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1690  {
1691  	/*
1692  	 * hugetlb has its own accounting separate from the core VM.
1693  	 * VM_HUGETLB may not be set yet, so we cannot check for that flag.
1694  	 */
1695  	if (file && is_file_hugepages(file))
1696  		return 0;
1697  
1698  	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1699  }
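/*
 * For example, an anonymous MAP_PRIVATE PROT_READ|PROT_WRITE mapping
 * has (vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE and
 * is charged; MAP_SHARED or MAP_NORESERVE mappings are not.
 */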
1700  
1701  unsigned long mmap_region(struct file *file, unsigned long addr,
1702  		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
1703  		struct list_head *uf)
1704  {
1705  	struct mm_struct *mm = current->mm;
1706  	struct vm_area_struct *vma, *prev;
1707  	int error;
1708  	struct rb_node **rb_link, *rb_parent;
1709  	unsigned long charged = 0;
1710  
1711  	/* Check against address space limit. */
1712  	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
1713  		unsigned long nr_pages;
1714  
1715  		/*
1716  		 * MAP_FIXED may remove pages of mappings that intersects with
1717  		 * requested mapping. Account for the pages it would unmap.
1718  		 */
1719  		nr_pages = count_vma_pages_range(mm, addr, addr + len);
1720  
1721  		if (!may_expand_vm(mm, vm_flags,
1722  					(len >> PAGE_SHIFT) - nr_pages))
1723  			return -ENOMEM;
1724  	}
1725  
1726  	/* Clear old maps */
1727  	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
1728  			      &rb_parent)) {
1729  		if (do_munmap(mm, addr, len, uf))
1730  			return -ENOMEM;
1731  	}
1732  
1733  	/*
1734  	 * Private writable mapping: check memory availability
1735  	 */
1736  	if (accountable_mapping(file, vm_flags)) {
1737  		charged = len >> PAGE_SHIFT;
1738  		if (security_vm_enough_memory_mm(mm, charged))
1739  			return -ENOMEM;
1740  		vm_flags |= VM_ACCOUNT;
1741  	}
1742  
1743  	/*
1744  	 * Can we just expand an old mapping?
1745  	 */
1746  	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
1747  			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
1748  	if (vma)
1749  		goto out;
1750  
1751  	/*
1752  	 * Determine the object being mapped and call the appropriate
1753  	 * specific mapper. The address has already been validated and any
1754  	 * old mappings in the range have been removed from the list.
1755  	 */
1756  	vma = vm_area_alloc(mm);
1757  	if (!vma) {
1758  		error = -ENOMEM;
1759  		goto unacct_error;
1760  	}
1761  
1762  	vma->vm_start = addr;
1763  	vma->vm_end = addr + len;
1764  	vma->vm_flags = vm_flags;
1765  	vma->vm_page_prot = vm_get_page_prot(vm_flags);
1766  	vma->vm_pgoff = pgoff;
1767  
1768  	if (file) {
1769  		if (vm_flags & VM_DENYWRITE) {
1770  			error = deny_write_access(file);
1771  			if (error)
1772  				goto free_vma;
1773  		}
1774  		if (vm_flags & VM_SHARED) {
1775  			error = mapping_map_writable(file->f_mapping);
1776  			if (error)
1777  				goto allow_write_and_free_vma;
1778  		}
1779  
1780  		/* ->mmap() can change vma->vm_file, but must guarantee that
1781  		 * vma_link() below can deny write-access if VM_DENYWRITE is set
1782  		 * and map writably if VM_SHARED is set. This usually means the
1783  		 * new file must not have been exposed to user-space, yet.
1784  		 */
1785  		vma->vm_file = get_file(file);
1786  		error = call_mmap(file, vma);
1787  		if (error)
1788  			goto unmap_and_free_vma;
1789  
1790  		/* Can addr have changed??
1791  		 *
1792  		 * Answer: Yes, several device drivers can do it in their
1793  		 *         f_op->mmap method. -DaveM
1794  		 * Bug: If addr is changed, prev, rb_link, rb_parent should
1795  		 *      be updated for vma_link()
1796  		 */
1797  		WARN_ON_ONCE(addr != vma->vm_start);
1798  
1799  		addr = vma->vm_start;
1800  		vm_flags = vma->vm_flags;
1801  	} else if (vm_flags & VM_SHARED) {
1802  		error = shmem_zero_setup(vma);
1803  		if (error)
1804  			goto free_vma;
1805  	} else {
1806  		vma_set_anonymous(vma);
1807  	}
1808  
1809  	vma_link(mm, vma, prev, rb_link, rb_parent);
1810  	/* Once vma denies write, undo our temporary denial count */
1811  	if (file) {
1812  		if (vm_flags & VM_SHARED)
1813  			mapping_unmap_writable(file->f_mapping);
1814  		if (vm_flags & VM_DENYWRITE)
1815  			allow_write_access(file);
1816  	}
1817  	file = vma->vm_file;
1818  out:
1819  	perf_event_mmap(vma);
1820  
1821  	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
1822  	if (vm_flags & VM_LOCKED) {
1823  		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
1824  					is_vm_hugetlb_page(vma) ||
1825  					vma == get_gate_vma(current->mm))
1826  			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
1827  		else
1828  			mm->locked_vm += (len >> PAGE_SHIFT);
1829  	}
1830  
1831  	if (file)
1832  		uprobe_mmap(vma);
1833  
1834  	/*
1835  	 * A new (or expanded) vma always gets the soft-dirty status.
1836  	 * Otherwise the user-space soft-dirty page tracker could not
1837  	 * distinguish a region that was unmapped and then mapped again
1838  	 * in place (which must be treated as a completely new data
1839  	 * area).
1840  	 */
1841  	vma->vm_flags |= VM_SOFTDIRTY;
1842  
1843  	vma_set_page_prot(vma);
1844  
1845  	return addr;
1846  
1847  unmap_and_free_vma:
1848  	vma->vm_file = NULL;
1849  	fput(file);
1850  
1851  	/* Undo any partial mapping done by a device driver. */
1852  	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1853  	charged = 0;
1854  	if (vm_flags & VM_SHARED)
1855  		mapping_unmap_writable(file->f_mapping);
1856  allow_write_and_free_vma:
1857  	if (vm_flags & VM_DENYWRITE)
1858  		allow_write_access(file);
1859  free_vma:
1860  	vm_area_free(vma);
1861  unacct_error:
1862  	if (charged)
1863  		vm_unacct_memory(charged);
1864  	return error;
1865  }
1866  
1867  unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1868  {
1869  	/*
1870  	 * We implement the search by looking for an rbtree node that
1871  	 * immediately follows a suitable gap. That is,
1872  	 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
1873  	 * - gap_end   = vma->vm_start        >= info->low_limit  + length;
1874  	 * - gap_end - gap_start >= length
1875  	 */
1876  
1877  	struct mm_struct *mm = current->mm;
1878  	struct vm_area_struct *vma;
1879  	unsigned long length, low_limit, high_limit, gap_start, gap_end;
1880  
1881  	/* Adjust search length to account for worst case alignment overhead */
1882  	length = info->length + info->align_mask;
1883  	if (length < info->length)
1884  		return -ENOMEM;
1885  
1886  	/* Adjust search limits by the desired length */
1887  	if (info->high_limit < length)
1888  		return -ENOMEM;
1889  	high_limit = info->high_limit - length;
1890  
1891  	if (info->low_limit > high_limit)
1892  		return -ENOMEM;
1893  	low_limit = info->low_limit + length;
1894  
1895  	/* Check if rbtree root looks promising */
1896  	if (RB_EMPTY_ROOT(&mm->mm_rb))
1897  		goto check_highest;
1898  	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1899  	if (vma->rb_subtree_gap < length)
1900  		goto check_highest;
1901  
1902  	while (true) {
1903  		/* Visit left subtree if it looks promising */
1904  		gap_end = vm_start_gap(vma);
1905  		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
1906  			struct vm_area_struct *left =
1907  				rb_entry(vma->vm_rb.rb_left,
1908  					 struct vm_area_struct, vm_rb);
1909  			if (left->rb_subtree_gap >= length) {
1910  				vma = left;
1911  				continue;
1912  			}
1913  		}
1914  
1915  		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
1916  check_current:
1917  		/* Check if current node has a suitable gap */
1918  		if (gap_start > high_limit)
1919  			return -ENOMEM;
1920  		if (gap_end >= low_limit &&
1921  		    gap_end > gap_start && gap_end - gap_start >= length)
1922  			goto found;
1923  
1924  		/* Visit right subtree if it looks promising */
1925  		if (vma->vm_rb.rb_right) {
1926  			struct vm_area_struct *right =
1927  				rb_entry(vma->vm_rb.rb_right,
1928  					 struct vm_area_struct, vm_rb);
1929  			if (right->rb_subtree_gap >= length) {
1930  				vma = right;
1931  				continue;
1932  			}
1933  		}
1934  
1935  		/* Go back up the rbtree to find next candidate node */
1936  		while (true) {
1937  			struct rb_node *prev = &vma->vm_rb;
1938  			if (!rb_parent(prev))
1939  				goto check_highest;
1940  			vma = rb_entry(rb_parent(prev),
1941  				       struct vm_area_struct, vm_rb);
1942  			if (prev == vma->vm_rb.rb_left) {
1943  				gap_start = vm_end_gap(vma->vm_prev);
1944  				gap_end = vm_start_gap(vma);
1945  				goto check_current;
1946  			}
1947  		}
1948  	}
1949  
1950  check_highest:
1951  	/* Check highest gap, which does not precede any rbtree node */
1952  	gap_start = mm->highest_vm_end;
1953  	gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
1954  	if (gap_start > high_limit)
1955  		return -ENOMEM;
1956  
1957  found:
1958  	/* We found a suitable gap. Clip it with the original low_limit. */
1959  	if (gap_start < info->low_limit)
1960  		gap_start = info->low_limit;
1961  
1962  	/* Adjust gap address to the desired alignment */
1963  	gap_start += (info->align_offset - gap_start) & info->align_mask;
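	/*
	 * Editorial example with assumed values: for align_mask = 0xffff
	 * (64KiB alignment), align_offset = 0 and gap_start = 0x12345,
	 * the adjustment adds (0 - 0x12345) & 0xffff = 0xdcbb, rounding
	 * gap_start up to the next 64KiB boundary, 0x20000.
	 */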
1964  
1965  	VM_BUG_ON(gap_start + info->length > info->high_limit);
1966  	VM_BUG_ON(gap_start + info->length > gap_end);
1967  	return gap_start;
1968  }
1969  
1970  unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1971  {
1972  	struct mm_struct *mm = current->mm;
1973  	struct vm_area_struct *vma;
1974  	unsigned long length, low_limit, high_limit, gap_start, gap_end;
1975  
1976  	/* Adjust search length to account for worst case alignment overhead */
1977  	length = info->length + info->align_mask;
1978  	if (length < info->length)
1979  		return -ENOMEM;
1980  
1981  	/*
1982  	 * Adjust search limits by the desired length.
1983  	 * See implementation comment at top of unmapped_area().
1984  	 */
1985  	gap_end = info->high_limit;
1986  	if (gap_end < length)
1987  		return -ENOMEM;
1988  	high_limit = gap_end - length;
1989  
1990  	if (info->low_limit > high_limit)
1991  		return -ENOMEM;
1992  	low_limit = info->low_limit + length;
1993  
1994  	/* Check highest gap, which does not precede any rbtree node */
1995  	gap_start = mm->highest_vm_end;
1996  	if (gap_start <= high_limit)
1997  		goto found_highest;
1998  
1999  	/* Check if rbtree root looks promising */
2000  	if (RB_EMPTY_ROOT(&mm->mm_rb))
2001  		return -ENOMEM;
2002  	vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
2003  	if (vma->rb_subtree_gap < length)
2004  		return -ENOMEM;
2005  
2006  	while (true) {
2007  		/* Visit right subtree if it looks promising */
2008  		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
2009  		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
2010  			struct vm_area_struct *right =
2011  				rb_entry(vma->vm_rb.rb_right,
2012  					 struct vm_area_struct, vm_rb);
2013  			if (right->rb_subtree_gap >= length) {
2014  				vma = right;
2015  				continue;
2016  			}
2017  		}
2018  
2019  check_current:
2020  		/* Check if current node has a suitable gap */
2021  		gap_end = vm_start_gap(vma);
2022  		if (gap_end < low_limit)
2023  			return -ENOMEM;
2024  		if (gap_start <= high_limit &&
2025  		    gap_end > gap_start && gap_end - gap_start >= length)
2026  			goto found;
2027  
2028  		/* Visit left subtree if it looks promising */
2029  		if (vma->vm_rb.rb_left) {
2030  			struct vm_area_struct *left =
2031  				rb_entry(vma->vm_rb.rb_left,
2032  					 struct vm_area_struct, vm_rb);
2033  			if (left->rb_subtree_gap >= length) {
2034  				vma = left;
2035  				continue;
2036  			}
2037  		}
2038  
2039  		/* Go back up the rbtree to find next candidate node */
2040  		while (true) {
2041  			struct rb_node *prev = &vma->vm_rb;
2042  			if (!rb_parent(prev))
2043  				return -ENOMEM;
2044  			vma = rb_entry(rb_parent(prev),
2045  				       struct vm_area_struct, vm_rb);
2046  			if (prev == vma->vm_rb.rb_right) {
2047  				gap_start = vma->vm_prev ?
2048  					vm_end_gap(vma->vm_prev) : 0;
2049  				goto check_current;
2050  			}
2051  		}
2052  	}
2053  
2054  found:
2055  	/* We found a suitable gap. Clip it with the original high_limit. */
2056  	if (gap_end > info->high_limit)
2057  		gap_end = info->high_limit;
2058  
2059  found_highest:
2060  	/* Compute highest gap address at the desired alignment */
2061  	gap_end -= info->length;
2062  	gap_end -= (gap_end - info->align_offset) & info->align_mask;
2063  
2064  	VM_BUG_ON(gap_end < info->low_limit);
2065  	VM_BUG_ON(gap_end < gap_start);
2066  	return gap_end;
2067  }
2068  
2070  #ifndef arch_get_mmap_end
2071  #define arch_get_mmap_end(addr)	(TASK_SIZE)
2072  #endif
2073  
2074  #ifndef arch_get_mmap_base
2075  #define arch_get_mmap_base(addr, base) (base)
2076  #endif
2077  
2078  /* Get an address range which is currently unmapped.
2079   * For shmat() with addr=0.
2080   *
2081   * Ugly calling convention alert:
2082   * Return value with the low bits set means error value,
2083   * ie
2084   *	if (ret & ~PAGE_MASK)
2085   *		error = ret;
2086   *
2087   * This function "knows" that -ENOMEM has the bits set.
2088   */
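/*
 * Editorial sketch of the caller-side check this convention implies,
 * in the style of do_mmap():
 *
 *	addr = get_unmapped_area(file, addr, len, pgoff, flags);
 *	if (offset_in_page(addr))
 *		return addr;	(low bits set, so addr is an -errno)
 */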
2089  #ifndef HAVE_ARCH_UNMAPPED_AREA
2090  unsigned long
2091  arch_get_unmapped_area(struct file *filp, unsigned long addr,
2092  		unsigned long len, unsigned long pgoff, unsigned long flags)
2093  {
2094  	struct mm_struct *mm = current->mm;
2095  	struct vm_area_struct *vma, *prev;
2096  	struct vm_unmapped_area_info info;
2097  	const unsigned long mmap_end = arch_get_mmap_end(addr);
2098  
2099  	if (len > mmap_end - mmap_min_addr)
2100  		return -ENOMEM;
2101  
2102  	if (flags & MAP_FIXED)
2103  		return addr;
2104  
2105  	if (addr) {
2106  		addr = PAGE_ALIGN(addr);
2107  		vma = find_vma_prev(mm, addr, &prev);
2108  		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
2109  		    (!vma || addr + len <= vm_start_gap(vma)) &&
2110  		    (!prev || addr >= vm_end_gap(prev)))
2111  			return addr;
2112  	}
2113  
2114  	info.flags = 0;
2115  	info.length = len;
2116  	info.low_limit = mm->mmap_base;
2117  	info.high_limit = mmap_end;
2118  	info.align_mask = 0;
2119  	return vm_unmapped_area(&info);
2120  }
2121  #endif
2122  
2123  /*
2124   * This mmap-allocator allocates new areas top-down from below the
2125   * stack's low limit (the base):
2126   */
2127  #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
2128  unsigned long
2129  arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
2130  			  const unsigned long len, const unsigned long pgoff,
2131  			  const unsigned long flags)
2132  {
2133  	struct vm_area_struct *vma, *prev;
2134  	struct mm_struct *mm = current->mm;
2135  	unsigned long addr = addr0;
2136  	struct vm_unmapped_area_info info;
2137  	const unsigned long mmap_end = arch_get_mmap_end(addr);
2138  
2139  	/* requested length too big for entire address space */
2140  	if (len > mmap_end - mmap_min_addr)
2141  		return -ENOMEM;
2142  
2143  	if (flags & MAP_FIXED)
2144  		return addr;
2145  
2146  	/* requesting a specific address */
2147  	if (addr) {
2148  		addr = PAGE_ALIGN(addr);
2149  		vma = find_vma_prev(mm, addr, &prev);
2150  		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
2151  				(!vma || addr + len <= vm_start_gap(vma)) &&
2152  				(!prev || addr >= vm_end_gap(prev)))
2153  			return addr;
2154  	}
2155  
2156  	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
2157  	info.length = len;
2158  	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
2159  	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
2160  	info.align_mask = 0;
2161  	addr = vm_unmapped_area(&info);
2162  
2163  	/*
2164  	 * A failed mmap() very likely causes application failure,
2165  	 * so fall back to the bottom-up function here. This scenario
2166  	 * can happen with large stack limits and large mmap()
2167  	 * allocations.
2168  	 */
2169  	if (offset_in_page(addr)) {
2170  		VM_BUG_ON(addr != -ENOMEM);
2171  		info.flags = 0;
2172  		info.low_limit = TASK_UNMAPPED_BASE;
2173  		info.high_limit = mmap_end;
2174  		addr = vm_unmapped_area(&info);
2175  	}
2176  
2177  	return addr;
2178  }
2179  #endif
2180  
2181  unsigned long
2182  get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
2183  		unsigned long pgoff, unsigned long flags)
2184  {
2185  	unsigned long (*get_area)(struct file *, unsigned long,
2186  				  unsigned long, unsigned long, unsigned long);
2187  
2188  	unsigned long error = arch_mmap_check(addr, len, flags);
2189  	if (error)
2190  		return error;
2191  
2192  	/* Careful about overflows.. */
2193  	if (len > TASK_SIZE)
2194  		return -ENOMEM;
2195  
2196  	get_area = current->mm->get_unmapped_area;
2197  	if (file) {
2198  		if (file->f_op->get_unmapped_area)
2199  			get_area = file->f_op->get_unmapped_area;
2200  	} else if (flags & MAP_SHARED) {
2201  		/*
2202  		 * mmap_region() will call shmem_zero_setup() to create a file,
2203  		 * so use shmem's get_unmapped_area in case it can be huge.
2204  		 * do_mmap_pgoff() will clear pgoff, so match alignment.
2205  		 */
2206  		pgoff = 0;
2207  		get_area = shmem_get_unmapped_area;
2208  	}
2209  
2210  	addr = get_area(file, addr, len, pgoff, flags);
2211  	if (IS_ERR_VALUE(addr))
2212  		return addr;
2213  
2214  	if (addr > TASK_SIZE - len)
2215  		return -ENOMEM;
2216  	if (offset_in_page(addr))
2217  		return -EINVAL;
2218  
2219  	error = security_mmap_addr(addr);
2220  	return error ? error : addr;
2221  }
2222  
2223  EXPORT_SYMBOL(get_unmapped_area);
2224  
2225  /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
2226  struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
2227  {
2228  	struct rb_node *rb_node;
2229  	struct vm_area_struct *vma;
2230  
2231  	/* Check the cache first. */
2232  	vma = vmacache_find(mm, addr);
2233  	if (likely(vma))
2234  		return vma;
2235  
2236  	rb_node = mm->mm_rb.rb_node;
2237  
2238  	while (rb_node) {
2239  		struct vm_area_struct *tmp;
2240  
2241  		tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2242  
2243  		if (tmp->vm_end > addr) {
2244  			vma = tmp;
2245  			if (tmp->vm_start <= addr)
2246  				break;
2247  			rb_node = rb_node->rb_left;
2248  		} else
2249  			rb_node = rb_node->rb_right;
2250  	}
2251  
2252  	if (vma)
2253  		vmacache_update(addr, vma);
2254  	return vma;
2255  }
2256  
2257  EXPORT_SYMBOL(find_vma);
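
/*
 * Editorial note: a non-NULL result only guarantees addr < vm_end; callers
 * that need addr to actually lie inside the vma must also check vm_start:
 *
 *	vma = find_vma(mm, addr);
 *	if (vma && vma->vm_start <= addr)
 *		... addr is mapped by vma ...
 */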
2258  
2259  /*
2260   * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
2261   */
2262  struct vm_area_struct *
2263  find_vma_prev(struct mm_struct *mm, unsigned long addr,
2264  			struct vm_area_struct **pprev)
2265  {
2266  	struct vm_area_struct *vma;
2267  
2268  	vma = find_vma(mm, addr);
2269  	if (vma) {
2270  		*pprev = vma->vm_prev;
2271  	} else {
2272  		struct rb_node *rb_node = mm->mm_rb.rb_node;
2273  		*pprev = NULL;
2274  		while (rb_node) {
2275  			*pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2276  			rb_node = rb_node->rb_right;
2277  		}
2278  	}
2279  	return vma;
2280  }
2281  
2282  /*
2283   * Verify that the stack growth is acceptable and
2284   * update accounting. This is shared with both the
2285   * grow-up and grow-down cases.
2286   */
2287  static int acct_stack_growth(struct vm_area_struct *vma,
2288  			     unsigned long size, unsigned long grow)
2289  {
2290  	struct mm_struct *mm = vma->vm_mm;
2291  	unsigned long new_start;
2292  
2293  	/* address space limit tests */
2294  	if (!may_expand_vm(mm, vma->vm_flags, grow))
2295  		return -ENOMEM;
2296  
2297  	/* Stack limit test */
2298  	if (size > rlimit(RLIMIT_STACK))
2299  		return -ENOMEM;
2300  
2301  	/* mlock limit tests */
2302  	if (vma->vm_flags & VM_LOCKED) {
2303  		unsigned long locked;
2304  		unsigned long limit;
2305  		locked = mm->locked_vm + grow;
2306  		limit = rlimit(RLIMIT_MEMLOCK);
2307  		limit >>= PAGE_SHIFT;
2308  		if (locked > limit && !capable(CAP_IPC_LOCK))
2309  			return -ENOMEM;
2310  	}
2311  
2312  	/* Check to ensure the stack will not grow into a hugetlb-only region */
2313  	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2314  			vma->vm_end - size;
2315  	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2316  		return -EFAULT;
2317  
2318  	/*
2319  	 * Overcommit..  This must be the final test, as it will
2320  	 * update security statistics.
2321  	 */
2322  	if (security_vm_enough_memory_mm(mm, grow))
2323  		return -ENOMEM;
2324  
2325  	return 0;
2326  }
2327  
2328  #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
2329  /*
2330   * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
2331   * vma is the last one with address > vma->vm_end.  Have to extend vma.
2332   */
2333  int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2334  {
2335  	struct mm_struct *mm = vma->vm_mm;
2336  	struct vm_area_struct *next;
2337  	unsigned long gap_addr;
2338  	int error = 0;
2339  
2340  	if (!(vma->vm_flags & VM_GROWSUP))
2341  		return -EFAULT;
2342  
2343  	/* Guard against exceeding limits of the address space. */
2344  	address &= PAGE_MASK;
2345  	if (address >= (TASK_SIZE & PAGE_MASK))
2346  		return -ENOMEM;
2347  	address += PAGE_SIZE;
2348  
2349  	/* Enforce stack_guard_gap */
2350  	gap_addr = address + stack_guard_gap;
2351  
2352  	/* Guard against overflow */
2353  	if (gap_addr < address || gap_addr > TASK_SIZE)
2354  		gap_addr = TASK_SIZE;
2355  
2356  	next = vma->vm_next;
2357  	if (next && next->vm_start < gap_addr &&
2358  			(next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
2359  		if (!(next->vm_flags & VM_GROWSUP))
2360  			return -ENOMEM;
2361  		/* Check that both stack segments have the same anon_vma? */
2362  	}
2363  
2364  	/* We must make sure the anon_vma is allocated. */
2365  	if (unlikely(anon_vma_prepare(vma)))
2366  		return -ENOMEM;
2367  
2368  	/*
2369  	 * vma->vm_start/vm_end cannot change under us because the caller
2370  	 * is required to hold the mmap_sem in read mode.  We need the
2371  	 * anon_vma lock to serialize against concurrent expand_stacks.
2372  	 */
2373  	anon_vma_lock_write(vma->anon_vma);
2374  
2375  	/* Somebody else might have raced and expanded it already */
2376  	if (address > vma->vm_end) {
2377  		unsigned long size, grow;
2378  
2379  		size = address - vma->vm_start;
2380  		grow = (address - vma->vm_end) >> PAGE_SHIFT;
2381  
2382  		error = -ENOMEM;
2383  		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2384  			error = acct_stack_growth(vma, size, grow);
2385  			if (!error) {
2386  				/*
2387  				 * vma_gap_update() doesn't support concurrent
2388  				 * updates, but we only hold a shared mmap_sem
2389  				 * lock here, so we need to protect against
2390  				 * concurrent vma expansions.
2391  				 * anon_vma_lock_write() doesn't help here, as
2392  				 * we don't guarantee that all growable vmas
2393  				 * in a mm share the same root anon vma.
2394  				 * So, we reuse mm->page_table_lock to guard
2395  				 * against concurrent vma expansions.
2396  				 */
2397  				spin_lock(&mm->page_table_lock);
2398  				if (vma->vm_flags & VM_LOCKED)
2399  					mm->locked_vm += grow;
2400  				vm_stat_account(mm, vma->vm_flags, grow);
2401  				anon_vma_interval_tree_pre_update_vma(vma);
2402  				vma->vm_end = address;
2403  				anon_vma_interval_tree_post_update_vma(vma);
2404  				if (vma->vm_next)
2405  					vma_gap_update(vma->vm_next);
2406  				else
2407  					mm->highest_vm_end = vm_end_gap(vma);
2408  				spin_unlock(&mm->page_table_lock);
2409  
2410  				perf_event_mmap(vma);
2411  			}
2412  		}
2413  	}
2414  	anon_vma_unlock_write(vma->anon_vma);
2415  	khugepaged_enter_vma_merge(vma, vma->vm_flags);
2416  	validate_mm(mm);
2417  	return error;
2418  }
2419  #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2420  
2421  /*
2422   * vma is the first one with address < vma->vm_start.  Have to extend vma.
2423   */
2424  int expand_downwards(struct vm_area_struct *vma,
2425  				   unsigned long address)
2426  {
2427  	struct mm_struct *mm = vma->vm_mm;
2428  	struct vm_area_struct *prev;
2429  	int error;
2430  
2431  	address &= PAGE_MASK;
2432  	error = security_mmap_addr(address);
2433  	if (error)
2434  		return error;
2435  
2436  	/* Enforce stack_guard_gap */
2437  	prev = vma->vm_prev;
2438  	/* Check that both stack segments have the same anon_vma? */
2439  	if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
2440  			(prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
2441  		if (address - prev->vm_end < stack_guard_gap)
2442  			return -ENOMEM;
2443  	}
2444  
2445  	/* We must make sure the anon_vma is allocated. */
2446  	if (unlikely(anon_vma_prepare(vma)))
2447  		return -ENOMEM;
2448  
2449  	/*
2450  	 * vma->vm_start/vm_end cannot change under us because the caller
2451  	 * is required to hold the mmap_sem in read mode.  We need the
2452  	 * anon_vma lock to serialize against concurrent expand_stacks.
2453  	 */
2454  	anon_vma_lock_write(vma->anon_vma);
2455  
2456  	/* Somebody else might have raced and expanded it already */
2457  	if (address < vma->vm_start) {
2458  		unsigned long size, grow;
2459  
2460  		size = vma->vm_end - address;
2461  		grow = (vma->vm_start - address) >> PAGE_SHIFT;
2462  
2463  		error = -ENOMEM;
2464  		if (grow <= vma->vm_pgoff) {
2465  			error = acct_stack_growth(vma, size, grow);
2466  			if (!error) {
2467  				/*
2468  				 * vma_gap_update() doesn't support concurrent
2469  				 * updates, but we only hold a shared mmap_sem
2470  				 * lock here, so we need to protect against
2471  				 * concurrent vma expansions.
2472  				 * anon_vma_lock_write() doesn't help here, as
2473  				 * we don't guarantee that all growable vmas
2474  				 * in a mm share the same root anon vma.
2475  				 * So, we reuse mm->page_table_lock to guard
2476  				 * against concurrent vma expansions.
2477  				 */
2478  				spin_lock(&mm->page_table_lock);
2479  				if (vma->vm_flags & VM_LOCKED)
2480  					mm->locked_vm += grow;
2481  				vm_stat_account(mm, vma->vm_flags, grow);
2482  				anon_vma_interval_tree_pre_update_vma(vma);
2483  				vma->vm_start = address;
2484  				vma->vm_pgoff -= grow;
2485  				anon_vma_interval_tree_post_update_vma(vma);
2486  				vma_gap_update(vma);
2487  				spin_unlock(&mm->page_table_lock);
2488  
2489  				perf_event_mmap(vma);
2490  			}
2491  		}
2492  	}
2493  	anon_vma_unlock_write(vma->anon_vma);
2494  	khugepaged_enter_vma_merge(vma, vma->vm_flags);
2495  	validate_mm(mm);
2496  	return error;
2497  }
2498  
2499  /* enforced gap between the expanding stack and other mappings. */
2500  unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2501  
2502  static int __init cmdline_parse_stack_guard_gap(char *p)
2503  {
2504  	unsigned long val;
2505  	char *endptr;
2506  
2507  	val = simple_strtoul(p, &endptr, 10);
2508  	if (!*endptr)
2509  		stack_guard_gap = val << PAGE_SHIFT;
2510  
2511  	return 0;
2512  }
2513  __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
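
/*
 * Editorial example: booting with "stack_guard_gap=1024" sets a gap of
 * 1024 pages, i.e. 4MiB with 4KiB pages (1024 << 12); the default of
 * 256 pages above corresponds to 1MiB on such configurations.
 */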
2514  
2515  #ifdef CONFIG_STACK_GROWSUP
2516  int expand_stack(struct vm_area_struct *vma, unsigned long address)
2517  {
2518  	return expand_upwards(vma, address);
2519  }
2520  
2521  struct vm_area_struct *
2522  find_extend_vma(struct mm_struct *mm, unsigned long addr)
2523  {
2524  	struct vm_area_struct *vma, *prev;
2525  
2526  	addr &= PAGE_MASK;
2527  	vma = find_vma_prev(mm, addr, &prev);
2528  	if (vma && (vma->vm_start <= addr))
2529  		return vma;
2530  	if (!prev || expand_stack(prev, addr))
2531  		return NULL;
2532  	if (prev->vm_flags & VM_LOCKED)
2533  		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2534  	return prev;
2535  }
2536  #else
2537  int expand_stack(struct vm_area_struct *vma, unsigned long address)
2538  {
2539  	return expand_downwards(vma, address);
2540  }
2541  
2542  struct vm_area_struct *
2543  find_extend_vma(struct mm_struct *mm, unsigned long addr)
2544  {
2545  	struct vm_area_struct *vma;
2546  	unsigned long start;
2547  
2548  	addr &= PAGE_MASK;
2549  	vma = find_vma(mm, addr);
2550  	if (!vma)
2551  		return NULL;
2552  	if (vma->vm_start <= addr)
2553  		return vma;
2554  	if (!(vma->vm_flags & VM_GROWSDOWN))
2555  		return NULL;
2556  	start = vma->vm_start;
2557  	if (expand_stack(vma, addr))
2558  		return NULL;
2559  	if (vma->vm_flags & VM_LOCKED)
2560  		populate_vma_page_range(vma, addr, start, NULL);
2561  	return vma;
2562  }
2563  #endif
2564  
2565  EXPORT_SYMBOL_GPL(find_extend_vma);
2566  
2567  /*
2568   * Ok - we have the memory areas we should free on the vma list,
2569   * so release them, and do the vma updates.
2570   *
2571   * Called with the mm semaphore held.
2572   */
2573  static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
2574  {
2575  	unsigned long nr_accounted = 0;
2576  
2577  	/* Update high watermark before we lower total_vm */
2578  	update_hiwater_vm(mm);
2579  	do {
2580  		long nrpages = vma_pages(vma);
2581  
2582  		if (vma->vm_flags & VM_ACCOUNT)
2583  			nr_accounted += nrpages;
2584  		vm_stat_account(mm, vma->vm_flags, -nrpages);
2585  		vma = remove_vma(vma);
2586  	} while (vma);
2587  	vm_unacct_memory(nr_accounted);
2588  	validate_mm(mm);
2589  }
2590  
2591  /*
2592   * Get rid of page table information in the indicated region.
2593   *
2594   * Called with the mm semaphore held.
2595   */
2596  static void unmap_region(struct mm_struct *mm,
2597  		struct vm_area_struct *vma, struct vm_area_struct *prev,
2598  		unsigned long start, unsigned long end)
2599  {
2600  	struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
2601  	struct mmu_gather tlb;
2602  
2603  	lru_add_drain();
2604  	tlb_gather_mmu(&tlb, mm, start, end);
2605  	update_hiwater_rss(mm);
2606  	unmap_vmas(&tlb, vma, start, end);
2607  	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2608  				 next ? next->vm_start : USER_PGTABLES_CEILING);
2609  	tlb_finish_mmu(&tlb, start, end);
2610  }
2611  
2612  /*
2613   * Create a list of vma's touched by the unmap, removing them from the mm's
2614   * vma list as we go.
2615   */
2616  static void
2617  detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2618  	struct vm_area_struct *prev, unsigned long end)
2619  {
2620  	struct vm_area_struct **insertion_point;
2621  	struct vm_area_struct *tail_vma = NULL;
2622  
2623  	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
2624  	vma->vm_prev = NULL;
2625  	do {
2626  		vma_rb_erase(vma, &mm->mm_rb);
2627  		mm->map_count--;
2628  		tail_vma = vma;
2629  		vma = vma->vm_next;
2630  	} while (vma && vma->vm_start < end);
2631  	*insertion_point = vma;
2632  	if (vma) {
2633  		vma->vm_prev = prev;
2634  		vma_gap_update(vma);
2635  	} else
2636  		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
2637  	tail_vma->vm_next = NULL;
2638  
2639  	/* Kill the cache */
2640  	vmacache_invalidate(mm);
2641  }
2642  
2643  /*
2644   * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
2645   * has already been checked or doesn't make sense to fail.
2646   */
2647  int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2648  		unsigned long addr, int new_below)
2649  {
2650  	struct vm_area_struct *new;
2651  	int err;
2652  
2653  	if (vma->vm_ops && vma->vm_ops->split) {
2654  		err = vma->vm_ops->split(vma, addr);
2655  		if (err)
2656  			return err;
2657  	}
2658  
2659  	new = vm_area_dup(vma);
2660  	if (!new)
2661  		return -ENOMEM;
2662  
2663  	if (new_below)
2664  		new->vm_end = addr;
2665  	else {
2666  		new->vm_start = addr;
2667  		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2668  	}
2669  
2670  	err = vma_dup_policy(vma, new);
2671  	if (err)
2672  		goto out_free_vma;
2673  
2674  	err = anon_vma_clone(new, vma);
2675  	if (err)
2676  		goto out_free_mpol;
2677  
2678  	if (new->vm_file)
2679  		get_file(new->vm_file);
2680  
2681  	if (new->vm_ops && new->vm_ops->open)
2682  		new->vm_ops->open(new);
2683  
2684  	if (new_below)
2685  		err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
2686  			((addr - new->vm_start) >> PAGE_SHIFT), new);
2687  	else
2688  		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
2689  
2690  	/* Success. */
2691  	if (!err)
2692  		return 0;
2693  
2694  	/* Clean everything up if vma_adjust failed. */
2695  	if (new->vm_ops && new->vm_ops->close)
2696  		new->vm_ops->close(new);
2697  	if (new->vm_file)
2698  		fput(new->vm_file);
2699  	unlink_anon_vmas(new);
2700   out_free_mpol:
2701  	mpol_put(vma_policy(new));
2702   out_free_vma:
2703  	vm_area_free(new);
2704  	return err;
2705  }
2706  
2707  /*
2708   * Split a vma into two pieces at address 'addr', a new vma is allocated
2709   * either for the first part or the tail.
2710   */
2711  int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2712  	      unsigned long addr, int new_below)
2713  {
2714  	if (mm->map_count >= sysctl_max_map_count)
2715  		return -ENOMEM;
2716  
2717  	return __split_vma(mm, vma, addr, new_below);
2718  }
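
/*
 * Editorial example: splitting a vma covering [0x1000, 0x5000) at
 * addr == 0x3000 with new_below == 1 allocates the new vma for the low
 * half [0x1000, 0x3000) and shrinks the original to [0x3000, 0x5000);
 * with new_below == 0 the new vma takes the tail [0x3000, 0x5000)
 * instead.
 */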
2719  
2720  /* Munmap is split into 2 main parts -- this part, which finds
2721   * what needs doing, and unmap_region(), which does the actual
2722   * work.  This now handles partial unmappings.
2723   * Jeremy Fitzhardinge <jeremy@goop.org>
2724   */
2725  int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2726  		struct list_head *uf, bool downgrade)
2727  {
2728  	unsigned long end;
2729  	struct vm_area_struct *vma, *prev, *last;
2730  
2731  	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2732  		return -EINVAL;
2733  
2734  	len = PAGE_ALIGN(len);
2735  	if (len == 0)
2736  		return -EINVAL;
2737  
2738  	/* Find the first overlapping VMA */
2739  	vma = find_vma(mm, start);
2740  	if (!vma)
2741  		return 0;
2742  	prev = vma->vm_prev;
2743  	/* we have  start < vma->vm_end  */
2744  
2745  	/* if it doesn't overlap, we have nothing.. */
2746  	end = start + len;
2747  	if (vma->vm_start >= end)
2748  		return 0;
2749  
2750  	/*
2751  	 * If we need to split any vma, do it now to save pain later.
2752  	 *
2753  	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2754  	 * unmapped vm_area_struct will remain in use: so lower split_vma
2755  	 * places tmp vma above, and higher split_vma places tmp vma below.
2756  	 */
2757  	if (start > vma->vm_start) {
2758  		int error;
2759  
2760  		/*
2761  		 * Make sure that map_count on return from munmap() will
2762  		 * not exceed its limit; but let map_count go just above
2763  		 * its limit temporarily, to help free resources as expected.
2764  		 */
2765  		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2766  			return -ENOMEM;
2767  
2768  		error = __split_vma(mm, vma, start, 0);
2769  		if (error)
2770  			return error;
2771  		prev = vma;
2772  	}
2773  
2774  	/* Does it split the last one? */
2775  	last = find_vma(mm, end);
2776  	if (last && end > last->vm_start) {
2777  		int error = __split_vma(mm, last, end, 1);
2778  		if (error)
2779  			return error;
2780  	}
2781  	vma = prev ? prev->vm_next : mm->mmap;
2782  
2783  	if (unlikely(uf)) {
2784  		/*
2785  		 * If userfaultfd_unmap_prep returns an error the vmas
2786  		 * will remain splitted, but userland will get a
2787  		 * will remain split, but userland will get a
2788  		 * highly unexpected error anyway. This is no
2789  		 * different from the case where the first of the two
2790  		 * __split_vma calls fails but we don't undo the first
2791  		 * split, although we could. This failure is unlikely
2792  		 * enough that it's not worth optimizing for.
2793  		int error = userfaultfd_unmap_prep(vma, start, end, uf);
2794  		if (error)
2795  			return error;
2796  	}
2797  
2798  	/*
2799  	 * unlock any mlock()ed ranges before detaching vmas
2800  	 */
2801  	if (mm->locked_vm) {
2802  		struct vm_area_struct *tmp = vma;
2803  		while (tmp && tmp->vm_start < end) {
2804  			if (tmp->vm_flags & VM_LOCKED) {
2805  				mm->locked_vm -= vma_pages(tmp);
2806  				munlock_vma_pages_all(tmp);
2807  			}
2808  
2809  			tmp = tmp->vm_next;
2810  		}
2811  	}
2812  
2813  	/* Detach vmas from rbtree */
2814  	detach_vmas_to_be_unmapped(mm, vma, prev, end);
2815  
2816  	/*
2817  	 * mpx unmap needs to be called with mmap_sem held for write.
2818  	 * It is safe to call it before unmap_region().
2819  	 */
2820  	arch_unmap(mm, vma, start, end);
2821  
2822  	if (downgrade)
2823  		downgrade_write(&mm->mmap_sem);
2824  
2825  	unmap_region(mm, vma, prev, start, end);
2826  
2827  	/* Fix up all other VM information */
2828  	remove_vma_list(mm, vma);
2829  
2830  	return downgrade ? 1 : 0;
2831  }
2832  
2833  int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2834  	      struct list_head *uf)
2835  {
2836  	return __do_munmap(mm, start, len, uf, false);
2837  }
2838  
2839  static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
2840  {
2841  	int ret;
2842  	struct mm_struct *mm = current->mm;
2843  	LIST_HEAD(uf);
2844  
2845  	if (down_write_killable(&mm->mmap_sem))
2846  		return -EINTR;
2847  
2848  	ret = __do_munmap(mm, start, len, &uf, downgrade);
2849  	/*
2850  	 * Returning 1 indicates mmap_sem is downgraded.
2851  	 * But 1 is not legal return value of vm_munmap() and munmap(), reset
2852  	 * it to 0 before return.
2853  	 */
2854  	if (ret == 1) {
2855  		up_read(&mm->mmap_sem);
2856  		ret = 0;
2857  	} else
2858  		up_write(&mm->mmap_sem);
2859  
2860  	userfaultfd_unmap_complete(mm, &uf);
2861  	return ret;
2862  }
2863  
2864  int vm_munmap(unsigned long start, size_t len)
2865  {
2866  	return __vm_munmap(start, len, false);
2867  }
2868  EXPORT_SYMBOL(vm_munmap);
2869  
2870  SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2871  {
2872  	profile_munmap(addr);
2873  	return __vm_munmap(addr, len, true);
2874  }
2875  
2877  /*
2878   * Emulation of deprecated remap_file_pages() syscall.
2879   */
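/*
 * Editorial sketch (illustrative only; fd_of_vma and prot_of_vma are
 * placeholders for properties recovered from the vma below): the
 * emulation roughly rewrites
 *
 *	remap_file_pages(start, size, 0, pgoff, 0);
 *
 * into a fixed re-mapping of the same file over itself:
 *
 *	mmap(start, size, prot_of_vma,
 *	     MAP_SHARED | MAP_FIXED | MAP_POPULATE, fd_of_vma,
 *	     pgoff << PAGE_SHIFT);
 */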
2880  SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2881  		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2882  {
2884  	struct mm_struct *mm = current->mm;
2885  	struct vm_area_struct *vma;
2886  	unsigned long populate = 0;
2887  	unsigned long ret = -EINVAL;
2888  	struct file *file;
2889  
2890  	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
2891  		     current->comm, current->pid);
2892  
2893  	if (prot)
2894  		return ret;
2895  	start = start & PAGE_MASK;
2896  	size = size & PAGE_MASK;
2897  
2898  	if (start + size <= start)
2899  		return ret;
2900  
2901  	/* Does pgoff wrap? */
2902  	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2903  		return ret;
2904  
2905  	if (down_write_killable(&mm->mmap_sem))
2906  		return -EINTR;
2907  
2908  	vma = find_vma(mm, start);
2909  
2910  	if (!vma || !(vma->vm_flags & VM_SHARED))
2911  		goto out;
2912  
2913  	if (start < vma->vm_start)
2914  		goto out;
2915  
2916  	if (start + size > vma->vm_end) {
2917  		struct vm_area_struct *next;
2918  
2919  		for (next = vma->vm_next; next; next = next->vm_next) {
2920  			/* hole between vmas ? */
2921  			if (next->vm_start != next->vm_prev->vm_end)
2922  				goto out;
2923  
2924  			if (next->vm_file != vma->vm_file)
2925  				goto out;
2926  
2927  			if (next->vm_flags != vma->vm_flags)
2928  				goto out;
2929  
2930  			if (start + size <= next->vm_end)
2931  				break;
2932  		}
2933  
2934  		if (!next)
2935  			goto out;
2936  	}
2937  
2938  	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
2939  	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
2940  	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
2941  
2942  	flags &= MAP_NONBLOCK;
2943  	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
2944  	if (vma->vm_flags & VM_LOCKED) {
2945  		struct vm_area_struct *tmp;
2946  		flags |= MAP_LOCKED;
2947  
2948  		/* drop PG_mlocked flag for the over-mapped range */
2949  		for (tmp = vma; tmp && tmp->vm_start < start + size;
2950  				tmp = tmp->vm_next) {
2951  			/*
2952  			 * Split pmd and munlock page on the border
2953  			 * of the range.
2954  			 */
2955  			vma_adjust_trans_huge(tmp, start, start + size, 0);
2956  
2957  			munlock_vma_pages_range(tmp,
2958  					max(tmp->vm_start, start),
2959  					min(tmp->vm_end, start + size));
2960  		}
2961  	}
2962  
2963  	file = get_file(vma->vm_file);
2964  	ret = do_mmap_pgoff(vma->vm_file, start, size,
2965  			prot, flags, pgoff, &populate, NULL);
2966  	fput(file);
2967  out:
2968  	up_write(&mm->mmap_sem);
2969  	if (populate)
2970  		mm_populate(ret, populate);
2971  	if (!IS_ERR_VALUE(ret))
2972  		ret = 0;
2973  	return ret;
2974  }
2975  
2976  /*
2977   *  This is really a simplified "do_mmap".  It only handles
2978   *  anonymous maps.  Eventually we may be able to do some
2979   *  brk-specific accounting here.
2980   */
2981  static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
2982  {
2983  	struct mm_struct *mm = current->mm;
2984  	struct vm_area_struct *vma, *prev;
2985  	struct rb_node **rb_link, *rb_parent;
2986  	pgoff_t pgoff = addr >> PAGE_SHIFT;
2987  	int error;
2988  
2989  	/* Until we need other flags, refuse anything except VM_EXEC. */
2990  	if ((flags & (~VM_EXEC)) != 0)
2991  		return -EINVAL;
2992  	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2993  
2994  	error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2995  	if (offset_in_page(error))
2996  		return error;
2997  
2998  	error = mlock_future_check(mm, mm->def_flags, len);
2999  	if (error)
3000  		return error;
3001  
3002  	/*
3003  	 * Clear old maps.  This also does some error checking for us
3004  	 */
3005  	while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
3006  			      &rb_parent)) {
3007  		if (do_munmap(mm, addr, len, uf))
3008  			return -ENOMEM;
3009  	}
3010  
3011  	/* Check against address space limits *after* clearing old maps... */
3012  	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
3013  		return -ENOMEM;
3014  
3015  	if (mm->map_count > sysctl_max_map_count)
3016  		return -ENOMEM;
3017  
3018  	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
3019  		return -ENOMEM;
3020  
3021  	/* Can we just expand an old private anonymous mapping? */
3022  	vma = vma_merge(mm, prev, addr, addr + len, flags,
3023  			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
3024  	if (vma)
3025  		goto out;
3026  
3027  	/*
3028  	 * create a vma struct for an anonymous mapping
3029  	 */
3030  	vma = vm_area_alloc(mm);
3031  	if (!vma) {
3032  		vm_unacct_memory(len >> PAGE_SHIFT);
3033  		return -ENOMEM;
3034  	}
3035  
3036  	vma_set_anonymous(vma);
3037  	vma->vm_start = addr;
3038  	vma->vm_end = addr + len;
3039  	vma->vm_pgoff = pgoff;
3040  	vma->vm_flags = flags;
3041  	vma->vm_page_prot = vm_get_page_prot(flags);
3042  	vma_link(mm, vma, prev, rb_link, rb_parent);
3043  out:
3044  	perf_event_mmap(vma);
3045  	mm->total_vm += len >> PAGE_SHIFT;
3046  	mm->data_vm += len >> PAGE_SHIFT;
3047  	if (flags & VM_LOCKED)
3048  		mm->locked_vm += (len >> PAGE_SHIFT);
3049  	vma->vm_flags |= VM_SOFTDIRTY;
3050  	return 0;
3051  }
3052  
3053  int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3054  {
3055  	struct mm_struct *mm = current->mm;
3056  	unsigned long len;
3057  	int ret;
3058  	bool populate;
3059  	LIST_HEAD(uf);
3060  
3061  	len = PAGE_ALIGN(request);
3062  	if (len < request)
3063  		return -ENOMEM;
3064  	if (!len)
3065  		return 0;
3066  
3067  	if (down_write_killable(&mm->mmap_sem))
3068  		return -EINTR;
3069  
3070  	ret = do_brk_flags(addr, len, flags, &uf);
3071  	populate = ((mm->def_flags & VM_LOCKED) != 0);
3072  	up_write(&mm->mmap_sem);
3073  	userfaultfd_unmap_complete(mm, &uf);
3074  	if (populate && !ret)
3075  		mm_populate(addr, len);
3076  	return ret;
3077  }
3078  EXPORT_SYMBOL(vm_brk_flags);
3079  
3080  int vm_brk(unsigned long addr, unsigned long len)
3081  {
3082  	return vm_brk_flags(addr, len, 0);
3083  }
3084  EXPORT_SYMBOL(vm_brk);
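
/*
 * Editorial sketch of an in-kernel caller (bss_start/bss_len are
 * hypothetical names), in the style of the binfmt loaders:
 *
 *	error = vm_brk(bss_start, bss_len);
 *	if (error)
 *		return error;
 */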
3085  
3086  /* Release all mmaps. */
3087  void exit_mmap(struct mm_struct *mm)
3088  {
3089  	struct mmu_gather tlb;
3090  	struct vm_area_struct *vma;
3091  	unsigned long nr_accounted = 0;
3092  
3093  	/* mm's last user has gone, and it's about to be pulled down */
3094  	mmu_notifier_release(mm);
3095  
3096  	if (unlikely(mm_is_oom_victim(mm))) {
3097  		/*
3098  		 * Manually reap the mm to free as much memory as possible.
3099  		 * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
3100  		 * this mm from further consideration.  Taking mm->mmap_sem for
3101  		 * write after setting MMF_OOM_SKIP will guarantee that the oom
3102  		 * reaper will not run on this mm again after mmap_sem is
3103  		 * dropped.
3104  		 *
3105  		 * Nothing can be holding mm->mmap_sem here and the above call
3106  		 * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
3107  		 * __oom_reap_task_mm() will not block.
3108  		 *
3109  		 * This needs to be done before calling munlock_vma_pages_all(),
3110  		 * which clears VM_LOCKED, otherwise the oom reaper cannot
3111  		 * reliably test it.
3112  		 */
3113  		(void)__oom_reap_task_mm(mm);
3114  
3115  		set_bit(MMF_OOM_SKIP, &mm->flags);
3116  		down_write(&mm->mmap_sem);
3117  		up_write(&mm->mmap_sem);
3118  	}
3119  
3120  	if (mm->locked_vm) {
3121  		vma = mm->mmap;
3122  		while (vma) {
3123  			if (vma->vm_flags & VM_LOCKED)
3124  				munlock_vma_pages_all(vma);
3125  			vma = vma->vm_next;
3126  		}
3127  	}
3128  
3129  	arch_exit_mmap(mm);
3130  
3131  	vma = mm->mmap;
3132  	if (!vma)	/* Can happen if dup_mmap() received an OOM */
3133  		return;
3134  
3135  	lru_add_drain();
3136  	flush_cache_mm(mm);
3137  	tlb_gather_mmu(&tlb, mm, 0, -1);
3138  	/* update_hiwater_rss(mm) here? but nobody should be looking */
3139  	/* Use -1 here to ensure all VMAs in the mm are unmapped */
3140  	unmap_vmas(&tlb, vma, 0, -1);
3141  	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
3142  	tlb_finish_mmu(&tlb, 0, -1);
3143  
3144  	/*
3145  	 * Walk the list again, actually closing and freeing it,
3146  	 * with preemption enabled, without holding any MM locks.
3147  	 */
3148  	while (vma) {
3149  		if (vma->vm_flags & VM_ACCOUNT)
3150  			nr_accounted += vma_pages(vma);
3151  		vma = remove_vma(vma);
3152  	}
3153  	vm_unacct_memory(nr_accounted);
3154  }
3155  
3156  /* Insert vm structure into process list sorted by address
3157   * and into the inode's i_mmap tree.  If vm_file is non-NULL
3158   * then i_mmap_rwsem is taken here.
3159   */
3160  int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3161  {
3162  	struct vm_area_struct *prev;
3163  	struct rb_node **rb_link, *rb_parent;
3164  
3165  	if (find_vma_links(mm, vma->vm_start, vma->vm_end,
3166  			   &prev, &rb_link, &rb_parent))
3167  		return -ENOMEM;
3168  	if ((vma->vm_flags & VM_ACCOUNT) &&
3169  	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
3170  		return -ENOMEM;
3171  
3172  	/*
3173  	 * The vm_pgoff of a purely anonymous vma should be irrelevant
3174  	 * until its first write fault, when page's anon_vma and index
3175  	 * are set.  But now set the vm_pgoff it will almost certainly
3176  	 * end up with (unless mremap moves it elsewhere before that
3177  	 * first write fault), so /proc/pid/maps tells a consistent story.
3178  	 *
3179  	 * By setting it to reflect the virtual start address of the
3180  	 * vma, merges and splits can happen in a seamless way, just
3181  	 * using the existing file pgoff checks and manipulations.
3182  	 * Similarly in do_mmap_pgoff and in do_brk.
3183  	 */
3184  	if (vma_is_anonymous(vma)) {
3185  		BUG_ON(vma->anon_vma);
3186  		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3187  	}
3188  
3189  	vma_link(mm, vma, prev, rb_link, rb_parent);
3190  	return 0;
3191  }
3192  
3193  /*
3194   * Copy the vma structure to a new location in the same mm,
3195   * prior to moving page table entries, to effect an mremap move.
3196   */
3197  struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3198  	unsigned long addr, unsigned long len, pgoff_t pgoff,
3199  	bool *need_rmap_locks)
3200  {
3201  	struct vm_area_struct *vma = *vmap;
3202  	unsigned long vma_start = vma->vm_start;
3203  	struct mm_struct *mm = vma->vm_mm;
3204  	struct vm_area_struct *new_vma, *prev;
3205  	struct rb_node **rb_link, *rb_parent;
3206  	bool faulted_in_anon_vma = true;
3207  
3208  	/*
3209  	 * If anonymous vma has not yet been faulted, update new pgoff
3210  	 * to match new location, to increase its chance of merging.
3211  	 */
3212  	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3213  		pgoff = addr >> PAGE_SHIFT;
3214  		faulted_in_anon_vma = false;
3215  	}
3216  
3217  	if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
3218  		return NULL;	/* should never get here */
3219  	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
3220  			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3221  			    vma->vm_userfaultfd_ctx);
3222  	if (new_vma) {
3223  		/*
3224  		 * Source vma may have been merged into new_vma
3225  		 */
3226  		if (unlikely(vma_start >= new_vma->vm_start &&
3227  			     vma_start < new_vma->vm_end)) {
3228  			/*
3229  			 * The only way we can get a vma_merge with
3230  			 * self during an mremap is if the vma hasn't
3231  			 * been faulted in yet and we were allowed to
3232  			 * reset the dst vma->vm_pgoff to the
3233  			 * destination address of the mremap to allow
3234  			 * the merge to happen. mremap must change the
3235  			 * vm_pgoff linearity between src and dst vmas
3236  			 * (in turn preventing a vma_merge) to be
3237  			 * safe. It is only safe to keep the vm_pgoff
3238  			 * linear if there are no pages mapped yet.
3239  			 */
3240  			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3241  			*vmap = vma = new_vma;
3242  		}
3243  		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3244  	} else {
3245  		new_vma = vm_area_dup(vma);
3246  		if (!new_vma)
3247  			goto out;
3248  		new_vma->vm_start = addr;
3249  		new_vma->vm_end = addr + len;
3250  		new_vma->vm_pgoff = pgoff;
3251  		if (vma_dup_policy(vma, new_vma))
3252  			goto out_free_vma;
3253  		if (anon_vma_clone(new_vma, vma))
3254  			goto out_free_mempol;
3255  		if (new_vma->vm_file)
3256  			get_file(new_vma->vm_file);
3257  		if (new_vma->vm_ops && new_vma->vm_ops->open)
3258  			new_vma->vm_ops->open(new_vma);
3259  		vma_link(mm, new_vma, prev, rb_link, rb_parent);
3260  		*need_rmap_locks = false;
3261  	}
3262  	return new_vma;
3263  
3264  out_free_mempol:
3265  	mpol_put(vma_policy(new_vma));
3266  out_free_vma:
3267  	vm_area_free(new_vma);
3268  out:
3269  	return NULL;
3270  }
3271  
3272  /*
3273   * Return true if the calling process may expand its vm space by the passed
3274   * number of pages
3275   */
3276  bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3277  {
3278  	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3279  		return false;
3280  
3281  	if (is_data_mapping(flags) &&
3282  	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3283  		/* Workaround for Valgrind */
3284  		if (rlimit(RLIMIT_DATA) == 0 &&
3285  		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3286  			return true;
3287  
3288  		pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
3289  			     current->comm, current->pid,
3290  			     (mm->data_vm + npages) << PAGE_SHIFT,
3291  			     rlimit(RLIMIT_DATA),
3292  			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3293  
3294  		if (!ignore_rlimit_data)
3295  			return false;
3296  	}
3297  
3298  	return true;
3299  }
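
/*
 * Editorial example: with RLIMIT_AS = 1GiB and 4KiB pages, the first test
 * above allows total_vm to grow to at most 1GiB >> PAGE_SHIFT = 262144
 * pages; data mappings are additionally capped by RLIMIT_DATA unless
 * ignore_rlimit_data is set.
 */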
3300  
3301  void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3302  {
3303  	mm->total_vm += npages;
3304  
3305  	if (is_exec_mapping(flags))
3306  		mm->exec_vm += npages;
3307  	else if (is_stack_mapping(flags))
3308  		mm->stack_vm += npages;
3309  	else if (is_data_mapping(flags))
3310  		mm->data_vm += npages;
3311  }
3312  
3313  static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3314  
3315  /*
3316   * Having a close hook prevents vma merging regardless of flags.
3317   */
3318  static void special_mapping_close(struct vm_area_struct *vma)
3319  {
3320  }
3321  
3322  static const char *special_mapping_name(struct vm_area_struct *vma)
3323  {
3324  	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3325  }
3326  
3327  static int special_mapping_mremap(struct vm_area_struct *new_vma)
3328  {
3329  	struct vm_special_mapping *sm = new_vma->vm_private_data;
3330  
3331  	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3332  		return -EFAULT;
3333  
3334  	if (sm->mremap)
3335  		return sm->mremap(sm, new_vma);
3336  
3337  	return 0;
3338  }
3339  
3340  static const struct vm_operations_struct special_mapping_vmops = {
3341  	.close = special_mapping_close,
3342  	.fault = special_mapping_fault,
3343  	.mremap = special_mapping_mremap,
3344  	.name = special_mapping_name,
3345  };
3346  
3347  static const struct vm_operations_struct legacy_special_mapping_vmops = {
3348  	.close = special_mapping_close,
3349  	.fault = special_mapping_fault,
3350  };
3351  
3352  static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
3353  {
3354  	struct vm_area_struct *vma = vmf->vma;
3355  	pgoff_t pgoff;
3356  	struct page **pages;
3357  
3358  	if (vma->vm_ops == &legacy_special_mapping_vmops) {
3359  		pages = vma->vm_private_data;
3360  	} else {
3361  		struct vm_special_mapping *sm = vma->vm_private_data;
3362  
3363  		if (sm->fault)
3364  			return sm->fault(sm, vmf->vma, vmf);
3365  
3366  		pages = sm->pages;
3367  	}
3368  
3369  	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
3370  		pgoff--;
3371  
3372  	if (*pages) {
3373  		struct page *page = *pages;
3374  		get_page(page);
3375  		vmf->page = page;
3376  		return 0;
3377  	}
3378  
3379  	return VM_FAULT_SIGBUS;
3380  }
3381  
3382  static struct vm_area_struct *__install_special_mapping(
3383  	struct mm_struct *mm,
3384  	unsigned long addr, unsigned long len,
3385  	unsigned long vm_flags, void *priv,
3386  	const struct vm_operations_struct *ops)
3387  {
3388  	int ret;
3389  	struct vm_area_struct *vma;
3390  
3391  	vma = vm_area_alloc(mm);
3392  	if (unlikely(vma == NULL))
3393  		return ERR_PTR(-ENOMEM);
3394  
3395  	vma->vm_start = addr;
3396  	vma->vm_end = addr + len;
3397  
3398  	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
3399  	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3400  
3401  	vma->vm_ops = ops;
3402  	vma->vm_private_data = priv;
3403  
3404  	ret = insert_vm_struct(mm, vma);
3405  	if (ret)
3406  		goto out;
3407  
3408  	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3409  
3410  	perf_event_mmap(vma);
3411  
3412  	return vma;
3413  
3414  out:
3415  	vm_area_free(vma);
3416  	return ERR_PTR(ret);
3417  }
3418  
3419  bool vma_is_special_mapping(const struct vm_area_struct *vma,
3420  	const struct vm_special_mapping *sm)
3421  {
3422  	return vma->vm_private_data == sm &&
3423  		(vma->vm_ops == &special_mapping_vmops ||
3424  		 vma->vm_ops == &legacy_special_mapping_vmops);
3425  }
3426  
3427  /*
3428   * Called with mm->mmap_sem held for writing.
3429   * Insert a new vma covering the given region, with the given flags.
3430   * Its pages are supplied by the given array of struct page *.
3431   * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3432   * The region past the last page supplied will always produce SIGBUS.
3433   * The array pointer and the pages it points to are assumed to stay alive
3434   * for as long as this mapping might exist.
3435   */
3436  struct vm_area_struct *_install_special_mapping(
3437  	struct mm_struct *mm,
3438  	unsigned long addr, unsigned long len,
3439  	unsigned long vm_flags, const struct vm_special_mapping *spec)
3440  {
3441  	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
3442  					&special_mapping_vmops);
3443  }
3444  
3445  int install_special_mapping(struct mm_struct *mm,
3446  			    unsigned long addr, unsigned long len,
3447  			    unsigned long vm_flags, struct page **pages)
3448  {
3449  	struct vm_area_struct *vma = __install_special_mapping(
3450  		mm, addr, len, vm_flags, (void *)pages,
3451  		&legacy_special_mapping_vmops);
3452  
3453  	return PTR_ERR_OR_ZERO(vma);
3454  }
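
/*
 * Editorial sketch of a typical arch-side caller (vdso_mapping and
 * vdso_pages are hypothetical names), modelled on how vDSO-like areas
 * are installed:
 *
 *	static const struct vm_special_mapping vdso_mapping = {
 *		.name  = "[vdso]",
 *		.pages = vdso_pages,	(null-terminated array of pages)
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, len,
 *			VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
 *			&vdso_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */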
3455  
3456  static DEFINE_MUTEX(mm_all_locks_mutex);
3457  
3458  static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3459  {
3460  	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3461  		/*
3462  		 * The LSB of head.next can't change from under us
3463  		 * because we hold the mm_all_locks_mutex.
3464  		 */
3465  		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
3466  		/*
3467  		 * We can safely modify rb_root.rb_node after taking the
3468  		 * anon_vma->root->rwsem. If some other vma in this mm shares
3469  		 * the same anon_vma we won't take it again.
3470  		 *
3471  		 * No need for atomic instructions here: rb_root.rb_node
3472  		 * can't change from under us thanks to the
3473  		 * anon_vma->root->rwsem.
3474  		 */
3475  		if (__test_and_set_bit(0, (unsigned long *)
3476  				       &anon_vma->root->rb_root.rb_root.rb_node))
3477  			BUG();
3478  	}
3479  }
3480  
3481  static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3482  {
3483  	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3484  		/*
3485  		 * AS_MM_ALL_LOCKS can't change from under us because
3486  		 * we hold the mm_all_locks_mutex.
3487  		 *
3488  		 * Operations on ->flags have to be atomic because
3489  		 * even if AS_MM_ALL_LOCKS is stable thanks to the
3490  		 * mm_all_locks_mutex, there may be other cpus
3491  		 * changing other bitflags in parallel to us.
3492  		 */
3493  		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3494  			BUG();
3495  		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
3496  	}
3497  }
3498  
3499  /*
3500   * This operation locks against the VM for all pte/vma/mm related
3501   * operations that could ever happen on a certain mm. This includes
3502   * vmtruncate, try_to_unmap, and all page faults.
3503   *
3504   * The caller must take the mmap_sem in write mode before calling
3505   * mm_take_all_locks(). The caller isn't allowed to release the
3506   * mmap_sem until mm_drop_all_locks() returns.
3507   *
3508   * mmap_sem in write mode is required in order to block all operations
3509   * that could modify pagetables and free pages without needing to
3510   * alter the vma layout. It's also needed in write mode to prevent new
3511   * anon_vmas from being associated with existing vmas.
3512   *
3513   * A single task can't call mm_take_all_locks() more than once in a
3514   * row, or it would deadlock.
3515   *
3516   * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3517   * mapping->flags prevent taking the same lock twice if more than one
3518   * vma in this mm is backed by the same anon_vma or address_space.
3519   *
3520   * We take locks in the following order, according to the comment at the
3521   * beginning of mm/rmap.c:
3522   *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3523   *     hugetlb mappings);
3524   *   - all i_mmap_rwsem locks;
3525   *   - all anon_vma->rwsem locks.
3526   *
3527   * Within each type, the locks can be taken in any order because the VM
3528   * code doesn't nest them, and we are protected from parallel
3529   * mm_take_all_locks() callers by mm_all_locks_mutex.
3530   *
3531   * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
3532   * that may have to take thousands of locks.
3533   *
3534   * mm_take_all_locks() can fail if it's interrupted by signals.
3535   */
3536  int mm_take_all_locks(struct mm_struct *mm)
3537  {
3538  	struct vm_area_struct *vma;
3539  	struct anon_vma_chain *avc;
3540  
3541  	BUG_ON(down_read_trylock(&mm->mmap_sem));
3542  
3543  	mutex_lock(&mm_all_locks_mutex);
3544  
3545  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
3546  		if (signal_pending(current))
3547  			goto out_unlock;
3548  		if (vma->vm_file && vma->vm_file->f_mapping &&
3549  				is_vm_hugetlb_page(vma))
3550  			vm_lock_mapping(mm, vma->vm_file->f_mapping);
3551  	}
3552  
3553  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
3554  		if (signal_pending(current))
3555  			goto out_unlock;
3556  		if (vma->vm_file && vma->vm_file->f_mapping &&
3557  				!is_vm_hugetlb_page(vma))
3558  			vm_lock_mapping(mm, vma->vm_file->f_mapping);
3559  	}
3560  
3561  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
3562  		if (signal_pending(current))
3563  			goto out_unlock;
3564  		if (vma->anon_vma)
3565  			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3566  				vm_lock_anon_vma(mm, avc->anon_vma);
3567  	}
3568  
3569  	return 0;
3570  
3571  out_unlock:
3572  	mm_drop_all_locks(mm);
3573  	return -EINTR;
3574  }
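/*
 * Editorial sketch, not part of this file: the calling protocol described
 * above. my_mm_wide_operation() is hypothetical; a real user of this
 * protocol is the mmu_notifier registration path.
 */
#if 0	/* example only */
static int my_mm_wide_operation(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);	/* -EINTR if interrupted by a signal */
	if (ret)
		goto out;	/* all locks were already dropped on failure */

	/* ... work that must exclude all page faults and rmap walks ... */

	mm_drop_all_locks(mm);
out:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif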
3575  
3576  static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3577  {
3578  	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3579  		/*
3580  		 * The LSB of rb_root.rb_node can't change to 0 from under
3581  		 * us because we hold the mm_all_locks_mutex.
3582  		 *
3583  		 * We must however clear the bitflag before unlocking
3584  		 * the anon_vma, so that users of anon_vma->rb_root
3585  		 * never see our bitflag.
3586  		 *
3587  		 * No need for atomic instructions here: rb_root.rb_node
3588  		 * can't change from under us until we release the
3589  		 * anon_vma->root->rwsem.
3590  		 */
3591  		if (!__test_and_clear_bit(0, (unsigned long *)
3592  					  &anon_vma->root->rb_root.rb_root.rb_node))
3593  			BUG();
3594  		anon_vma_unlock_write(anon_vma);
3595  	}
3596  }
3597  
3598  static void vm_unlock_mapping(struct address_space *mapping)
3599  {
3600  	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3601  		/*
3602  		 * AS_MM_ALL_LOCKS can't change to 0 from under us
3603  		 * because we hold the mm_all_locks_mutex.
3604  		 */
3605  		i_mmap_unlock_write(mapping);
3606  		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3607  					&mapping->flags))
3608  			BUG();
3609  	}
3610  }
3611  
3612  /*
3613   * The mmap_sem cannot be released by the caller until
3614   * mm_drop_all_locks() returns.
3615   */
3616  void mm_drop_all_locks(struct mm_struct *mm)
3617  {
3618  	struct vm_area_struct *vma;
3619  	struct anon_vma_chain *avc;
3620  
3621  	BUG_ON(down_read_trylock(&mm->mmap_sem));
3622  	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3623  
3624  	for (vma = mm->mmap; vma; vma = vma->vm_next) {
3625  		if (vma->anon_vma)
3626  			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3627  				vm_unlock_anon_vma(avc->anon_vma);
3628  		if (vma->vm_file && vma->vm_file->f_mapping)
3629  			vm_unlock_mapping(vma->vm_file->f_mapping);
3630  	}
3631  
3632  	mutex_unlock(&mm_all_locks_mutex);
3633  }
3634  
3635  /*
3636   * initialise the percpu counter (vm_committed_as) used for VM accounting
3637   */
3638  void __init mmap_init(void)
3639  {
3640  	int ret;
3641  
3642  	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3643  	VM_BUG_ON(ret);
3644  }
3645  
3646  /*
3647   * Initialise sysctl_user_reserve_kbytes.
3648   *
3649   * This is intended to prevent a single memory-hogging process from
3650   * leaving the user unable to recover (kill the hog) in OVERCOMMIT_NEVER
3651   * mode.
3652   *
3653   * The default value is min(3% of free memory, 128MB).
3654   * 128MB is enough to recover with sshd/login, bash, and top/kill.
3655   */
3656  static int init_user_reserve(void)
3657  {
3658  	unsigned long free_kbytes;
3659  
3660  	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3661  
3662  	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3663  	return 0;
3664  }
3665  subsys_initcall(init_user_reserve);
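/*
 * Editorial worked example: 1UL << 17 kbytes = 128MB, and free_kbytes / 32
 * is roughly 3% of free memory. With 4GB free (free_kbytes = 4194304),
 * free_kbytes / 32 = 131072KB = 128MB, exactly at the cap; systems with
 * less free memory get the 3% figure instead.
 */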
3666  
3667  /*
3668   * Initialise sysctl_admin_reserve_kbytes.
3669   *
3670   * The purpose of sysctl_admin_reserve_kbytes is to allow the sysadmin
3671   * to log in and kill a memory-hogging process.
3672   *
3673   * Systems with more than 256MB of free memory will reserve 8MB, enough
3674   * to recover with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller
3675   * systems will only reserve 3% of free pages by default.
3676   */
3677  static int init_admin_reserve(void)
3678  {
3679  	unsigned long free_kbytes;
3680  
3681  	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3682  
3683  	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3684  	return 0;
3685  }
3686  subsys_initcall(init_admin_reserve);
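/*
 * Editorial worked example: 1UL << 13 kbytes = 8MB, and free_kbytes / 32
 * reaches that cap once free memory is at least 8MB * 32 = 256MB, which
 * is where the "more than 256MB" figure in the comment above comes from.
 */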
3687  
3688  /*
3689   * Reinitialise user and admin reserves if memory is added or removed.
3690   *
3691   * The default user reserve max is 128MB, and the default max for the
3692   * admin reserve is 8MB. These are usually, but not always, enough to
3693   * enable recovery from a memory hogging process using login/sshd, a shell,
3694   * and tools like top. It may make sense to increase or even disable the
3695   * reserve depending on the existence of swap or variations in the recovery
3696   * tools. So, the admin may have changed them.
3697   *
3698   * If memory is added and the reserves have been eliminated or increased above
3699   * the default max, then we'll trust the admin.
3700   *
3701   * If memory is removed and there isn't enough free memory, then we
3702   * need to reset the reserves.
3703   *
3704   * Otherwise keep the reserve set by the admin.
3705   */
3706  static int reserve_mem_notifier(struct notifier_block *nb,
3707  			     unsigned long action, void *data)
3708  {
3709  	unsigned long tmp, free_kbytes;
3710  
3711  	switch (action) {
3712  	case MEM_ONLINE:
3713  		/* Default max is 128MB. Leave alone if modified by operator. */
3714  		tmp = sysctl_user_reserve_kbytes;
3715  		if (0 < tmp && tmp < (1UL << 17))
3716  			init_user_reserve();
3717  
3718  		/* Default max is 8MB.  Leave alone if modified by operator. */
3719  		tmp = sysctl_admin_reserve_kbytes;
3720  		if (0 < tmp && tmp < (1UL << 13))
3721  			init_admin_reserve();
3722  
3723  		break;
3724  	case MEM_OFFLINE:
3725  		free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3726  
3727  		if (sysctl_user_reserve_kbytes > free_kbytes) {
3728  			init_user_reserve();
3729  			pr_info("vm.user_reserve_kbytes reset to %lu\n",
3730  				sysctl_user_reserve_kbytes);
3731  		}
3732  
3733  		if (sysctl_admin_reserve_kbytes > free_kbytes) {
3734  			init_admin_reserve();
3735  			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3736  				sysctl_admin_reserve_kbytes);
3737  		}
3738  		break;
3739  	default:
3740  		break;
3741  	}
3742  	return NOTIFY_OK;
3743  }
3744  
3745  static struct notifier_block reserve_mem_nb = {
3746  	.notifier_call = reserve_mem_notifier,
3747  };
3748  
3749  static int __meminit init_reserve_notifier(void)
3750  {
3751  	if (register_hotmemory_notifier(&reserve_mem_nb))
3752  		pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3753  
3754  	return 0;
3755  }
3756  subsys_initcall(init_reserve_notifier);
3757