xref: /openbmc/linux/mm/mmap.c (revision a6978d1b7bb8f3a25305e8ff7d367f7289614c5d)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * mm/mmap.c
4   *
5   * Written by obz.
6   *
7   * Address space accounting code	<alan@lxorguk.ukuu.org.uk>
8   */
9  
10  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11  
12  #include <linux/kernel.h>
13  #include <linux/slab.h>
14  #include <linux/backing-dev.h>
15  #include <linux/mm.h>
16  #include <linux/mm_inline.h>
17  #include <linux/shm.h>
18  #include <linux/mman.h>
19  #include <linux/pagemap.h>
20  #include <linux/swap.h>
21  #include <linux/syscalls.h>
22  #include <linux/capability.h>
23  #include <linux/init.h>
24  #include <linux/file.h>
25  #include <linux/fs.h>
26  #include <linux/personality.h>
27  #include <linux/security.h>
28  #include <linux/hugetlb.h>
29  #include <linux/shmem_fs.h>
30  #include <linux/profile.h>
31  #include <linux/export.h>
32  #include <linux/mount.h>
33  #include <linux/mempolicy.h>
34  #include <linux/rmap.h>
35  #include <linux/mmu_notifier.h>
36  #include <linux/mmdebug.h>
37  #include <linux/perf_event.h>
38  #include <linux/audit.h>
39  #include <linux/khugepaged.h>
40  #include <linux/uprobes.h>
41  #include <linux/notifier.h>
42  #include <linux/memory.h>
43  #include <linux/printk.h>
44  #include <linux/userfaultfd_k.h>
45  #include <linux/moduleparam.h>
46  #include <linux/pkeys.h>
47  #include <linux/oom.h>
48  #include <linux/sched/mm.h>
49  #include <linux/ksm.h>
50  
51  #include <linux/uaccess.h>
52  #include <asm/cacheflush.h>
53  #include <asm/tlb.h>
54  #include <asm/mmu_context.h>
55  
56  #define CREATE_TRACE_POINTS
57  #include <trace/events/mmap.h>
58  
59  #include "internal.h"
60  
61  #ifndef arch_mmap_check
62  #define arch_mmap_check(addr, len, flags)	(0)
63  #endif
64  
65  #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
66  const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
67  const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
68  int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
69  #endif
70  #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
71  const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
72  const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
73  int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
74  #endif
75  
76  static bool ignore_rlimit_data;
77  core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
78  
79  static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
80  		struct vm_area_struct *vma, struct vm_area_struct *prev,
81  		struct vm_area_struct *next, unsigned long start,
82  		unsigned long end, unsigned long tree_end, bool mm_wr_locked);
83  
84  static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
85  {
86  	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
87  }
88  
89  /* Update vma->vm_page_prot to reflect vma->vm_flags. */
90  void vma_set_page_prot(struct vm_area_struct *vma)
91  {
92  	unsigned long vm_flags = vma->vm_flags;
93  	pgprot_t vm_page_prot;
94  
95  	vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
96  	if (vma_wants_writenotify(vma, vm_page_prot)) {
97  		vm_flags &= ~VM_SHARED;
98  		vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
99  	}
100  	/* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
101  	WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
102  }
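/*
 * Example of the effect above: for a MAP_SHARED, PROT_READ|PROT_WRITE file
 * mapping whose pages need dirty tracking, vma_wants_writenotify() returns
 * true, so the protection is recomputed without VM_SHARED and vm_page_prot
 * loses the write bit.  The first store to each page then faults, giving
 * the filesystem a chance to be notified (page_mkwrite) before the page is
 * dirtied.
 */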
103  
104  /*
105   * Requires inode->i_mapping->i_mmap_rwsem
106   */
107  static void __remove_shared_vm_struct(struct vm_area_struct *vma,
108  		struct file *file, struct address_space *mapping)
109  {
110  	if (vma->vm_flags & VM_SHARED)
111  		mapping_unmap_writable(mapping);
112  
113  	flush_dcache_mmap_lock(mapping);
114  	vma_interval_tree_remove(vma, &mapping->i_mmap);
115  	flush_dcache_mmap_unlock(mapping);
116  }
117  
118  /*
119   * Unlink a file-based vm structure from its interval tree, to hide
120   * vma from rmap and vmtruncate before freeing its page tables.
121   */
122  void unlink_file_vma(struct vm_area_struct *vma)
123  {
124  	struct file *file = vma->vm_file;
125  
126  	if (file) {
127  		struct address_space *mapping = file->f_mapping;
128  		i_mmap_lock_write(mapping);
129  		__remove_shared_vm_struct(vma, file, mapping);
130  		i_mmap_unlock_write(mapping);
131  	}
132  }
133  
134  /*
135   * Close a vm structure and free it.
136   */
137  static void remove_vma(struct vm_area_struct *vma, bool unreachable)
138  {
139  	might_sleep();
140  	if (vma->vm_ops && vma->vm_ops->close)
141  		vma->vm_ops->close(vma);
142  	if (vma->vm_file)
143  		fput(vma->vm_file);
144  	mpol_put(vma_policy(vma));
145  	if (unreachable)
146  		__vm_area_free(vma);
147  	else
148  		vm_area_free(vma);
149  }
150  
151  static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
152  						    unsigned long min)
153  {
154  	return mas_prev(&vmi->mas, min);
155  }
156  
157  /*
158   * check_brk_limits() - Use platform specific check of range & verify mlock
159   * limits.
160   * @addr: The address to check
161   * @len: The size of increase.
162   *
163   * Return: 0 on success.
164   */
165  static int check_brk_limits(unsigned long addr, unsigned long len)
166  {
167  	unsigned long mapped_addr;
168  
169  	mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
170  	if (IS_ERR_VALUE(mapped_addr))
171  		return mapped_addr;
172  
173  	return mlock_future_ok(current->mm, current->mm->def_flags, len)
174  		? 0 : -EAGAIN;
175  }
176  static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
177  		unsigned long addr, unsigned long request, unsigned long flags);
178  SYSCALL_DEFINE1(brk, unsigned long, brk)
179  {
180  	unsigned long newbrk, oldbrk, origbrk;
181  	struct mm_struct *mm = current->mm;
182  	struct vm_area_struct *brkvma, *next = NULL;
183  	unsigned long min_brk;
184  	bool populate = false;
185  	LIST_HEAD(uf);
186  	struct vma_iterator vmi;
187  
188  	if (mmap_write_lock_killable(mm))
189  		return -EINTR;
190  
191  	origbrk = mm->brk;
192  
193  #ifdef CONFIG_COMPAT_BRK
194  	/*
195  	 * CONFIG_COMPAT_BRK can still be overridden by setting
196  	 * randomize_va_space to 2, which will still cause mm->start_brk
197  	 * to be arbitrarily shifted
198  	 */
199  	if (current->brk_randomized)
200  		min_brk = mm->start_brk;
201  	else
202  		min_brk = mm->end_data;
203  #else
204  	min_brk = mm->start_brk;
205  #endif
206  	if (brk < min_brk)
207  		goto out;
208  
209  	/*
210  	 * Check against rlimit here. If this check is done later after the test
211  	 * of oldbrk with newbrk then it can escape the test and let the data
212  	 * segment grow beyond its set limit the in case where the limit is
213  	 * not page aligned -Ram Gupta
214  	 */
215  	if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
216  			      mm->end_data, mm->start_data))
217  		goto out;
218  
219  	newbrk = PAGE_ALIGN(brk);
220  	oldbrk = PAGE_ALIGN(mm->brk);
221  	if (oldbrk == newbrk) {
222  		mm->brk = brk;
223  		goto success;
224  	}
225  
226  	/* Always allow shrinking brk. */
227  	if (brk <= mm->brk) {
228  		/* Search one past newbrk */
229  		vma_iter_init(&vmi, mm, newbrk);
230  		brkvma = vma_find(&vmi, oldbrk);
231  		if (!brkvma || brkvma->vm_start >= oldbrk)
232  			goto out; /* mapping intersects with an existing non-brk vma. */
233  		/*
234  		 * mm->brk must be protected by write mmap_lock.
235  		 * do_vma_munmap() will drop the lock on success,  so update it
236  		 * before calling do_vma_munmap().
237  		 */
238  		mm->brk = brk;
239  		if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
240  			goto out;
241  
242  		goto success_unlocked;
243  	}
244  
245  	if (check_brk_limits(oldbrk, newbrk - oldbrk))
246  		goto out;
247  
248  	/*
249  	 * Only check if the next VMA is within the stack_guard_gap of the
250  	 * expansion area
251  	 */
252  	vma_iter_init(&vmi, mm, oldbrk);
253  	next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
254  	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
255  		goto out;
256  
257  	brkvma = vma_prev_limit(&vmi, mm->start_brk);
258  	/* Ok, looks good - let it rip. */
259  	if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
260  		goto out;
261  
262  	mm->brk = brk;
263  	if (mm->def_flags & VM_LOCKED)
264  		populate = true;
265  
266  success:
267  	mmap_write_unlock(mm);
268  success_unlocked:
269  	userfaultfd_unmap_complete(mm, &uf);
270  	if (populate)
271  		mm_populate(oldbrk, newbrk - oldbrk);
272  	return brk;
273  
274  out:
275  	mm->brk = origbrk;
276  	mmap_write_unlock(mm);
277  	return origbrk;
278  }
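/*
 * Userspace view of this syscall, as a rough sketch (the libc brk()/sbrk()
 * wrappers shown here are assumptions about the caller, not kernel code):
 *
 *	void *cur = sbrk(0);		   current program break
 *	if (brk((char *)cur + 4096) != 0)  grow the heap by one page
 *		perror("brk");
 *	brk(cur);			   shrinking is always allowed
 *
 * On failure the raw syscall returns the unchanged break (origbrk above);
 * libc wrappers detect errors by comparing it with the requested value.
 */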
279  
280  #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
281  static void validate_mm(struct mm_struct *mm)
282  {
283  	int bug = 0;
284  	int i = 0;
285  	struct vm_area_struct *vma;
286  	VMA_ITERATOR(vmi, mm, 0);
287  
288  	mt_validate(&mm->mm_mt);
289  	for_each_vma(vmi, vma) {
290  #ifdef CONFIG_DEBUG_VM_RB
291  		struct anon_vma *anon_vma = vma->anon_vma;
292  		struct anon_vma_chain *avc;
293  #endif
294  		unsigned long vmi_start, vmi_end;
295  		bool warn = 0;
296  
297  		vmi_start = vma_iter_addr(&vmi);
298  		vmi_end = vma_iter_end(&vmi);
299  		if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
300  			warn = 1;
301  
302  		if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
303  			warn = 1;
304  
305  		if (warn) {
306  			pr_emerg("issue in %s\n", current->comm);
307  			dump_stack();
308  			dump_vma(vma);
309  			pr_emerg("tree range: %px start %lx end %lx\n", vma,
310  				 vmi_start, vmi_end - 1);
311  			vma_iter_dump_tree(&vmi);
312  		}
313  
314  #ifdef CONFIG_DEBUG_VM_RB
315  		if (anon_vma) {
316  			anon_vma_lock_read(anon_vma);
317  			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
318  				anon_vma_interval_tree_verify(avc);
319  			anon_vma_unlock_read(anon_vma);
320  		}
321  #endif
322  		i++;
323  	}
324  	if (i != mm->map_count) {
325  		pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
326  		bug = 1;
327  	}
328  	VM_BUG_ON_MM(bug, mm);
329  }
330  
331  #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
332  #define validate_mm(mm) do { } while (0)
333  #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
334  
335  /*
336   * vma has some anon_vma assigned, and is already inserted on that
337   * anon_vma's interval trees.
338   *
339   * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
340   * vma must be removed from the anon_vma's interval trees using
341   * anon_vma_interval_tree_pre_update_vma().
342   *
343   * After the update, the vma will be reinserted using
344   * anon_vma_interval_tree_post_update_vma().
345   *
346   * The entire update must be protected by exclusive mmap_lock and by
347   * the root anon_vma's mutex.
348   */
349  static inline void
350  anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
351  {
352  	struct anon_vma_chain *avc;
353  
354  	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
355  		anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
356  }
357  
358  static inline void
359  anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
360  {
361  	struct anon_vma_chain *avc;
362  
363  	list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
364  		anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
365  }
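/*
 * The protocol described above, roughly as the callers below
 * (vma_prepare()/vma_complete()) use it:
 *
 *	anon_vma_lock_write(vma->anon_vma);
 *	anon_vma_interval_tree_pre_update_vma(vma);
 *	vma->vm_start = new_start;	   and/or vm_end, vm_pgoff
 *	anon_vma_interval_tree_post_update_vma(vma);
 *	anon_vma_unlock_write(vma->anon_vma);
 *
 * all while mmap_lock is held for writing.
 */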
366  
367  static unsigned long count_vma_pages_range(struct mm_struct *mm,
368  		unsigned long addr, unsigned long end)
369  {
370  	VMA_ITERATOR(vmi, mm, addr);
371  	struct vm_area_struct *vma;
372  	unsigned long nr_pages = 0;
373  
374  	for_each_vma_range(vmi, vma, end) {
375  		unsigned long vm_start = max(addr, vma->vm_start);
376  		unsigned long vm_end = min(end, vma->vm_end);
377  
378  		nr_pages += PHYS_PFN(vm_end - vm_start);
379  	}
380  
381  	return nr_pages;
382  }
383  
384  static void __vma_link_file(struct vm_area_struct *vma,
385  			    struct address_space *mapping)
386  {
387  	if (vma->vm_flags & VM_SHARED)
388  		mapping_allow_writable(mapping);
389  
390  	flush_dcache_mmap_lock(mapping);
391  	vma_interval_tree_insert(vma, &mapping->i_mmap);
392  	flush_dcache_mmap_unlock(mapping);
393  }
394  
395  static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
396  {
397  	VMA_ITERATOR(vmi, mm, 0);
398  	struct address_space *mapping = NULL;
399  
400  	vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
401  	if (vma_iter_prealloc(&vmi, vma))
402  		return -ENOMEM;
403  
404  	vma_start_write(vma);
405  
406  	vma_iter_store(&vmi, vma);
407  
408  	if (vma->vm_file) {
409  		mapping = vma->vm_file->f_mapping;
410  		i_mmap_lock_write(mapping);
411  		__vma_link_file(vma, mapping);
412  		i_mmap_unlock_write(mapping);
413  	}
414  
415  	mm->map_count++;
416  	validate_mm(mm);
417  	return 0;
418  }
419  
420  /*
421   * init_multi_vma_prep() - Initializer for struct vma_prepare
422   * @vp: The vma_prepare struct
423   * @vma: The vma that will be altered once locked
424   * @next: The next vma if it is to be adjusted
425   * @remove: The first vma to be removed
426   * @remove2: The second vma to be removed
427   */
428  static inline void init_multi_vma_prep(struct vma_prepare *vp,
429  		struct vm_area_struct *vma, struct vm_area_struct *next,
430  		struct vm_area_struct *remove, struct vm_area_struct *remove2)
431  {
432  	memset(vp, 0, sizeof(struct vma_prepare));
433  	vp->vma = vma;
434  	vp->anon_vma = vma->anon_vma;
435  	vp->remove = remove;
436  	vp->remove2 = remove2;
437  	vp->adj_next = next;
438  	if (!vp->anon_vma && next)
439  		vp->anon_vma = next->anon_vma;
440  
441  	vp->file = vma->vm_file;
442  	if (vp->file)
443  		vp->mapping = vma->vm_file->f_mapping;
444  
445  }
446  
447  /*
448   * init_vma_prep() - Initializer wrapper for vma_prepare struct
449   * @vp: The vma_prepare struct
450   * @vma: The vma that will be altered once locked
451   */
452  static inline void init_vma_prep(struct vma_prepare *vp,
453  				 struct vm_area_struct *vma)
454  {
455  	init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
456  }
457  
458  
459  /*
460   * vma_prepare() - Helper function for locking VMAs prior to altering them
461   * @vp: The initialized vma_prepare struct
462   */
463  static inline void vma_prepare(struct vma_prepare *vp)
464  {
465  	if (vp->file) {
466  		uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
467  
468  		if (vp->adj_next)
469  			uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
470  				      vp->adj_next->vm_end);
471  
472  		i_mmap_lock_write(vp->mapping);
473  		if (vp->insert && vp->insert->vm_file) {
474  			/*
475  			 * Put into interval tree now, so instantiated pages
476  			 * are visible to arm/parisc __flush_dcache_page
477  			 * throughout; but we cannot insert into address
478  			 * space until vma start or end is updated.
479  			 */
480  			__vma_link_file(vp->insert,
481  					vp->insert->vm_file->f_mapping);
482  		}
483  	}
484  
485  	if (vp->anon_vma) {
486  		anon_vma_lock_write(vp->anon_vma);
487  		anon_vma_interval_tree_pre_update_vma(vp->vma);
488  		if (vp->adj_next)
489  			anon_vma_interval_tree_pre_update_vma(vp->adj_next);
490  	}
491  
492  	if (vp->file) {
493  		flush_dcache_mmap_lock(vp->mapping);
494  		vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
495  		if (vp->adj_next)
496  			vma_interval_tree_remove(vp->adj_next,
497  						 &vp->mapping->i_mmap);
498  	}
499  
500  }
501  
502  /*
503   * vma_complete() - Helper function for handling the unlocking after altering VMAs,
504   * or for inserting a VMA.
505   *
506   * @vp: The vma_prepare struct
507   * @vmi: The vma iterator
508   * @mm: The mm_struct
509   */
510  static inline void vma_complete(struct vma_prepare *vp,
511  				struct vma_iterator *vmi, struct mm_struct *mm)
512  {
513  	if (vp->file) {
514  		if (vp->adj_next)
515  			vma_interval_tree_insert(vp->adj_next,
516  						 &vp->mapping->i_mmap);
517  		vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
518  		flush_dcache_mmap_unlock(vp->mapping);
519  	}
520  
521  	if (vp->remove && vp->file) {
522  		__remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
523  		if (vp->remove2)
524  			__remove_shared_vm_struct(vp->remove2, vp->file,
525  						  vp->mapping);
526  	} else if (vp->insert) {
527  		/*
528  		 * split_vma has split insert from vma, and needs
529  		 * us to insert it before dropping the locks
530  		 * (it may either follow vma or precede it).
531  		 */
532  		vma_iter_store(vmi, vp->insert);
533  		mm->map_count++;
534  	}
535  
536  	if (vp->anon_vma) {
537  		anon_vma_interval_tree_post_update_vma(vp->vma);
538  		if (vp->adj_next)
539  			anon_vma_interval_tree_post_update_vma(vp->adj_next);
540  		anon_vma_unlock_write(vp->anon_vma);
541  	}
542  
543  	if (vp->file) {
544  		i_mmap_unlock_write(vp->mapping);
545  		uprobe_mmap(vp->vma);
546  
547  		if (vp->adj_next)
548  			uprobe_mmap(vp->adj_next);
549  	}
550  
551  	if (vp->remove) {
552  again:
553  		vma_mark_detached(vp->remove, true);
554  		if (vp->file) {
555  			uprobe_munmap(vp->remove, vp->remove->vm_start,
556  				      vp->remove->vm_end);
557  			fput(vp->file);
558  		}
559  		if (vp->remove->anon_vma)
560  			anon_vma_merge(vp->vma, vp->remove);
561  		mm->map_count--;
562  		mpol_put(vma_policy(vp->remove));
563  		if (!vp->remove2)
564  			WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
565  		vm_area_free(vp->remove);
566  
567  		/*
568  		 * In mprotect's case 6 (see comments on vma_merge),
569  		 * we are removing both mid and next vmas
570  		 */
571  		if (vp->remove2) {
572  			vp->remove = vp->remove2;
573  			vp->remove2 = NULL;
574  			goto again;
575  		}
576  	}
577  	if (vp->insert && vp->file)
578  		uprobe_mmap(vp->insert);
579  	validate_mm(mm);
580  }
581  
582  /*
583   * dup_anon_vma() - Helper function to duplicate anon_vma
584   * @dst: The destination VMA
585   * @src: The source VMA
586   * @dup: Pointer to the destination VMA when successful.
587   *
588   * Returns: 0 on success.
589   */
590  static inline int dup_anon_vma(struct vm_area_struct *dst,
591  		struct vm_area_struct *src, struct vm_area_struct **dup)
592  {
593  	/*
594  	 * Easily overlooked: when mprotect shifts the boundary, make sure the
595  	 * expanding vma has anon_vma set if the shrinking vma had, to cover any
596  	 * anon pages imported.
597  	 */
598  	if (src->anon_vma && !dst->anon_vma) {
599  		int ret;
600  
601  		vma_assert_write_locked(dst);
602  		dst->anon_vma = src->anon_vma;
603  		ret = anon_vma_clone(dst, src);
604  		if (ret)
605  			return ret;
606  
607  		*dup = dst;
608  	}
609  
610  	return 0;
611  }
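/*
 * Concrete example of the case above: mprotect() changes the first pages of
 * an anonymous vma B so they become mergeable with the preceding vma A.
 * A is expanded over the start of B; if A has no anon_vma while B's pages
 * are already mapped through B->anon_vma, A must adopt and clone B's
 * anon_vma here, otherwise rmap walks over those imported pages would not
 * find A's new range.
 */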
612  
613  /*
614   * vma_expand - Expand an existing VMA
615   *
616   * @vmi: The vma iterator
617   * @vma: The vma to expand
618   * @start: The start of the vma
619   * @end: The exclusive end of the vma
620   * @pgoff: The page offset of vma
621   * @next: The vma following @vma, if any (it may be expanded over and removed).
622   *
623   * Expand @vma to @start and @end.  Can expand off the start and end.  Will
624   * expand over @next if it's different from @vma and @end == @next->vm_end.
625   * Checking if the @vma can expand and merge with @next needs to be handled by
626   * the caller.
627   *
628   * Returns: 0 on success
629   */
630  int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
631  	       unsigned long start, unsigned long end, pgoff_t pgoff,
632  	       struct vm_area_struct *next)
633  {
634  	struct vm_area_struct *anon_dup = NULL;
635  	bool remove_next = false;
636  	struct vma_prepare vp;
637  
638  	vma_start_write(vma);
639  	if (next && (vma != next) && (end == next->vm_end)) {
640  		int ret;
641  
642  		remove_next = true;
643  		vma_start_write(next);
644  		ret = dup_anon_vma(vma, next, &anon_dup);
645  		if (ret)
646  			return ret;
647  	}
648  
649  	init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
650  	/* Not merging but overwriting any part of next is not handled. */
651  	VM_WARN_ON(next && !vp.remove &&
652  		  next != vma && end > next->vm_start);
653  	/* Only handles expanding */
654  	VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
655  
656  	/* Note: vma iterator must be pointing to 'start' */
657  	vma_iter_config(vmi, start, end);
658  	if (vma_iter_prealloc(vmi, vma))
659  		goto nomem;
660  
661  	vma_prepare(&vp);
662  	vma_adjust_trans_huge(vma, start, end, 0);
663  	vma->vm_start = start;
664  	vma->vm_end = end;
665  	vma->vm_pgoff = pgoff;
666  	vma_iter_store(vmi, vma);
667  
668  	vma_complete(&vp, vmi, vma->vm_mm);
669  	return 0;
670  
671  nomem:
672  	if (anon_dup)
673  		unlink_anon_vmas(anon_dup);
674  	return -ENOMEM;
675  }
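/*
 * Usage sketch (hypothetical caller, similar to what mmap_region() does
 * later in this file): after can_vma_merge_after() confirmed that prev can
 * absorb the new range [addr, end):
 *
 *	if (vma_expand(&vmi, prev, prev->vm_start, end, prev->vm_pgoff, next))
 *		goto out_of_memory;
 *
 * where next is either NULL or a vma ending exactly at end, in which case
 * it is removed as part of the expansion.
 */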
676  
677  /*
678   * vma_shrink() - Reduce an existing VMAs memory area
679   * @vmi: The vma iterator
680   * @vma: The VMA to modify
681   * @start: The new start
682   * @end: The new end
683   *
684   * Returns: 0 on success, -ENOMEM otherwise
685   */
686  int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
687  	       unsigned long start, unsigned long end, pgoff_t pgoff)
688  {
689  	struct vma_prepare vp;
690  
691  	WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
692  
693  	if (vma->vm_start < start)
694  		vma_iter_config(vmi, vma->vm_start, start);
695  	else
696  		vma_iter_config(vmi, end, vma->vm_end);
697  
698  	if (vma_iter_prealloc(vmi, NULL))
699  		return -ENOMEM;
700  
701  	vma_start_write(vma);
702  
703  	init_vma_prep(&vp, vma);
704  	vma_prepare(&vp);
705  	vma_adjust_trans_huge(vma, start, end, 0);
706  
707  	vma_iter_clear(vmi);
708  	vma->vm_start = start;
709  	vma->vm_end = end;
710  	vma->vm_pgoff = pgoff;
711  	vma_complete(&vp, vmi, vma->vm_mm);
712  	return 0;
713  }
714  
715  /*
716   * If the vma has a ->close operation then the driver probably needs to release
717   * per-vma resources, so we don't attempt to merge those if the caller indicates
718   * the current vma may be removed as part of the merge.
719   */
720  static inline bool is_mergeable_vma(struct vm_area_struct *vma,
721  		struct file *file, unsigned long vm_flags,
722  		struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
723  		struct anon_vma_name *anon_name, bool may_remove_vma)
724  {
725  	/*
726  	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
727  	 * match the flags but dirty bit -- the caller should mark
728  	 * merged VMA as dirty. If dirty bit won't be excluded from
729  	 * comparison, we increase pressure on the memory system forcing
730  	 * the kernel to generate new VMAs when old one could be
731  	 * extended instead.
732  	 */
733  	if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
734  		return false;
735  	if (vma->vm_file != file)
736  		return false;
737  	if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
738  		return false;
739  	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
740  		return false;
741  	if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
742  		return false;
743  	return true;
744  }
745  
746  static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
747  		 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
748  {
749  	/*
750  	 * The list_is_singular() test is to avoid merging VMA cloned from
751   * parents. This can improve scalability by reducing anon_vma lock contention.
752  	 */
753  	if ((!anon_vma1 || !anon_vma2) && (!vma ||
754  		list_is_singular(&vma->anon_vma_chain)))
755  		return true;
756  	return anon_vma1 == anon_vma2;
757  }
758  
759  /*
760   * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
761   * in front of (at a lower virtual address and file offset than) the vma.
762   *
763   * We cannot merge two vmas if they have differently assigned (non-NULL)
764   * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
765   *
766   * We don't check here for the merged mmap wrapping around the end of pagecache
767   * indices (16TB on ia32) because do_mmap() does not permit mmap's which
768   * wrap, nor mmaps which cover the final page at index -1UL.
769   *
770   * We assume the vma may be removed as part of the merge.
771   */
772  static bool
773  can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
774  		struct anon_vma *anon_vma, struct file *file,
775  		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
776  		struct anon_vma_name *anon_name)
777  {
778  	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
779  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
780  		if (vma->vm_pgoff == vm_pgoff)
781  			return true;
782  	}
783  	return false;
784  }
785  
786  /*
787   * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
788   * beyond (at a higher virtual address and file offset than) the vma.
789   *
790   * We cannot merge two vmas if they have differently assigned (non-NULL)
791   * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
792   *
793   * We assume that vma is not removed as part of the merge.
794   */
795  static bool
796  can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
797  		struct anon_vma *anon_vma, struct file *file,
798  		pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
799  		struct anon_vma_name *anon_name)
800  {
801  	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
802  	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
803  		pgoff_t vm_pglen;
804  		vm_pglen = vma_pages(vma);
805  		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
806  			return true;
807  	}
808  	return false;
809  }
810  
811  /*
812   * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
813   * figure out whether that can be merged with its predecessor or its
814   * successor.  Or both (it neatly fills a hole).
815   *
816   * In most cases - when called for mmap, brk or mremap - [addr,end) is
817   * certain not to be mapped by the time vma_merge is called; but when
818   * called for mprotect, it is certain to be already mapped (either at
819   * an offset within prev, or at the start of next), and the flags of
820   * this area are about to be changed to vm_flags - and the no-change
821   * case has already been eliminated.
822   *
823   * The following mprotect cases have to be considered, where **** is
824   * the area passed down from mprotect_fixup, never extending beyond one
825   * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
826   * at the same address as **** and is of the same or larger span, and
827   * NNNN the next vma after ****:
828   *
829   *     ****             ****                   ****
830   *    PPPPPPNNNNNN    PPPPPPNNNNNN       PPPPPPCCCCCC
831   *    cannot merge    might become       might become
832   *                    PPNNNNNNNNNN       PPPPPPPPPPCC
833   *    mmap, brk or    case 4 below       case 5 below
834   *    mremap move:
835   *                        ****               ****
836   *                    PPPP    NNNN       PPPPCCCCNNNN
837   *                    might become       might become
838   *                    PPPPPPPPPPPP 1 or  PPPPPPPPPPPP 6 or
839   *                    PPPPPPPPNNNN 2 or  PPPPPPPPNNNN 7 or
840   *                    PPPPNNNNNNNN 3     PPPPNNNNNNNN 8
841   *
842   * It is important for case 8 that the vma CCCC overlapping the
843   * region **** is never going to be extended over NNNN. Instead NNNN must
844   * be extended in region **** and CCCC must be removed. This way in
845   * all cases where vma_merge succeeds, the moment vma_merge drops the
846   * rmap_locks, the properties of the merged vma will be already
847   * correct for the whole merged range. Some of those properties like
848   * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
849   * be correct for the whole merged range immediately after the
850   * rmap_locks are released. Otherwise if NNNN would be removed and
851   * CCCC would be extended over the NNNN range, remove_migration_ptes
852   * or other rmap walkers (if working on addresses beyond the "end"
853   * parameter) may establish ptes with the wrong permissions of CCCC
854   * instead of the right permissions of NNNN.
855   *
856   * In the code below:
857   * PPPP is represented by *prev
858   * CCCC is represented by *curr or not represented at all (NULL)
859   * NNNN is represented by *next or not represented at all (NULL)
860   * **** is not represented - it will be merged and the vma containing the
861   *      area is returned, or the function will return NULL
862   */
863  struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
864  			struct vm_area_struct *prev, unsigned long addr,
865  			unsigned long end, unsigned long vm_flags,
866  			struct anon_vma *anon_vma, struct file *file,
867  			pgoff_t pgoff, struct mempolicy *policy,
868  			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
869  			struct anon_vma_name *anon_name)
870  {
871  	struct vm_area_struct *curr, *next, *res;
872  	struct vm_area_struct *vma, *adjust, *remove, *remove2;
873  	struct vm_area_struct *anon_dup = NULL;
874  	struct vma_prepare vp;
875  	pgoff_t vma_pgoff;
876  	int err = 0;
877  	bool merge_prev = false;
878  	bool merge_next = false;
879  	bool vma_expanded = false;
880  	unsigned long vma_start = addr;
881  	unsigned long vma_end = end;
882  	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
883  	long adj_start = 0;
884  
885  	/*
886  	 * We later require that vma->vm_flags == vm_flags,
887  	 * so this tests vma->vm_flags & VM_SPECIAL, too.
888  	 */
889  	if (vm_flags & VM_SPECIAL)
890  		return NULL;
891  
892  	/* Does the input range span an existing VMA? (cases 5 - 8) */
893  	curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
894  
895  	if (!curr ||			/* cases 1 - 4 */
896  	    end == curr->vm_end)	/* cases 6 - 8, adjacent VMA */
897  		next = vma_lookup(mm, end);
898  	else
899  		next = NULL;		/* case 5 */
900  
901  	if (prev) {
902  		vma_start = prev->vm_start;
903  		vma_pgoff = prev->vm_pgoff;
904  
905  		/* Can we merge the predecessor? */
906  		if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
907  		    && can_vma_merge_after(prev, vm_flags, anon_vma, file,
908  					   pgoff, vm_userfaultfd_ctx, anon_name)) {
909  			merge_prev = true;
910  			vma_prev(vmi);
911  		}
912  	}
913  
914  	/* Can we merge the successor? */
915  	if (next && mpol_equal(policy, vma_policy(next)) &&
916  	    can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
917  				 vm_userfaultfd_ctx, anon_name)) {
918  		merge_next = true;
919  	}
920  
921  	/* Verify some invariants that must be enforced by the caller. */
922  	VM_WARN_ON(prev && addr <= prev->vm_start);
923  	VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
924  	VM_WARN_ON(addr >= end);
925  
926  	if (!merge_prev && !merge_next)
927  		return NULL; /* Not mergeable. */
928  
929  	if (merge_prev)
930  		vma_start_write(prev);
931  
932  	res = vma = prev;
933  	remove = remove2 = adjust = NULL;
934  
935  	/* Can we merge both the predecessor and the successor? */
936  	if (merge_prev && merge_next &&
937  	    is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
938  		vma_start_write(next);
939  		remove = next;				/* case 1 */
940  		vma_end = next->vm_end;
941  		err = dup_anon_vma(prev, next, &anon_dup);
942  		if (curr) {				/* case 6 */
943  			vma_start_write(curr);
944  			remove = curr;
945  			remove2 = next;
946  			if (!next->anon_vma)
947  				err = dup_anon_vma(prev, curr, &anon_dup);
948  		}
949  	} else if (merge_prev) {			/* case 2 */
950  		if (curr) {
951  			vma_start_write(curr);
952  			if (end == curr->vm_end) {	/* case 7 */
953  				/*
954  				 * can_vma_merge_after() assumed we would not be
955  				 * removing prev vma, so it skipped the check
956  				 * for vm_ops->close, but we are removing curr
957  				 */
958  				if (curr->vm_ops && curr->vm_ops->close)
959  					err = -EINVAL;
960  				remove = curr;
961  			} else {			/* case 5 */
962  				adjust = curr;
963  				adj_start = (end - curr->vm_start);
964  			}
965  			if (!err)
966  				err = dup_anon_vma(prev, curr, &anon_dup);
967  		}
968  	} else { /* merge_next */
969  		vma_start_write(next);
970  		res = next;
971  		if (prev && addr < prev->vm_end) {	/* case 4 */
972  			vma_start_write(prev);
973  			vma_end = addr;
974  			adjust = next;
975  			adj_start = -(prev->vm_end - addr);
976  			err = dup_anon_vma(next, prev, &anon_dup);
977  		} else {
978  			/*
979  			 * Note that cases 3 and 8 are the ONLY ones where prev
980  			 * is permitted to be (but is not necessarily) NULL.
981  			 */
982  			vma = next;			/* case 3 */
983  			vma_start = addr;
984  			vma_end = next->vm_end;
985  			vma_pgoff = next->vm_pgoff - pglen;
986  			if (curr) {			/* case 8 */
987  				vma_pgoff = curr->vm_pgoff;
988  				vma_start_write(curr);
989  				remove = curr;
990  				err = dup_anon_vma(next, curr, &anon_dup);
991  			}
992  		}
993  	}
994  
995  	/* Error in anon_vma clone. */
996  	if (err)
997  		goto anon_vma_fail;
998  
999  	if (vma_start < vma->vm_start || vma_end > vma->vm_end)
1000  		vma_expanded = true;
1001  
1002  	if (vma_expanded) {
1003  		vma_iter_config(vmi, vma_start, vma_end);
1004  	} else {
1005  		vma_iter_config(vmi, adjust->vm_start + adj_start,
1006  				adjust->vm_end);
1007  	}
1008  
1009  	if (vma_iter_prealloc(vmi, vma))
1010  		goto prealloc_fail;
1011  
1012  	init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
1013  	VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
1014  		   vp.anon_vma != adjust->anon_vma);
1015  
1016  	vma_prepare(&vp);
1017  	vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
1018  
1019  	vma->vm_start = vma_start;
1020  	vma->vm_end = vma_end;
1021  	vma->vm_pgoff = vma_pgoff;
1022  
1023  	if (vma_expanded)
1024  		vma_iter_store(vmi, vma);
1025  
1026  	if (adj_start) {
1027  		adjust->vm_start += adj_start;
1028  		adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
1029  		if (adj_start < 0) {
1030  			WARN_ON(vma_expanded);
1031  			vma_iter_store(vmi, next);
1032  		}
1033  	}
1034  
1035  	vma_complete(&vp, vmi, mm);
1036  	khugepaged_enter_vma(res, vm_flags);
1037  	return res;
1038  
1039  prealloc_fail:
1040  	if (anon_dup)
1041  		unlink_anon_vmas(anon_dup);
1042  
1043  anon_vma_fail:
1044  	vma_iter_set(vmi, addr);
1045  	vma_iter_load(vmi);
1046  	return NULL;
1047  }
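/*
 * Worked example of case 1 above: PPPP and NNNN are anonymous vmas with an
 * unmapped hole between them, and a new anonymous mapping **** exactly
 * fills the hole with identical flags, policy and compatible anon_vmas.
 * Both merge_prev and merge_next are true, next is picked as "remove",
 * prev is expanded from prev->vm_start to next->vm_end, and the single
 * resulting vma is returned.
 */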
1048  
1049  /*
1050   * Rough compatibility check to quickly see if it's even worth looking
1051   * at sharing an anon_vma.
1052   *
1053   * They need to have the same vm_file, and the flags can only differ
1054   * in things that mprotect may change.
1055   *
1056   * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1057   * we can merge the two vma's. For example, we refuse to merge a vma if
1058   * there is a vm_ops->close() function, because that indicates that the
1059   * driver is doing some kind of reference counting. But that doesn't
1060   * really matter for the anon_vma sharing case.
1061   */
1062  static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1063  {
1064  	return a->vm_end == b->vm_start &&
1065  		mpol_equal(vma_policy(a), vma_policy(b)) &&
1066  		a->vm_file == b->vm_file &&
1067  		!((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1068  		b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1069  }
1070  
1071  /*
1072   * Do some basic sanity checking to see if we can re-use the anon_vma
1073   * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1074   * the same as 'old', the other will be the new one that is trying
1075   * to share the anon_vma.
1076   *
1077   * NOTE! This runs with mmap_lock held for reading, so it is possible that
1078   * the anon_vma of 'old' is concurrently in the process of being set up
1079   * by another page fault trying to merge _that_. But that's ok: if it
1080   * is being set up, that automatically means that it will be a singleton
1081   * acceptable for merging, so we can do all of this optimistically. But
1082   * we do that READ_ONCE() to make sure that we never re-load the pointer.
1083   *
1084   * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1085   * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1086   * is to return an anon_vma that is "complex" due to having gone through
1087   * a fork).
1088   *
1089   * We also make sure that the two vma's are compatible (adjacent,
1090   * and with the same memory policies). That's all stable, even with just
1091   * a read lock on the mmap_lock.
1092   */
1093  static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1094  {
1095  	if (anon_vma_compatible(a, b)) {
1096  		struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1097  
1098  		if (anon_vma && list_is_singular(&old->anon_vma_chain))
1099  			return anon_vma;
1100  	}
1101  	return NULL;
1102  }
1103  
1104  /*
1105   * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1106   * neighbouring vmas for a suitable anon_vma, before it goes off
1107   * to allocate a new anon_vma.  It checks because a repetitive
1108   * sequence of mprotects and faults may otherwise lead to distinct
1109   * anon_vmas being allocated, preventing vma merge in subsequent
1110   * mprotect.
1111   */
1112  struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1113  {
1114  	MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
1115  	struct anon_vma *anon_vma = NULL;
1116  	struct vm_area_struct *prev, *next;
1117  
1118  	/* Try next first. */
1119  	next = mas_walk(&mas);
1120  	if (next) {
1121  		anon_vma = reusable_anon_vma(next, vma, next);
1122  		if (anon_vma)
1123  			return anon_vma;
1124  	}
1125  
1126  	prev = mas_prev(&mas, 0);
1127  	VM_BUG_ON_VMA(prev != vma, vma);
1128  	prev = mas_prev(&mas, 0);
1129  	/* Now try the previous vma. */
1130  	if (prev)
1131  		anon_vma = reusable_anon_vma(prev, prev, vma);
1132  
1133  	/*
1134  	 * We might reach here with anon_vma == NULL if we can't find
1135  	 * any reusable anon_vma.
1136  	 * There's no absolute need to look only at touching neighbours:
1137  	 * we could search further afield for "compatible" anon_vmas.
1138  	 * But it would probably just be a waste of time searching,
1139  	 * or lead to too many vmas hanging off the same anon_vma.
1140  	 * We're trying to allow mprotect remerging later on,
1141  	 * not trying to minimize memory used for anon_vmas.
1142  	 */
1143  	return anon_vma;
1144  }
1145  
1146  /*
1147   * If a hint addr is less than mmap_min_addr change hint to be as
1148   * low as possible but still greater than mmap_min_addr
1149   */
1150  static inline unsigned long round_hint_to_min(unsigned long hint)
1151  {
1152  	hint &= PAGE_MASK;
1153  	if (((void *)hint != NULL) &&
1154  	    (hint < mmap_min_addr))
1155  		return PAGE_ALIGN(mmap_min_addr);
1156  	return hint;
1157  }
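/*
 * Example (assuming mmap_min_addr is 0x10000): a non-NULL hint of 0x1000 is
 * below the limit and gets bumped to 0x10000, while a NULL hint is returned
 * unchanged so the allocator is free to pick any address.
 */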
1158  
1159  bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
1160  			unsigned long bytes)
1161  {
1162  	unsigned long locked_pages, limit_pages;
1163  
1164  	if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
1165  		return true;
1166  
1167  	locked_pages = bytes >> PAGE_SHIFT;
1168  	locked_pages += mm->locked_vm;
1169  
1170  	limit_pages = rlimit(RLIMIT_MEMLOCK);
1171  	limit_pages >>= PAGE_SHIFT;
1172  
1173  	return locked_pages <= limit_pages;
1174  }
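/*
 * Worked example (assuming 4KiB pages): with RLIMIT_MEMLOCK at 8MiB
 * (2048 pages) and 1MiB (256 pages) already in mm->locked_vm, a VM_LOCKED
 * request for 4MiB passes (256 + 1024 <= 2048) but a request for 8MiB
 * fails (256 + 2048 > 2048) unless the caller has CAP_IPC_LOCK.
 */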
1175  
1176  static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1177  {
1178  	if (S_ISREG(inode->i_mode))
1179  		return MAX_LFS_FILESIZE;
1180  
1181  	if (S_ISBLK(inode->i_mode))
1182  		return MAX_LFS_FILESIZE;
1183  
1184  	if (S_ISSOCK(inode->i_mode))
1185  		return MAX_LFS_FILESIZE;
1186  
1187  	/* Special "we do even unsigned file positions" case */
1188  	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1189  		return 0;
1190  
1191  	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
1192  	return ULONG_MAX;
1193  }
1194  
1195  static inline bool file_mmap_ok(struct file *file, struct inode *inode,
1196  				unsigned long pgoff, unsigned long len)
1197  {
1198  	u64 maxsize = file_mmap_size_max(file, inode);
1199  
1200  	if (maxsize && len > maxsize)
1201  		return false;
1202  	maxsize -= len;
1203  	if (pgoff > maxsize >> PAGE_SHIFT)
1204  		return false;
1205  	return true;
1206  }
1207  
1208  /*
1209   * The caller must write-lock current->mm->mmap_lock.
1210   */
1211  unsigned long do_mmap(struct file *file, unsigned long addr,
1212  			unsigned long len, unsigned long prot,
1213  			unsigned long flags, vm_flags_t vm_flags,
1214  			unsigned long pgoff, unsigned long *populate,
1215  			struct list_head *uf)
1216  {
1217  	struct mm_struct *mm = current->mm;
1218  	int pkey = 0;
1219  
1220  	*populate = 0;
1221  
1222  	if (!len)
1223  		return -EINVAL;
1224  
1225  	/*
1226  	 * Does the application expect PROT_READ to imply PROT_EXEC?
1227  	 *
1228  	 * (the exception is when the underlying filesystem is noexec
1229  	 *  mounted, in which case we don't add PROT_EXEC.)
1230  	 */
1231  	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1232  		if (!(file && path_noexec(&file->f_path)))
1233  			prot |= PROT_EXEC;
1234  
1235  	/* force arch specific MAP_FIXED handling in get_unmapped_area */
1236  	if (flags & MAP_FIXED_NOREPLACE)
1237  		flags |= MAP_FIXED;
1238  
1239  	if (!(flags & MAP_FIXED))
1240  		addr = round_hint_to_min(addr);
1241  
1242  	/* Careful about overflows.. */
1243  	len = PAGE_ALIGN(len);
1244  	if (!len)
1245  		return -ENOMEM;
1246  
1247  	/* offset overflow? */
1248  	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1249  		return -EOVERFLOW;
1250  
1251  	/* Too many mappings? */
1252  	if (mm->map_count > sysctl_max_map_count)
1253  		return -ENOMEM;
1254  
1255  	/* Obtain the address to map to. We verify (or select) it and ensure
1256  	 * that it represents a valid section of the address space.
1257  	 */
1258  	addr = get_unmapped_area(file, addr, len, pgoff, flags);
1259  	if (IS_ERR_VALUE(addr))
1260  		return addr;
1261  
1262  	if (flags & MAP_FIXED_NOREPLACE) {
1263  		if (find_vma_intersection(mm, addr, addr + len))
1264  			return -EEXIST;
1265  	}
1266  
1267  	if (prot == PROT_EXEC) {
1268  		pkey = execute_only_pkey(mm);
1269  		if (pkey < 0)
1270  			pkey = 0;
1271  	}
1272  
1273  	/* Do simple checking here so the lower-level routines won't have
1274  	 * to. We assume access permissions have been handled by the open
1275  	 * of the memory object, so we don't do any here.
1276  	 */
1277  	vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
1278  			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1279  
1280  	if (flags & MAP_LOCKED)
1281  		if (!can_do_mlock())
1282  			return -EPERM;
1283  
1284  	if (!mlock_future_ok(mm, vm_flags, len))
1285  		return -EAGAIN;
1286  
1287  	if (file) {
1288  		struct inode *inode = file_inode(file);
1289  		unsigned long flags_mask;
1290  
1291  		if (!file_mmap_ok(file, inode, pgoff, len))
1292  			return -EOVERFLOW;
1293  
1294  		flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
1295  
1296  		switch (flags & MAP_TYPE) {
1297  		case MAP_SHARED:
1298  			/*
1299  			 * Force use of MAP_SHARED_VALIDATE with non-legacy
1300  			 * flags. E.g. MAP_SYNC is dangerous to use with
1301  			 * MAP_SHARED as you don't know which consistency model
1302  			 * you will get. We silently ignore unsupported flags
1303  			 * with MAP_SHARED to preserve backward compatibility.
1304  			 */
1305  			flags &= LEGACY_MAP_MASK;
1306  			fallthrough;
1307  		case MAP_SHARED_VALIDATE:
1308  			if (flags & ~flags_mask)
1309  				return -EOPNOTSUPP;
1310  			if (prot & PROT_WRITE) {
1311  				if (!(file->f_mode & FMODE_WRITE))
1312  					return -EACCES;
1313  				if (IS_SWAPFILE(file->f_mapping->host))
1314  					return -ETXTBSY;
1315  			}
1316  
1317  			/*
1318  			 * Make sure we don't allow writing to an append-only
1319  			 * file..
1320  			 */
1321  			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1322  				return -EACCES;
1323  
1324  			vm_flags |= VM_SHARED | VM_MAYSHARE;
1325  			if (!(file->f_mode & FMODE_WRITE))
1326  				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1327  			fallthrough;
1328  		case MAP_PRIVATE:
1329  			if (!(file->f_mode & FMODE_READ))
1330  				return -EACCES;
1331  			if (path_noexec(&file->f_path)) {
1332  				if (vm_flags & VM_EXEC)
1333  					return -EPERM;
1334  				vm_flags &= ~VM_MAYEXEC;
1335  			}
1336  
1337  			if (!file->f_op->mmap)
1338  				return -ENODEV;
1339  			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1340  				return -EINVAL;
1341  			break;
1342  
1343  		default:
1344  			return -EINVAL;
1345  		}
1346  	} else {
1347  		switch (flags & MAP_TYPE) {
1348  		case MAP_SHARED:
1349  			if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1350  				return -EINVAL;
1351  			/*
1352  			 * Ignore pgoff.
1353  			 */
1354  			pgoff = 0;
1355  			vm_flags |= VM_SHARED | VM_MAYSHARE;
1356  			break;
1357  		case MAP_PRIVATE:
1358  			/*
1359  			 * Set pgoff according to addr for anon_vma.
1360  			 */
1361  			pgoff = addr >> PAGE_SHIFT;
1362  			break;
1363  		default:
1364  			return -EINVAL;
1365  		}
1366  	}
1367  
1368  	/*
1369  	 * Set 'VM_NORESERVE' if we should not account for the
1370  	 * memory use of this mapping.
1371  	 */
1372  	if (flags & MAP_NORESERVE) {
1373  		/* We honor MAP_NORESERVE if allowed to overcommit */
1374  		if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1375  			vm_flags |= VM_NORESERVE;
1376  
1377  		/* hugetlb applies strict overcommit unless MAP_NORESERVE */
1378  		if (file && is_file_hugepages(file))
1379  			vm_flags |= VM_NORESERVE;
1380  	}
1381  
1382  	addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
1383  	if (!IS_ERR_VALUE(addr) &&
1384  	    ((vm_flags & VM_LOCKED) ||
1385  	     (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1386  		*populate = len;
1387  	return addr;
1388  }
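/*
 * Userspace view of the *populate handling above (illustrative only):
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
 *
 * reaches this function with MAP_POPULATE set and MAP_NONBLOCK clear, so
 * *populate is set to len and the caller (vm_mmap_pgoff()) pre-faults the
 * whole range with mm_populate() once mmap_lock has been dropped.
 */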
1389  
1390  unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1391  			      unsigned long prot, unsigned long flags,
1392  			      unsigned long fd, unsigned long pgoff)
1393  {
1394  	struct file *file = NULL;
1395  	unsigned long retval;
1396  
1397  	if (!(flags & MAP_ANONYMOUS)) {
1398  		audit_mmap_fd(fd, flags);
1399  		file = fget(fd);
1400  		if (!file)
1401  			return -EBADF;
1402  		if (is_file_hugepages(file)) {
1403  			len = ALIGN(len, huge_page_size(hstate_file(file)));
1404  		} else if (unlikely(flags & MAP_HUGETLB)) {
1405  			retval = -EINVAL;
1406  			goto out_fput;
1407  		}
1408  	} else if (flags & MAP_HUGETLB) {
1409  		struct hstate *hs;
1410  
1411  		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1412  		if (!hs)
1413  			return -EINVAL;
1414  
1415  		len = ALIGN(len, huge_page_size(hs));
1416  		/*
1417  		 * VM_NORESERVE is used because the reservations will be
1418  		 * taken when vm_ops->mmap() is called
1419  		 */
1420  		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1421  				VM_NORESERVE,
1422  				HUGETLB_ANONHUGE_INODE,
1423  				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1424  		if (IS_ERR(file))
1425  			return PTR_ERR(file);
1426  	}
1427  
1428  	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1429  out_fput:
1430  	if (file)
1431  		fput(file);
1432  	return retval;
1433  }
1434  
1435  SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1436  		unsigned long, prot, unsigned long, flags,
1437  		unsigned long, fd, unsigned long, pgoff)
1438  {
1439  	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1440  }
1441  
1442  #ifdef __ARCH_WANT_SYS_OLD_MMAP
1443  struct mmap_arg_struct {
1444  	unsigned long addr;
1445  	unsigned long len;
1446  	unsigned long prot;
1447  	unsigned long flags;
1448  	unsigned long fd;
1449  	unsigned long offset;
1450  };
1451  
1452  SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1453  {
1454  	struct mmap_arg_struct a;
1455  
1456  	if (copy_from_user(&a, arg, sizeof(a)))
1457  		return -EFAULT;
1458  	if (offset_in_page(a.offset))
1459  		return -EINVAL;
1460  
1461  	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1462  			       a.offset >> PAGE_SHIFT);
1463  }
1464  #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1465  
1466  static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1467  {
1468  	return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1469  }
1470  
1471  static bool vma_is_shared_writable(struct vm_area_struct *vma)
1472  {
1473  	return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1474  		(VM_WRITE | VM_SHARED);
1475  }
1476  
1477  static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1478  {
1479  	/* No managed pages to writeback. */
1480  	if (vma->vm_flags & VM_PFNMAP)
1481  		return false;
1482  
1483  	return vma->vm_file && vma->vm_file->f_mapping &&
1484  		mapping_can_writeback(vma->vm_file->f_mapping);
1485  }
1486  
1487  /*
1488   * Does this VMA require the underlying folios to have their dirty state
1489   * tracked?
1490   */
1491  bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1492  {
1493  	/* Only shared, writable VMAs require dirty tracking. */
1494  	if (!vma_is_shared_writable(vma))
1495  		return false;
1496  
1497  	/* Does the filesystem need to be notified? */
1498  	if (vm_ops_needs_writenotify(vma->vm_ops))
1499  		return true;
1500  
1501  	/*
1502  	 * Even if the filesystem doesn't indicate a need for writenotify, if it
1503  	 * can writeback, dirty tracking is still required.
1504  	 */
1505  	return vma_fs_can_writeback(vma);
1506  }
1507  
1508  /*
1509   * Some shared mappings will want the pages marked read-only
1510   * to track write events. If so, we'll downgrade vm_page_prot
1511   * to the private version (using protection_map[] without the
1512   * VM_SHARED bit).
1513   */
1514  int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1515  {
1516  	/* If it was private or non-writable, the write bit is already clear */
1517  	if (!vma_is_shared_writable(vma))
1518  		return 0;
1519  
1520  	/* The backer wishes to know when pages are first written to? */
1521  	if (vm_ops_needs_writenotify(vma->vm_ops))
1522  		return 1;
1523  
1524  	/* The open routine did something to the protections that pgprot_modify
1525  	 * won't preserve? */
1526  	if (pgprot_val(vm_page_prot) !=
1527  	    pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1528  		return 0;
1529  
1530  	/*
1531  	 * Do we need to track softdirty? hugetlb does not support softdirty
1532  	 * tracking yet.
1533  	 */
1534  	if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1535  		return 1;
1536  
1537  	/* Do we need write faults for uffd-wp tracking? */
1538  	if (userfaultfd_wp(vma))
1539  		return 1;
1540  
1541  	/* Can the mapping track the dirty pages? */
1542  	return vma_fs_can_writeback(vma);
1543  }
1544  
1545  /*
1546   * We account for memory if it's a private writeable mapping,
1547   * not hugepages and VM_NORESERVE wasn't set.
1548   */
1549  static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1550  {
1551  	/*
1552  	 * hugetlb has its own accounting separate from the core VM
1553  	 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1554  	 */
1555  	if (file && is_file_hugepages(file))
1556  		return 0;
1557  
1558  	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1559  }
1560  
1561  /**
1562   * unmapped_area() - Find an area between the low_limit and the high_limit with
1563   * the correct alignment and offset, all from @info. Note: current->mm is used
1564   * for the search.
1565   *
1566   * @info: The unmapped area information including the range [low_limit -
1567   * high_limit), the alignment offset and mask.
1568   *
1569   * Return: A memory address or -ENOMEM.
1570   */
1571  static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1572  {
1573  	unsigned long length, gap;
1574  	unsigned long low_limit, high_limit;
1575  	struct vm_area_struct *tmp;
1576  
1577  	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1578  
1579  	/* Adjust search length to account for worst case alignment overhead */
1580  	length = info->length + info->align_mask;
1581  	if (length < info->length)
1582  		return -ENOMEM;
1583  
1584  	low_limit = info->low_limit;
1585  	if (low_limit < mmap_min_addr)
1586  		low_limit = mmap_min_addr;
1587  	high_limit = info->high_limit;
1588  retry:
1589  	if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
1590  		return -ENOMEM;
1591  
1592  	gap = mas.index;
1593  	gap += (info->align_offset - gap) & info->align_mask;
1594  	tmp = mas_next(&mas, ULONG_MAX);
1595  	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1596  		if (vm_start_gap(tmp) < gap + length - 1) {
1597  			low_limit = tmp->vm_end;
1598  			mas_reset(&mas);
1599  			goto retry;
1600  		}
1601  	} else {
1602  		tmp = mas_prev(&mas, 0);
1603  		if (tmp && vm_end_gap(tmp) > gap) {
1604  			low_limit = vm_end_gap(tmp);
1605  			mas_reset(&mas);
1606  			goto retry;
1607  		}
1608  	}
1609  
1610  	return gap;
1611  }
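/*
 * Worked example of the alignment fixup above, for a hypothetical caller
 * asking for 2MiB alignment (align_mask = 0x1fffff, align_offset = 0):
 * if the maple tree hands back a gap starting at 0x7f1234561000, then
 *
 *	gap += (0 - 0x7f1234561000) & 0x1fffff;
 *
 * rounds it up to 0x7f1234600000, the next 2MiB boundary; the extra
 * align_mask added to the search length above guarantees the aligned
 * address still fits in the gap.
 */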
1612  
1613  /**
1614   * unmapped_area_topdown() - Find an area between the low_limit and the
1615   * high_limit with the correct alignment and offset at the highest available
1616   * address, all from @info. Note: current->mm is used for the search.
1617   *
1618   * @info: The unmapped area information including the range [low_limit -
1619   * high_limit), the alignment offset and mask.
1620   *
1621   * Return: A memory address or -ENOMEM.
1622   */
1623  static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1624  {
1625  	unsigned long length, gap, gap_end;
1626  	unsigned long low_limit, high_limit;
1627  	struct vm_area_struct *tmp;
1628  
1629  	MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1630  	/* Adjust search length to account for worst case alignment overhead */
1631  	length = info->length + info->align_mask;
1632  	if (length < info->length)
1633  		return -ENOMEM;
1634  
1635  	low_limit = info->low_limit;
1636  	if (low_limit < mmap_min_addr)
1637  		low_limit = mmap_min_addr;
1638  	high_limit = info->high_limit;
1639  retry:
1640  	if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
1641  		return -ENOMEM;
1642  
1643  	gap = mas.last + 1 - info->length;
1644  	gap -= (gap - info->align_offset) & info->align_mask;
1645  	gap_end = mas.last;
1646  	tmp = mas_next(&mas, ULONG_MAX);
1647  	if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1648  		if (vm_start_gap(tmp) <= gap_end) {
1649  			high_limit = vm_start_gap(tmp);
1650  			mas_reset(&mas);
1651  			goto retry;
1652  		}
1653  	} else {
1654  		tmp = mas_prev(&mas, 0);
1655  		if (tmp && vm_end_gap(tmp) > gap) {
1656  			high_limit = tmp->vm_start;
1657  			mas_reset(&mas);
1658  			goto retry;
1659  		}
1660  	}
1661  
1662  	return gap;
1663  }
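/*
 * The topdown variant rounds down instead: with the same hypothetical 2MiB
 * alignment, a gap whose last usable byte is mas.last = 0x7ffd123fffff and
 * info->length = 0x200000 gives
 *
 *	gap = 0x7ffd12400000 - 0x200000 = 0x7ffd12200000;
 *
 * which is already aligned, so subtracting (gap - align_offset) & align_mask
 * removes nothing; an unaligned result would have its low bits cleared so
 * the mapping still ends within the gap.
 */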
1664  
1665  /*
1666   * Search for an unmapped address range.
1667   *
1668   * We are looking for a range that:
1669   * - does not intersect with any VMA;
1670   * - is contained within the [low_limit, high_limit) interval;
1671   * - is at least the desired size.
1672   * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
1673   */
1674  unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
1675  {
1676  	unsigned long addr;
1677  
1678  	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
1679  		addr = unmapped_area_topdown(info);
1680  	else
1681  		addr = unmapped_area(info);
1682  
1683  	trace_vm_unmapped_area(addr, info);
1684  	return addr;
1685  }
1686  
1687  /* Get an address range which is currently unmapped.
1688   * For shmat() with addr=0.
1689   *
1690   * Ugly calling convention alert:
1691   * Return value with the low bits set means error value,
1692   * ie
1693   *	if (ret & ~PAGE_MASK)
1694   *		error = ret;
1695   *
1696   * This function "knows" that -ENOMEM has the bits set.
1697   */
1698  unsigned long
1699  generic_get_unmapped_area(struct file *filp, unsigned long addr,
1700  			  unsigned long len, unsigned long pgoff,
1701  			  unsigned long flags)
1702  {
1703  	struct mm_struct *mm = current->mm;
1704  	struct vm_area_struct *vma, *prev;
1705  	struct vm_unmapped_area_info info;
1706  	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1707  
1708  	if (len > mmap_end - mmap_min_addr)
1709  		return -ENOMEM;
1710  
1711  	if (flags & MAP_FIXED)
1712  		return addr;
1713  
1714  	if (addr) {
1715  		addr = PAGE_ALIGN(addr);
1716  		vma = find_vma_prev(mm, addr, &prev);
1717  		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1718  		    (!vma || addr + len <= vm_start_gap(vma)) &&
1719  		    (!prev || addr >= vm_end_gap(prev)))
1720  			return addr;
1721  	}
1722  
1723  	info.flags = 0;
1724  	info.length = len;
1725  	info.low_limit = mm->mmap_base;
1726  	info.high_limit = mmap_end;
1727  	info.align_mask = 0;
1728  	info.align_offset = 0;
1729  	return vm_unmapped_area(&info);
1730  }
1731  
1732  #ifndef HAVE_ARCH_UNMAPPED_AREA
1733  unsigned long
1734  arch_get_unmapped_area(struct file *filp, unsigned long addr,
1735  		       unsigned long len, unsigned long pgoff,
1736  		       unsigned long flags)
1737  {
1738  	return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
1739  }
1740  #endif
1741  
1742  /*
1743   * This mmap-allocator allocates new areas top-down from below the
1744   * stack's low limit (the base):
1745   */
1746  unsigned long
1747  generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1748  				  unsigned long len, unsigned long pgoff,
1749  				  unsigned long flags)
1750  {
1751  	struct vm_area_struct *vma, *prev;
1752  	struct mm_struct *mm = current->mm;
1753  	struct vm_unmapped_area_info info;
1754  	const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1755  
1756  	/* requested length too big for entire address space */
1757  	if (len > mmap_end - mmap_min_addr)
1758  		return -ENOMEM;
1759  
1760  	if (flags & MAP_FIXED)
1761  		return addr;
1762  
1763  	/* requesting a specific address */
1764  	if (addr) {
1765  		addr = PAGE_ALIGN(addr);
1766  		vma = find_vma_prev(mm, addr, &prev);
1767  		if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1768  				(!vma || addr + len <= vm_start_gap(vma)) &&
1769  				(!prev || addr >= vm_end_gap(prev)))
1770  			return addr;
1771  	}
1772  
1773  	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1774  	info.length = len;
1775  	info.low_limit = PAGE_SIZE;
1776  	info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
1777  	info.align_mask = 0;
1778  	info.align_offset = 0;
1779  	addr = vm_unmapped_area(&info);
1780  
1781  	/*
1782  	 * A failed mmap() very likely causes application failure,
1783  	 * so fall back to the bottom-up function here. This scenario
1784  	 * can happen with large stack limits and large mmap()
1785  	 * allocations.
1786  	 */
1787  	if (offset_in_page(addr)) {
1788  		VM_BUG_ON(addr != -ENOMEM);
1789  		info.flags = 0;
1790  		info.low_limit = TASK_UNMAPPED_BASE;
1791  		info.high_limit = mmap_end;
1792  		addr = vm_unmapped_area(&info);
1793  	}
1794  
1795  	return addr;
1796  }
1797  
1798  #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1799  unsigned long
1800  arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1801  			       unsigned long len, unsigned long pgoff,
1802  			       unsigned long flags)
1803  {
1804  	return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
1805  }
1806  #endif
1807  
1808  unsigned long
1809  get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1810  		unsigned long pgoff, unsigned long flags)
1811  {
1812  	unsigned long (*get_area)(struct file *, unsigned long,
1813  				  unsigned long, unsigned long, unsigned long);
1814  
1815  	unsigned long error = arch_mmap_check(addr, len, flags);
1816  	if (error)
1817  		return error;
1818  
1819  	/* Careful about overflows.. */
1820  	if (len > TASK_SIZE)
1821  		return -ENOMEM;
1822  
1823  	get_area = current->mm->get_unmapped_area;
1824  	if (file) {
1825  		if (file->f_op->get_unmapped_area)
1826  			get_area = file->f_op->get_unmapped_area;
1827  	} else if (flags & MAP_SHARED) {
1828  		/*
1829  		 * mmap_region() will call shmem_zero_setup() to create a file,
1830  		 * so use shmem's get_unmapped_area in case it can be huge.
1831  		 * do_mmap() will clear pgoff, so match alignment.
1832  		 */
1833  		pgoff = 0;
1834  		get_area = shmem_get_unmapped_area;
1835  	}
1836  
1837  	addr = get_area(file, addr, len, pgoff, flags);
1838  	if (IS_ERR_VALUE(addr))
1839  		return addr;
1840  
1841  	if (addr > TASK_SIZE - len)
1842  		return -ENOMEM;
1843  	if (offset_in_page(addr))
1844  		return -EINVAL;
1845  
1846  	error = security_mmap_addr(addr);
1847  	return error ? error : addr;
1848  }
1849  
1850  EXPORT_SYMBOL(get_unmapped_area);
1851  
1852  /**
1853   * find_vma_intersection() - Look up the first VMA which intersects the interval
1854   * @mm: The process address space.
1855   * @start_addr: The inclusive start user address.
1856   * @end_addr: The exclusive end user address.
1857   *
1858   * Returns: The first VMA within the provided range, %NULL otherwise.  Assumes
1859   * start_addr < end_addr.
1860   */
1861  struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
1862  					     unsigned long start_addr,
1863  					     unsigned long end_addr)
1864  {
1865  	unsigned long index = start_addr;
1866  
1867  	mmap_assert_locked(mm);
1868  	return mt_find(&mm->mm_mt, &index, end_addr - 1);
1869  }
1870  EXPORT_SYMBOL(find_vma_intersection);
1871  
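/*
 * Illustrative sketch (not part of the original file): using
 * find_vma_intersection() to check that a range is entirely unmapped.
 * The helper name is hypothetical; the caller must hold the mmap_lock.
 */
static bool example_range_is_unmapped(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	return find_vma_intersection(mm, start, end) == NULL;
}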
1872  /**
1873   * find_vma() - Find the VMA for a given address, or the next VMA.
1874   * @mm: The mm_struct to check
1875   * @addr: The address
1876   *
1877   * Returns: The VMA associated with addr, or the next VMA.
1878   * May return %NULL in the case of no VMA at addr or above.
1879   */
1880  struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1881  {
1882  	unsigned long index = addr;
1883  
1884  	mmap_assert_locked(mm);
1885  	return mt_find(&mm->mm_mt, &index, ULONG_MAX);
1886  }
1887  EXPORT_SYMBOL(find_vma);
1888  
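/*
 * Illustrative sketch (not part of the original file): the usual locked
 * lookup built on find_vma().  The helper name is hypothetical; note that
 * vma_lookup() already wraps the "vma contains addr" check.
 */
static bool example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);
	vma = find_vma(mm, addr);		/* VMA at or above addr, or NULL */
	mapped = vma && vma->vm_start <= addr;	/* reject the "next VMA" case */
	mmap_read_unlock(mm);

	return mapped;
}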
1889  /**
1890   * find_vma_prev() - Find the VMA for a given address, or the next vma and
1891   * set %pprev to the previous VMA, if any.
1892   * @mm: The mm_struct to check
1893   * @addr: The address
1894   * @pprev: The pointer to set to the previous VMA
1895   *
1896   * Note that the RCU lock is not taken here since the external mmap_lock is
1897   * used instead.
1898   *
1899   * Returns: The VMA associated with @addr, or the next vma.
1900   * May return %NULL in the case of no vma at addr or above.
1901   */
1902  struct vm_area_struct *
1903  find_vma_prev(struct mm_struct *mm, unsigned long addr,
1904  			struct vm_area_struct **pprev)
1905  {
1906  	struct vm_area_struct *vma;
1907  	MA_STATE(mas, &mm->mm_mt, addr, addr);
1908  
1909  	vma = mas_walk(&mas);
1910  	*pprev = mas_prev(&mas, 0);
1911  	if (!vma)
1912  		vma = mas_next(&mas, ULONG_MAX);
1913  	return vma;
1914  }
1915  
1916  /*
1917   * Verify that the stack growth is acceptable and
1918   * update accounting. This is shared with both the
1919   * grow-up and grow-down cases.
1920   */
1921  static int acct_stack_growth(struct vm_area_struct *vma,
1922  			     unsigned long size, unsigned long grow)
1923  {
1924  	struct mm_struct *mm = vma->vm_mm;
1925  	unsigned long new_start;
1926  
1927  	/* address space limit tests */
1928  	if (!may_expand_vm(mm, vma->vm_flags, grow))
1929  		return -ENOMEM;
1930  
1931  	/* Stack limit test */
1932  	if (size > rlimit(RLIMIT_STACK))
1933  		return -ENOMEM;
1934  
1935  	/* mlock limit tests */
1936  	if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1937  		return -ENOMEM;
1938  
1939  	/* Check to ensure the stack will not grow into a hugetlb-only region */
1940  	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1941  			vma->vm_end - size;
1942  	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1943  		return -EFAULT;
1944  
1945  	/*
1946  	 * Overcommit..  This must be the final test, as it will
1947  	 * update security statistics.
1948  	 */
1949  	if (security_vm_enough_memory_mm(mm, grow))
1950  		return -ENOMEM;
1951  
1952  	return 0;
1953  }
1954  
1955  #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1956  /*
1957   * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1958   * vma is the last one with address > vma->vm_end.  Have to extend vma.
1959   */
1960  static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1961  {
1962  	struct mm_struct *mm = vma->vm_mm;
1963  	struct vm_area_struct *next;
1964  	unsigned long gap_addr;
1965  	int error = 0;
1966  	MA_STATE(mas, &mm->mm_mt, vma->vm_start, address);
1967  
1968  	if (!(vma->vm_flags & VM_GROWSUP))
1969  		return -EFAULT;
1970  
1971  	/* Guard against exceeding limits of the address space. */
1972  	address &= PAGE_MASK;
1973  	if (address >= (TASK_SIZE & PAGE_MASK))
1974  		return -ENOMEM;
1975  	address += PAGE_SIZE;
1976  
1977  	/* Enforce stack_guard_gap */
1978  	gap_addr = address + stack_guard_gap;
1979  
1980  	/* Guard against overflow */
1981  	if (gap_addr < address || gap_addr > TASK_SIZE)
1982  		gap_addr = TASK_SIZE;
1983  
1984  	next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1985  	if (next && vma_is_accessible(next)) {
1986  		if (!(next->vm_flags & VM_GROWSUP))
1987  			return -ENOMEM;
1988  		/* Check that both stack segments have the same anon_vma? */
1989  	}
1990  
1991  	if (next)
1992  		mas_prev_range(&mas, address);
1993  
1994  	__mas_set_range(&mas, vma->vm_start, address - 1);
1995  	if (mas_preallocate(&mas, vma, GFP_KERNEL))
1996  		return -ENOMEM;
1997  
1998  	/* We must make sure the anon_vma is allocated. */
1999  	if (unlikely(anon_vma_prepare(vma))) {
2000  		mas_destroy(&mas);
2001  		return -ENOMEM;
2002  	}
2003  
2004  	/* Lock the VMA before expanding to prevent concurrent page faults */
2005  	vma_start_write(vma);
2006  	/*
2007  	 * vma->vm_start/vm_end cannot change under us because the caller
2008  	 * is required to hold the mmap_lock in read mode.  We need the
2009  	 * anon_vma lock to serialize against concurrent expand_stacks.
2010  	 */
2011  	anon_vma_lock_write(vma->anon_vma);
2012  
2013  	/* Somebody else might have raced and expanded it already */
2014  	if (address > vma->vm_end) {
2015  		unsigned long size, grow;
2016  
2017  		size = address - vma->vm_start;
2018  		grow = (address - vma->vm_end) >> PAGE_SHIFT;
2019  
2020  		error = -ENOMEM;
2021  		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2022  			error = acct_stack_growth(vma, size, grow);
2023  			if (!error) {
2024  				/*
2025  				 * We only hold a shared mmap_lock here, so
2026  				 * we need to protect against concurrent vma
2027  				 * expansions.  anon_vma_lock_write() doesn't
2028  				 * help here, as we don't guarantee that all
2029  				 * growable vmas in a mm share the same root
2030  				 * anon vma.  So, we reuse mm->page_table_lock
2031  				 * to guard against concurrent vma expansions.
2032  				 */
2033  				spin_lock(&mm->page_table_lock);
2034  				if (vma->vm_flags & VM_LOCKED)
2035  					mm->locked_vm += grow;
2036  				vm_stat_account(mm, vma->vm_flags, grow);
2037  				anon_vma_interval_tree_pre_update_vma(vma);
2038  				vma->vm_end = address;
2039  				/* Overwrite old entry in mtree. */
2040  				mas_store_prealloc(&mas, vma);
2041  				anon_vma_interval_tree_post_update_vma(vma);
2042  				spin_unlock(&mm->page_table_lock);
2043  
2044  				perf_event_mmap(vma);
2045  			}
2046  		}
2047  	}
2048  	anon_vma_unlock_write(vma->anon_vma);
2049  	khugepaged_enter_vma(vma, vma->vm_flags);
2050  	mas_destroy(&mas);
2051  	validate_mm(mm);
2052  	return error;
2053  }
2054  #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2055  
2056  /*
2057   * vma is the first one with address < vma->vm_start.  Have to extend vma.
2058   * mmap_lock held for writing.
2059   */
2060  int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2061  {
2062  	struct mm_struct *mm = vma->vm_mm;
2063  	MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
2064  	struct vm_area_struct *prev;
2065  	int error = 0;
2066  
2067  	if (!(vma->vm_flags & VM_GROWSDOWN))
2068  		return -EFAULT;
2069  
2070  	address &= PAGE_MASK;
2071  	if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
2072  		return -EPERM;
2073  
2074  	/* Enforce stack_guard_gap */
2075  	prev = mas_prev(&mas, 0);
2076  	/* Check that both stack segments have the same anon_vma? */
2077  	if (prev) {
2078  		if (!(prev->vm_flags & VM_GROWSDOWN) &&
2079  		    vma_is_accessible(prev) &&
2080  		    (address - prev->vm_end < stack_guard_gap))
2081  			return -ENOMEM;
2082  	}
2083  
2084  	if (prev)
2085  		mas_next_range(&mas, vma->vm_start);
2086  
2087  	__mas_set_range(&mas, address, vma->vm_end - 1);
2088  	if (mas_preallocate(&mas, vma, GFP_KERNEL))
2089  		return -ENOMEM;
2090  
2091  	/* We must make sure the anon_vma is allocated. */
2092  	if (unlikely(anon_vma_prepare(vma))) {
2093  		mas_destroy(&mas);
2094  		return -ENOMEM;
2095  	}
2096  
2097  	/* Lock the VMA before expanding to prevent concurrent page faults */
2098  	vma_start_write(vma);
2099  	/*
2100  	 * vma->vm_start/vm_end cannot change under us because the caller
2101  	 * is required to hold the mmap_lock in read mode.  We need the
2102  	 * anon_vma lock to serialize against concurrent expand_stacks.
2103  	 */
2104  	anon_vma_lock_write(vma->anon_vma);
2105  
2106  	/* Somebody else might have raced and expanded it already */
2107  	if (address < vma->vm_start) {
2108  		unsigned long size, grow;
2109  
2110  		size = vma->vm_end - address;
2111  		grow = (vma->vm_start - address) >> PAGE_SHIFT;
2112  
2113  		error = -ENOMEM;
2114  		if (grow <= vma->vm_pgoff) {
2115  			error = acct_stack_growth(vma, size, grow);
2116  			if (!error) {
2117  				/*
2118  				 * We only hold a shared mmap_lock here, so
2119  				 * we need to protect against concurrent vma
2120  				 * expansions.  anon_vma_lock_write() doesn't
2121  				 * help here, as we don't guarantee that all
2122  				 * growable vmas in a mm share the same root
2123  				 * anon vma.  So, we reuse mm->page_table_lock
2124  				 * to guard against concurrent vma expansions.
2125  				 */
2126  				spin_lock(&mm->page_table_lock);
2127  				if (vma->vm_flags & VM_LOCKED)
2128  					mm->locked_vm += grow;
2129  				vm_stat_account(mm, vma->vm_flags, grow);
2130  				anon_vma_interval_tree_pre_update_vma(vma);
2131  				vma->vm_start = address;
2132  				vma->vm_pgoff -= grow;
2133  				/* Overwrite old entry in mtree. */
2134  				mas_store_prealloc(&mas, vma);
2135  				anon_vma_interval_tree_post_update_vma(vma);
2136  				spin_unlock(&mm->page_table_lock);
2137  
2138  				perf_event_mmap(vma);
2139  			}
2140  		}
2141  	}
2142  	anon_vma_unlock_write(vma->anon_vma);
2143  	khugepaged_enter_vma(vma, vma->vm_flags);
2144  	mas_destroy(&mas);
2145  	validate_mm(mm);
2146  	return error;
2147  }
2148  
2149  /* enforced gap between the expanding stack and other mappings. */
2150  unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2151  
2152  static int __init cmdline_parse_stack_guard_gap(char *p)
2153  {
2154  	unsigned long val;
2155  	char *endptr;
2156  
2157  	val = simple_strtoul(p, &endptr, 10);
2158  	if (!*endptr)
2159  		stack_guard_gap = val << PAGE_SHIFT;
2160  
2161  	return 1;
2162  }
2163  __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
2164  
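/*
 * Illustrative note (not part of the original file): the gap is given in
 * pages on the kernel command line, e.g. "stack_guard_gap=1024" enforces a
 * 4MB gap with 4KB pages, and "stack_guard_gap=0" disables the gap entirely.
 */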
2165  #ifdef CONFIG_STACK_GROWSUP
2166  int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2167  {
2168  	return expand_upwards(vma, address);
2169  }
2170  
2171  struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2172  {
2173  	struct vm_area_struct *vma, *prev;
2174  
2175  	addr &= PAGE_MASK;
2176  	vma = find_vma_prev(mm, addr, &prev);
2177  	if (vma && (vma->vm_start <= addr))
2178  		return vma;
2179  	if (!prev)
2180  		return NULL;
2181  	if (expand_stack_locked(prev, addr))
2182  		return NULL;
2183  	if (prev->vm_flags & VM_LOCKED)
2184  		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2185  	return prev;
2186  }
2187  #else
2188  int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2189  {
2190  	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
2191  		return -EINVAL;
2192  	return expand_downwards(vma, address);
2193  }
2194  
2195  struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2196  {
2197  	struct vm_area_struct *vma;
2198  	unsigned long start;
2199  
2200  	addr &= PAGE_MASK;
2201  	vma = find_vma(mm, addr);
2202  	if (!vma)
2203  		return NULL;
2204  	if (vma->vm_start <= addr)
2205  		return vma;
2206  	start = vma->vm_start;
2207  	if (expand_stack_locked(vma, addr))
2208  		return NULL;
2209  	if (vma->vm_flags & VM_LOCKED)
2210  		populate_vma_page_range(vma, addr, start, NULL);
2211  	return vma;
2212  }
2213  #endif
2214  
2215  /*
2216   * IA64 has some horrid mapping rules: it can expand both up and down,
2217   * but with various special rules.
2218   *
2219   * We'll get rid of this architecture eventually, so the ugliness is
2220   * temporary.
2221   */
2222  #ifdef CONFIG_IA64
2223  static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
2224  {
2225  	return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
2226  		REGION_OFFSET(addr) < RGN_MAP_LIMIT;
2227  }
2228  
2229  /*
2230   * IA64 stacks grow down, but there's a special register backing store
2231   * that can grow up. Only sequentially, though, so the new address must
2232   * match vm_end.
2233   */
2234  static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
2235  {
2236  	if (!vma_expand_ok(vma, addr))
2237  		return -EFAULT;
2238  	if (vma->vm_end != (addr & PAGE_MASK))
2239  		return -EFAULT;
2240  	return expand_upwards(vma, addr);
2241  }
2242  
2243  static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
2244  {
2245  	if (!vma_expand_ok(vma, addr))
2246  		return -EFAULT;
2247  	return expand_downwards(vma, addr);
2248  }
2249  
2250  #elif defined(CONFIG_STACK_GROWSUP)
2251  
2252  #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
2253  #define vma_expand_down(vma, addr) (-EFAULT)
2254  
2255  #else
2256  
2257  #define vma_expand_up(vma,addr) (-EFAULT)
2258  #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
2259  
2260  #endif
2261  
2262  /*
2263   * expand_stack(): legacy interface for page faulting. Don't use unless
2264   * you have to.
2265   *
2266   * This is called with the mm locked for reading, drops the lock, takes
2267   * the lock for writing, tries to look up a vma again, expands it if
2268   * necessary, and downgrades the lock to reading again.
2269   *
2270   * If no vma is found or it can't be expanded, it returns NULL and has
2271   * dropped the lock.
2272   */
2273  struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
2274  {
2275  	struct vm_area_struct *vma, *prev;
2276  
2277  	mmap_read_unlock(mm);
2278  	if (mmap_write_lock_killable(mm))
2279  		return NULL;
2280  
2281  	vma = find_vma_prev(mm, addr, &prev);
2282  	if (vma && vma->vm_start <= addr)
2283  		goto success;
2284  
2285  	if (prev && !vma_expand_up(prev, addr)) {
2286  		vma = prev;
2287  		goto success;
2288  	}
2289  
2290  	if (vma && !vma_expand_down(vma, addr))
2291  		goto success;
2292  
2293  	mmap_write_unlock(mm);
2294  	return NULL;
2295  
2296  success:
2297  	mmap_write_downgrade(mm);
2298  	return vma;
2299  }
2300  
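/*
 * Illustrative sketch (not part of the original file): how a legacy caller
 * might use expand_stack().  The helper name is hypothetical.  On success
 * the mmap_lock is held for reading; on failure it has been dropped.
 */
static struct vm_area_struct *example_get_user_vma(struct mm_struct *mm,
						   unsigned long addr)
{
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (vma)
		return vma;		/* still holds mmap_lock for read */

	/* Not mapped: let expand_stack() try to grow a stack VMA over addr. */
	return expand_stack(mm, addr);
}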
2301  /*
2302   * OK - we have the memory areas we should free on a maple tree, so release
2303   * them and do the vma updates.
2304   *
2305   * Called with the mm semaphore held.
2306   */
2307  static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
2308  {
2309  	unsigned long nr_accounted = 0;
2310  	struct vm_area_struct *vma;
2311  
2312  	/* Update high watermark before we lower total_vm */
2313  	update_hiwater_vm(mm);
2314  	mas_for_each(mas, vma, ULONG_MAX) {
2315  		long nrpages = vma_pages(vma);
2316  
2317  		if (vma->vm_flags & VM_ACCOUNT)
2318  			nr_accounted += nrpages;
2319  		vm_stat_account(mm, vma->vm_flags, -nrpages);
2320  		remove_vma(vma, false);
2321  	}
2322  	vm_unacct_memory(nr_accounted);
2323  }
2324  
2325  /*
2326   * Get rid of page table information in the indicated region.
2327   *
2328   * Called with the mm semaphore held.
2329   */
2330  static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
2331  		struct vm_area_struct *vma, struct vm_area_struct *prev,
2332  		struct vm_area_struct *next, unsigned long start,
2333  		unsigned long end, unsigned long tree_end, bool mm_wr_locked)
2334  {
2335  	struct mmu_gather tlb;
2336  	unsigned long mt_start = mas->index;
2337  
2338  	lru_add_drain();
2339  	tlb_gather_mmu(&tlb, mm);
2340  	update_hiwater_rss(mm);
2341  	unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
2342  	mas_set(mas, mt_start);
2343  	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2344  				 next ? next->vm_start : USER_PGTABLES_CEILING,
2345  				 mm_wr_locked);
2346  	tlb_finish_mmu(&tlb);
2347  }
2348  
2349  /*
2350   * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
2351   * has already been checked or doesn't make sense to fail.
2352   * VMA Iterator will point to the end VMA.
2353   */
2354  int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2355  		unsigned long addr, int new_below)
2356  {
2357  	struct vma_prepare vp;
2358  	struct vm_area_struct *new;
2359  	int err;
2360  
2361  	WARN_ON(vma->vm_start >= addr);
2362  	WARN_ON(vma->vm_end <= addr);
2363  
2364  	if (vma->vm_ops && vma->vm_ops->may_split) {
2365  		err = vma->vm_ops->may_split(vma, addr);
2366  		if (err)
2367  			return err;
2368  	}
2369  
2370  	new = vm_area_dup(vma);
2371  	if (!new)
2372  		return -ENOMEM;
2373  
2374  	if (new_below) {
2375  		new->vm_end = addr;
2376  	} else {
2377  		new->vm_start = addr;
2378  		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2379  	}
2380  
2381  	err = -ENOMEM;
2382  	vma_iter_config(vmi, new->vm_start, new->vm_end);
2383  	if (vma_iter_prealloc(vmi, new))
2384  		goto out_free_vma;
2385  
2386  	err = vma_dup_policy(vma, new);
2387  	if (err)
2388  		goto out_free_vmi;
2389  
2390  	err = anon_vma_clone(new, vma);
2391  	if (err)
2392  		goto out_free_mpol;
2393  
2394  	if (new->vm_file)
2395  		get_file(new->vm_file);
2396  
2397  	if (new->vm_ops && new->vm_ops->open)
2398  		new->vm_ops->open(new);
2399  
2400  	vma_start_write(vma);
2401  	vma_start_write(new);
2402  
2403  	init_vma_prep(&vp, vma);
2404  	vp.insert = new;
2405  	vma_prepare(&vp);
2406  	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
2407  
2408  	if (new_below) {
2409  		vma->vm_start = addr;
2410  		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
2411  	} else {
2412  		vma->vm_end = addr;
2413  	}
2414  
2415  	/* vma_complete stores the new vma */
2416  	vma_complete(&vp, vmi, vma->vm_mm);
2417  
2418  	/* Success. */
2419  	if (new_below)
2420  		vma_next(vmi);
2421  	return 0;
2422  
2423  out_free_mpol:
2424  	mpol_put(vma_policy(new));
2425  out_free_vmi:
2426  	vma_iter_free(vmi);
2427  out_free_vma:
2428  	vm_area_free(new);
2429  	return err;
2430  }
2431  
2432  /*
2433   * Split a vma into two pieces at address 'addr', a new vma is allocated
2434   * either for the first part or the tail.
2435   */
2436  int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2437  	      unsigned long addr, int new_below)
2438  {
2439  	if (vma->vm_mm->map_count >= sysctl_max_map_count)
2440  		return -ENOMEM;
2441  
2442  	return __split_vma(vmi, vma, addr, new_below);
2443  }
2444  
2445  /*
2446   * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
2447   * @vmi: The vma iterator
2448   * @vma: The starting vm_area_struct
2449   * @mm: The mm_struct
2450   * @start: The aligned start address to munmap.
2451   * @end: The aligned end address to munmap.
2452   * @uf: The userfaultfd list_head
2453   * @unlock: Set to true to drop the mmap_lock.  Unlocking only happens on
2454   * success.
2455   *
2456   * Return: 0 on success and drops the lock if so directed, error and leaves the
2457   * lock held otherwise.
2458   */
2459  static int
2460  do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
2461  		    struct mm_struct *mm, unsigned long start,
2462  		    unsigned long end, struct list_head *uf, bool unlock)
2463  {
2464  	struct vm_area_struct *prev, *next = NULL;
2465  	struct maple_tree mt_detach;
2466  	int count = 0;
2467  	int error = -ENOMEM;
2468  	unsigned long locked_vm = 0;
2469  	MA_STATE(mas_detach, &mt_detach, 0, 0);
2470  	mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2471  	mt_on_stack(mt_detach);
2472  
2473  	/*
2474  	 * If we need to split any vma, do it now to save pain later.
2475  	 *
2476  	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2477  	 * unmapped vm_area_struct will remain in use: so lower split_vma
2478  	 * places tmp vma above, and higher split_vma places tmp vma below.
2479  	 */
2480  
2481  	/* Does it split the first one? */
2482  	if (start > vma->vm_start) {
2483  
2484  		/*
2485  		 * Make sure that map_count on return from munmap() will
2486  		 * not exceed its limit; but let map_count go just above
2487  		 * its limit temporarily, to help free resources as expected.
2488  		 */
2489  		if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2490  			goto map_count_exceeded;
2491  
2492  		error = __split_vma(vmi, vma, start, 1);
2493  		if (error)
2494  			goto start_split_failed;
2495  	}
2496  
2497  	/*
2498  	 * Detach a range of VMAs from the mm. Using next as a temp variable as
2499  	 * it is always overwritten.
2500  	 */
2501  	next = vma;
2502  	do {
2503  		/* Does it split the end? */
2504  		if (next->vm_end > end) {
2505  			error = __split_vma(vmi, next, end, 0);
2506  			if (error)
2507  				goto end_split_failed;
2508  		}
2509  		vma_start_write(next);
2510  		mas_set(&mas_detach, count);
2511  		error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
2512  		if (error)
2513  			goto munmap_gather_failed;
2514  		vma_mark_detached(next, true);
2515  		if (next->vm_flags & VM_LOCKED)
2516  			locked_vm += vma_pages(next);
2517  
2518  		count++;
2519  		if (unlikely(uf)) {
2520  			/*
2521  			 * If userfaultfd_unmap_prep returns an error the vmas
2522  			 * will remain split, but userland will get a
2523  			 * highly unexpected error anyway. This is no
2524  			 * different than the case where the first of the two
2525  			 * __split_vma fails, but we don't undo the first
2526  			 * split, although we could. This failure is unlikely
2527  			 * enough that it's not worth optimizing for.
2528  			 */
2529  			error = userfaultfd_unmap_prep(next, start, end, uf);
2530  
2531  			if (error)
2532  				goto userfaultfd_error;
2533  		}
2534  #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
2535  		BUG_ON(next->vm_start < start);
2536  		BUG_ON(next->vm_start > end);
2537  #endif
2538  	} for_each_vma_range(*vmi, next, end);
2539  
2540  #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
2541  	/* Make sure no VMAs are about to be lost. */
2542  	{
2543  		MA_STATE(test, &mt_detach, 0, 0);
2544  		struct vm_area_struct *vma_mas, *vma_test;
2545  		int test_count = 0;
2546  
2547  		vma_iter_set(vmi, start);
2548  		rcu_read_lock();
2549  		vma_test = mas_find(&test, count - 1);
2550  		for_each_vma_range(*vmi, vma_mas, end) {
2551  			BUG_ON(vma_mas != vma_test);
2552  			test_count++;
2553  			vma_test = mas_next(&test, count - 1);
2554  		}
2555  		rcu_read_unlock();
2556  		BUG_ON(count != test_count);
2557  	}
2558  #endif
2559  
2560  	while (vma_iter_addr(vmi) > start)
2561  		vma_iter_prev_range(vmi);
2562  
2563  	error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
2564  	if (error)
2565  		goto clear_tree_failed;
2566  
2567  	/* Point of no return */
2568  	mm->locked_vm -= locked_vm;
2569  	mm->map_count -= count;
2570  	if (unlock)
2571  		mmap_write_downgrade(mm);
2572  
2573  	prev = vma_iter_prev_range(vmi);
2574  	next = vma_next(vmi);
2575  	if (next)
2576  		vma_iter_prev_range(vmi);
2577  
2578  	/*
2579  	 * We can free page tables without write-locking mmap_lock because VMAs
2580  	 * were isolated before we downgraded mmap_lock.
2581  	 */
2582  	mas_set(&mas_detach, 1);
2583  	unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
2584  		     !unlock);
2585  	/* Statistics and freeing VMAs */
2586  	mas_set(&mas_detach, 0);
2587  	remove_mt(mm, &mas_detach);
2588  	validate_mm(mm);
2589  	if (unlock)
2590  		mmap_read_unlock(mm);
2591  
2592  	__mt_destroy(&mt_detach);
2593  	return 0;
2594  
2595  clear_tree_failed:
2596  userfaultfd_error:
2597  munmap_gather_failed:
2598  end_split_failed:
2599  	mas_set(&mas_detach, 0);
2600  	mas_for_each(&mas_detach, next, end)
2601  		vma_mark_detached(next, false);
2602  
2603  	__mt_destroy(&mt_detach);
2604  start_split_failed:
2605  map_count_exceeded:
2606  	validate_mm(mm);
2607  	return error;
2608  }
2609  
2610  /*
2611   * do_vmi_munmap() - munmap a given range.
2612   * @vmi: The vma iterator
2613   * @mm: The mm_struct
2614   * @start: The start address to munmap
2615   * @len: The length of the range to munmap
2616   * @uf: The userfaultfd list_head
2617   * @unlock: set to true if the user wants to drop the mmap_lock on success
2618   *
2619   * This function takes a @vmi that is either pointing to the previous VMA or
2620   * set to MA_START and sets it up to remove the mapping(s).  The @len will be
2621   * aligned and any arch_unmap work will be performed.
2622   *
2623   * Return: 0 on success and drops the lock if so directed, error and leaves the
2624   * lock held otherwise.
2625   */
2626  int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
2627  		  unsigned long start, size_t len, struct list_head *uf,
2628  		  bool unlock)
2629  {
2630  	unsigned long end;
2631  	struct vm_area_struct *vma;
2632  
2633  	if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2634  		return -EINVAL;
2635  
2636  	end = start + PAGE_ALIGN(len);
2637  	if (end == start)
2638  		return -EINVAL;
2639  
2640  	 /* arch_unmap() might do unmaps itself.  */
2641  	arch_unmap(mm, start, end);
2642  
2643  	/* Find the first overlapping VMA */
2644  	vma = vma_find(vmi, end);
2645  	if (!vma) {
2646  		if (unlock)
2647  			mmap_write_unlock(mm);
2648  		return 0;
2649  	}
2650  
2651  	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
2652  }
2653  
2654  /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
2655   * @mm: The mm_struct
2656   * @start: The start address to munmap
2657   * @len: The length to be munmapped.
2658   * @uf: The userfaultfd list_head
2659   *
2660   * Return: 0 on success, error otherwise.
2661   */
2662  int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2663  	      struct list_head *uf)
2664  {
2665  	VMA_ITERATOR(vmi, mm, start);
2666  
2667  	return do_vmi_munmap(&vmi, mm, start, len, uf, false);
2668  }
2669  
2670  unsigned long mmap_region(struct file *file, unsigned long addr,
2671  		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2672  		struct list_head *uf)
2673  {
2674  	struct mm_struct *mm = current->mm;
2675  	struct vm_area_struct *vma = NULL;
2676  	struct vm_area_struct *next, *prev, *merge;
2677  	pgoff_t pglen = len >> PAGE_SHIFT;
2678  	unsigned long charged = 0;
2679  	unsigned long end = addr + len;
2680  	unsigned long merge_start = addr, merge_end = end;
2681  	pgoff_t vm_pgoff;
2682  	int error;
2683  	VMA_ITERATOR(vmi, mm, addr);
2684  
2685  	/* Check against address space limit. */
2686  	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
2687  		unsigned long nr_pages;
2688  
2689  		/*
2690  		 * MAP_FIXED may remove pages of mappings that intersect with
2691  		 * the requested mapping. Account for the pages it would unmap.
2692  		 */
2693  		nr_pages = count_vma_pages_range(mm, addr, end);
2694  
2695  		if (!may_expand_vm(mm, vm_flags,
2696  					(len >> PAGE_SHIFT) - nr_pages))
2697  			return -ENOMEM;
2698  	}
2699  
2700  	/* Unmap any existing mapping in the area */
2701  	if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
2702  		return -ENOMEM;
2703  
2704  	/*
2705  	 * Private writable mapping: check memory availability
2706  	 */
2707  	if (accountable_mapping(file, vm_flags)) {
2708  		charged = len >> PAGE_SHIFT;
2709  		if (security_vm_enough_memory_mm(mm, charged))
2710  			return -ENOMEM;
2711  		vm_flags |= VM_ACCOUNT;
2712  	}
2713  
2714  	next = vma_next(&vmi);
2715  	prev = vma_prev(&vmi);
2716  	if (vm_flags & VM_SPECIAL) {
2717  		if (prev)
2718  			vma_iter_next_range(&vmi);
2719  		goto cannot_expand;
2720  	}
2721  
2722  	/* Attempt to expand an old mapping */
2723  	/* Check next */
2724  	if (next && next->vm_start == end && !vma_policy(next) &&
2725  	    can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
2726  				 NULL_VM_UFFD_CTX, NULL)) {
2727  		merge_end = next->vm_end;
2728  		vma = next;
2729  		vm_pgoff = next->vm_pgoff - pglen;
2730  	}
2731  
2732  	/* Check prev */
2733  	if (prev && prev->vm_end == addr && !vma_policy(prev) &&
2734  	    (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
2735  				       pgoff, vma->vm_userfaultfd_ctx, NULL) :
2736  		   can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
2737  				       NULL_VM_UFFD_CTX, NULL))) {
2738  		merge_start = prev->vm_start;
2739  		vma = prev;
2740  		vm_pgoff = prev->vm_pgoff;
2741  	} else if (prev) {
2742  		vma_iter_next_range(&vmi);
2743  	}
2744  
2745  	/* Actually expand, if possible */
2746  	if (vma &&
2747  	    !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
2748  		khugepaged_enter_vma(vma, vm_flags);
2749  		goto expanded;
2750  	}
2751  
2752  	if (vma == prev)
2753  		vma_iter_set(&vmi, addr);
2754  cannot_expand:
2755  
2756  	/*
2757  	 * Determine the object being mapped and call the appropriate
2758  	 * specific mapper. The address has already been validated, but
2759  	 * not unmapped, though the maps are removed from the list.
2760  	 */
2761  	vma = vm_area_alloc(mm);
2762  	if (!vma) {
2763  		error = -ENOMEM;
2764  		goto unacct_error;
2765  	}
2766  
2767  	vma_iter_config(&vmi, addr, end);
2768  	vma->vm_start = addr;
2769  	vma->vm_end = end;
2770  	vm_flags_init(vma, vm_flags);
2771  	vma->vm_page_prot = vm_get_page_prot(vm_flags);
2772  	vma->vm_pgoff = pgoff;
2773  
2774  	if (file) {
2775  		if (vm_flags & VM_SHARED) {
2776  			error = mapping_map_writable(file->f_mapping);
2777  			if (error)
2778  				goto free_vma;
2779  		}
2780  
2781  		vma->vm_file = get_file(file);
2782  		error = call_mmap(file, vma);
2783  		if (error)
2784  			goto unmap_and_free_vma;
2785  
2786  		/*
2787  		 * Expansion is handled above, merging is handled below.
2788  		 * Drivers should not alter the address of the VMA.
2789  		 */
2790  		error = -EINVAL;
2791  		if (WARN_ON((addr != vma->vm_start)))
2792  			goto close_and_free_vma;
2793  
2794  		vma_iter_config(&vmi, addr, end);
2795  		/*
2796  		 * If vm_flags changed after call_mmap(), we should try to merge
2797  		 * the vma again as we may succeed this time.
2798  		 */
2799  		if (unlikely(vm_flags != vma->vm_flags && prev)) {
2800  			merge = vma_merge(&vmi, mm, prev, vma->vm_start,
2801  				    vma->vm_end, vma->vm_flags, NULL,
2802  				    vma->vm_file, vma->vm_pgoff, NULL,
2803  				    NULL_VM_UFFD_CTX, NULL);
2804  			if (merge) {
2805  				/*
2806  				 * ->mmap() can change vma->vm_file and fput
2807  				 * the original file. So fput the vma->vm_file
2808  				 * here or we would add an extra fput for file
2809  				 * and cause general protection fault
2810  				 * ultimately.
2811  				 */
2812  				fput(vma->vm_file);
2813  				vm_area_free(vma);
2814  				vma = merge;
2815  				/* Update vm_flags to pick up the change. */
2816  				vm_flags = vma->vm_flags;
2817  				goto unmap_writable;
2818  			}
2819  		}
2820  
2821  		vm_flags = vma->vm_flags;
2822  	} else if (vm_flags & VM_SHARED) {
2823  		error = shmem_zero_setup(vma);
2824  		if (error)
2825  			goto free_vma;
2826  	} else {
2827  		vma_set_anonymous(vma);
2828  	}
2829  
2830  	if (map_deny_write_exec(vma, vma->vm_flags)) {
2831  		error = -EACCES;
2832  		goto close_and_free_vma;
2833  	}
2834  
2835  	/* Allow architectures to sanity-check the vm_flags */
2836  	error = -EINVAL;
2837  	if (!arch_validate_flags(vma->vm_flags))
2838  		goto close_and_free_vma;
2839  
2840  	error = -ENOMEM;
2841  	if (vma_iter_prealloc(&vmi, vma))
2842  		goto close_and_free_vma;
2843  
2844  	/* Lock the VMA since it is modified after insertion into VMA tree */
2845  	vma_start_write(vma);
2846  	vma_iter_store(&vmi, vma);
2847  	mm->map_count++;
2848  	if (vma->vm_file) {
2849  		i_mmap_lock_write(vma->vm_file->f_mapping);
2850  		if (vma->vm_flags & VM_SHARED)
2851  			mapping_allow_writable(vma->vm_file->f_mapping);
2852  
2853  		flush_dcache_mmap_lock(vma->vm_file->f_mapping);
2854  		vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
2855  		flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
2856  		i_mmap_unlock_write(vma->vm_file->f_mapping);
2857  	}
2858  
2859  	/*
2860  	 * vma_merge() calls khugepaged_enter_vma() as well; the call
2861  	 * below covers the non-merge case.
2862  	 */
2863  	khugepaged_enter_vma(vma, vma->vm_flags);
2864  
2865  	/* Once vma denies write, undo our temporary denial count */
2866  unmap_writable:
2867  	if (file && vm_flags & VM_SHARED)
2868  		mapping_unmap_writable(file->f_mapping);
2869  	file = vma->vm_file;
2870  	ksm_add_vma(vma);
2871  expanded:
2872  	perf_event_mmap(vma);
2873  
2874  	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
2875  	if (vm_flags & VM_LOCKED) {
2876  		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2877  					is_vm_hugetlb_page(vma) ||
2878  					vma == get_gate_vma(current->mm))
2879  			vm_flags_clear(vma, VM_LOCKED_MASK);
2880  		else
2881  			mm->locked_vm += (len >> PAGE_SHIFT);
2882  	}
2883  
2884  	if (file)
2885  		uprobe_mmap(vma);
2886  
2887  	/*
2888  	 * A new (or expanded) vma always gets soft-dirty status.
2889  	 * Otherwise the user-space soft-dirty page tracker would not
2890  	 * be able to distinguish the case where a vma area is unmapped
2891  	 * and a new one is then mapped in place (which must be treated
2892  	 * as a completely new data area).
2893  	 */
2894  	vm_flags_set(vma, VM_SOFTDIRTY);
2895  
2896  	vma_set_page_prot(vma);
2897  
2898  	validate_mm(mm);
2899  	return addr;
2900  
2901  close_and_free_vma:
2902  	if (file && vma->vm_ops && vma->vm_ops->close)
2903  		vma->vm_ops->close(vma);
2904  
2905  	if (file || vma->vm_file) {
2906  unmap_and_free_vma:
2907  		fput(vma->vm_file);
2908  		vma->vm_file = NULL;
2909  
2910  		vma_iter_set(&vmi, vma->vm_end);
2911  		/* Undo any partial mapping done by a device driver. */
2912  		unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
2913  			     vma->vm_end, vma->vm_end, true);
2914  	}
2915  	if (file && (vm_flags & VM_SHARED))
2916  		mapping_unmap_writable(file->f_mapping);
2917  free_vma:
2918  	vm_area_free(vma);
2919  unacct_error:
2920  	if (charged)
2921  		vm_unacct_memory(charged);
2922  	validate_mm(mm);
2923  	return error;
2924  }
2925  
2926  static int __vm_munmap(unsigned long start, size_t len, bool unlock)
2927  {
2928  	int ret;
2929  	struct mm_struct *mm = current->mm;
2930  	LIST_HEAD(uf);
2931  	VMA_ITERATOR(vmi, mm, start);
2932  
2933  	if (mmap_write_lock_killable(mm))
2934  		return -EINTR;
2935  
2936  	ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2937  	if (ret || !unlock)
2938  		mmap_write_unlock(mm);
2939  
2940  	userfaultfd_unmap_complete(mm, &uf);
2941  	return ret;
2942  }
2943  
2944  int vm_munmap(unsigned long start, size_t len)
2945  {
2946  	return __vm_munmap(start, len, false);
2947  }
2948  EXPORT_SYMBOL(vm_munmap);
2949  
2950  SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2951  {
2952  	addr = untagged_addr(addr);
2953  	return __vm_munmap(addr, len, true);
2954  }
2955  
2956  
2957  /*
2958   * Emulation of deprecated remap_file_pages() syscall.
2959   */
2960  SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2961  		unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2962  {
2963  
2964  	struct mm_struct *mm = current->mm;
2965  	struct vm_area_struct *vma;
2966  	unsigned long populate = 0;
2967  	unsigned long ret = -EINVAL;
2968  	struct file *file;
2969  
2970  	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
2971  		     current->comm, current->pid);
2972  
2973  	if (prot)
2974  		return ret;
2975  	start = start & PAGE_MASK;
2976  	size = size & PAGE_MASK;
2977  
2978  	if (start + size <= start)
2979  		return ret;
2980  
2981  	/* Does pgoff wrap? */
2982  	if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2983  		return ret;
2984  
2985  	if (mmap_write_lock_killable(mm))
2986  		return -EINTR;
2987  
2988  	vma = vma_lookup(mm, start);
2989  
2990  	if (!vma || !(vma->vm_flags & VM_SHARED))
2991  		goto out;
2992  
2993  	if (start + size > vma->vm_end) {
2994  		VMA_ITERATOR(vmi, mm, vma->vm_end);
2995  		struct vm_area_struct *next, *prev = vma;
2996  
2997  		for_each_vma_range(vmi, next, start + size) {
2998  			/* hole between vmas ? */
2999  			if (next->vm_start != prev->vm_end)
3000  				goto out;
3001  
3002  			if (next->vm_file != vma->vm_file)
3003  				goto out;
3004  
3005  			if (next->vm_flags != vma->vm_flags)
3006  				goto out;
3007  
3008  			if (start + size <= next->vm_end)
3009  				break;
3010  
3011  			prev = next;
3012  		}
3013  
3014  		if (!next)
3015  			goto out;
3016  	}
3017  
3018  	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
3019  	prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
3020  	prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
3021  
3022  	flags &= MAP_NONBLOCK;
3023  	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
3024  	if (vma->vm_flags & VM_LOCKED)
3025  		flags |= MAP_LOCKED;
3026  
3027  	file = get_file(vma->vm_file);
3028  	ret = do_mmap(vma->vm_file, start, size,
3029  			prot, flags, 0, pgoff, &populate, NULL);
3030  	fput(file);
3031  out:
3032  	mmap_write_unlock(mm);
3033  	if (populate)
3034  		mm_populate(ret, populate);
3035  	if (!IS_ERR_VALUE(ret))
3036  		ret = 0;
3037  	return ret;
3038  }
3039  
3040  /*
3041   * do_vma_munmap() - Unmap a full or partial vma.
3042   * @vmi: The vma iterator pointing at the vma
3043   * @vma: The first vma to be munmapped
3044   * @start: the start of the address to unmap
3045   * @end: The end of the address to unmap
3046   * @uf: The userfaultfd list_head
3047   * @unlock: Drop the lock on success
3048   *
3049   * Unmaps a VMA mapping when the vma iterator is already in position.
3050   * Does not handle alignment.
3051   *
3052   * Return: 0 on success and drops the lock if so directed; error on failure
3053   * while still holding the lock.
3054   */
3055  int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3056  		unsigned long start, unsigned long end, struct list_head *uf,
3057  		bool unlock)
3058  {
3059  	struct mm_struct *mm = vma->vm_mm;
3060  
3061  	arch_unmap(mm, start, end);
3062  	return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
3063  }
3064  
3065  /*
3066   * do_brk_flags() - Increase the brk vma if the flags match.
3067   * @vmi: The vma iterator
3068   * @addr: The start address
3069   * @len: The length of the increase
3070   * @vma: The vma
3071   * @flags: The VMA Flags
3072   *
3073   * Extend the brk VMA from addr to addr + len.  If the VMA is NULL or the flags
3074   * do not match then create a new anonymous VMA.  Eventually we may be able to
3075   * do some brk-specific accounting here.
3076   */
3077  static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
3078  		unsigned long addr, unsigned long len, unsigned long flags)
3079  {
3080  	struct mm_struct *mm = current->mm;
3081  	struct vma_prepare vp;
3082  
3083  	/*
3084  	 * Check against address space limits by the changed size
3085  	 * Note: This happens *after* clearing old mappings in some code paths.
3086  	 */
3087  	flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
3088  	if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
3089  		return -ENOMEM;
3090  
3091  	if (mm->map_count > sysctl_max_map_count)
3092  		return -ENOMEM;
3093  
3094  	if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
3095  		return -ENOMEM;
3096  
3097  	/*
3098  	 * Expand the existing vma if possible; Note that singular lists do not
3099  	 * occur after forking, so the expand will only happen on new VMAs.
3100  	 */
3101  	if (vma && vma->vm_end == addr && !vma_policy(vma) &&
3102  	    can_vma_merge_after(vma, flags, NULL, NULL,
3103  				addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
3104  		vma_iter_config(vmi, vma->vm_start, addr + len);
3105  		if (vma_iter_prealloc(vmi, vma))
3106  			goto unacct_fail;
3107  
3108  		vma_start_write(vma);
3109  
3110  		init_vma_prep(&vp, vma);
3111  		vma_prepare(&vp);
3112  		vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
3113  		vma->vm_end = addr + len;
3114  		vm_flags_set(vma, VM_SOFTDIRTY);
3115  		vma_iter_store(vmi, vma);
3116  
3117  		vma_complete(&vp, vmi, mm);
3118  		khugepaged_enter_vma(vma, flags);
3119  		goto out;
3120  	}
3121  
3122  	if (vma)
3123  		vma_iter_next_range(vmi);
3124  	/* create a vma struct for an anonymous mapping */
3125  	vma = vm_area_alloc(mm);
3126  	if (!vma)
3127  		goto unacct_fail;
3128  
3129  	vma_set_anonymous(vma);
3130  	vma->vm_start = addr;
3131  	vma->vm_end = addr + len;
3132  	vma->vm_pgoff = addr >> PAGE_SHIFT;
3133  	vm_flags_init(vma, flags);
3134  	vma->vm_page_prot = vm_get_page_prot(flags);
3135  	vma_start_write(vma);
3136  	if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
3137  		goto mas_store_fail;
3138  
3139  	mm->map_count++;
3140  	validate_mm(mm);
3141  	ksm_add_vma(vma);
3142  out:
3143  	perf_event_mmap(vma);
3144  	mm->total_vm += len >> PAGE_SHIFT;
3145  	mm->data_vm += len >> PAGE_SHIFT;
3146  	if (flags & VM_LOCKED)
3147  		mm->locked_vm += (len >> PAGE_SHIFT);
3148  	vm_flags_set(vma, VM_SOFTDIRTY);
3149  	return 0;
3150  
3151  mas_store_fail:
3152  	vm_area_free(vma);
3153  unacct_fail:
3154  	vm_unacct_memory(len >> PAGE_SHIFT);
3155  	return -ENOMEM;
3156  }
3157  
3158  int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3159  {
3160  	struct mm_struct *mm = current->mm;
3161  	struct vm_area_struct *vma = NULL;
3162  	unsigned long len;
3163  	int ret;
3164  	bool populate;
3165  	LIST_HEAD(uf);
3166  	VMA_ITERATOR(vmi, mm, addr);
3167  
3168  	len = PAGE_ALIGN(request);
3169  	if (len < request)
3170  		return -ENOMEM;
3171  	if (!len)
3172  		return 0;
3173  
3174  	/* Until we need other flags, refuse anything except VM_EXEC. */
3175  	if ((flags & (~VM_EXEC)) != 0)
3176  		return -EINVAL;
3177  
3178  	if (mmap_write_lock_killable(mm))
3179  		return -EINTR;
3180  
3181  	ret = check_brk_limits(addr, len);
3182  	if (ret)
3183  		goto limits_failed;
3184  
3185  	ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
3186  	if (ret)
3187  		goto munmap_failed;
3188  
3189  	vma = vma_prev(&vmi);
3190  	ret = do_brk_flags(&vmi, vma, addr, len, flags);
3191  	populate = ((mm->def_flags & VM_LOCKED) != 0);
3192  	mmap_write_unlock(mm);
3193  	userfaultfd_unmap_complete(mm, &uf);
3194  	if (populate && !ret)
3195  		mm_populate(addr, len);
3196  	return ret;
3197  
3198  munmap_failed:
3199  limits_failed:
3200  	mmap_write_unlock(mm);
3201  	return ret;
3202  }
3203  EXPORT_SYMBOL(vm_brk_flags);
3204  
3205  int vm_brk(unsigned long addr, unsigned long len)
3206  {
3207  	return vm_brk_flags(addr, len, 0);
3208  }
3209  EXPORT_SYMBOL(vm_brk);
3210  
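/*
 * Illustrative sketch (not part of the original file): how a binary loader
 * might use vm_brk_flags() to map a zero-filled bss-style region.  The
 * helper name is hypothetical and start/end are assumed page-aligned.
 */
static int example_map_zero_region(unsigned long start, unsigned long end)
{
	if (end <= start)
		return 0;

	/* Anonymous, read-write mapping; pass VM_EXEC only if required. */
	return vm_brk_flags(start, end - start, 0);
}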
3211  /* Release all mmaps. */
3212  void exit_mmap(struct mm_struct *mm)
3213  {
3214  	struct mmu_gather tlb;
3215  	struct vm_area_struct *vma;
3216  	unsigned long nr_accounted = 0;
3217  	MA_STATE(mas, &mm->mm_mt, 0, 0);
3218  	int count = 0;
3219  
3220  	/* mm's last user has gone, and it's about to be pulled down */
3221  	mmu_notifier_release(mm);
3222  
3223  	mmap_read_lock(mm);
3224  	arch_exit_mmap(mm);
3225  
3226  	vma = mas_find(&mas, ULONG_MAX);
3227  	if (!vma) {
3228  		/* Can happen if dup_mmap() received an OOM */
3229  		mmap_read_unlock(mm);
3230  		return;
3231  	}
3232  
3233  	lru_add_drain();
3234  	flush_cache_mm(mm);
3235  	tlb_gather_mmu_fullmm(&tlb, mm);
3236  	/* update_hiwater_rss(mm) here? but nobody should be looking */
3237  	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
3238  	unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
3239  	mmap_read_unlock(mm);
3240  
3241  	/*
3242  	 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
3243  	 * because the memory has been already freed.
3244  	 */
3245  	set_bit(MMF_OOM_SKIP, &mm->flags);
3246  	mmap_write_lock(mm);
3247  	mt_clear_in_rcu(&mm->mm_mt);
3248  	mas_set(&mas, vma->vm_end);
3249  	free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS,
3250  		      USER_PGTABLES_CEILING, true);
3251  	tlb_finish_mmu(&tlb);
3252  
3253  	/*
3254  	 * Walk the list again, actually closing and freeing it, with preemption
3255  	 * enabled, without holding any MM locks besides the unreachable
3256  	 * mmap_write_lock.
3257  	 */
3258  	mas_set(&mas, vma->vm_end);
3259  	do {
3260  		if (vma->vm_flags & VM_ACCOUNT)
3261  			nr_accounted += vma_pages(vma);
3262  		remove_vma(vma, true);
3263  		count++;
3264  		cond_resched();
3265  	} while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
3266  
3267  	BUG_ON(count != mm->map_count);
3268  
3269  	trace_exit_mmap(mm);
3270  	__mt_destroy(&mm->mm_mt);
3271  	mmap_write_unlock(mm);
3272  	vm_unacct_memory(nr_accounted);
3273  }
3274  
3275  /* Insert vm structure into process list sorted by address
3276   * and into the inode's i_mmap tree.  If vm_file is non-NULL
3277   * then i_mmap_rwsem is taken here.
3278   */
3279  int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3280  {
3281  	unsigned long charged = vma_pages(vma);
3282  
3283  
3284  	if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3285  		return -ENOMEM;
3286  
3287  	if ((vma->vm_flags & VM_ACCOUNT) &&
3288  	     security_vm_enough_memory_mm(mm, charged))
3289  		return -ENOMEM;
3290  
3291  	/*
3292  	 * The vm_pgoff of a purely anonymous vma should be irrelevant
3293  	 * until its first write fault, when the page's anon_vma and index
3294  	 * are set.  But now set the vm_pgoff it will almost certainly
3295  	 * end up with (unless mremap moves it elsewhere before that
3296  	 * first write fault), so /proc/pid/maps tells a consistent story.
3297  	 *
3298  	 * By setting it to reflect the virtual start address of the
3299  	 * vma, merges and splits can happen in a seamless way, just
3300  	 * using the existing file pgoff checks and manipulations.
3301  	 * Similarly in do_mmap and in do_brk_flags.
3302  	 */
3303  	if (vma_is_anonymous(vma)) {
3304  		BUG_ON(vma->anon_vma);
3305  		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3306  	}
3307  
3308  	if (vma_link(mm, vma)) {
3309  		vm_unacct_memory(charged);
3310  		return -ENOMEM;
3311  	}
3312  
3313  	return 0;
3314  }
3315  
3316  /*
3317   * Copy the vma structure to a new location in the same mm,
3318   * prior to moving page table entries, to effect an mremap move.
3319   */
3320  struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3321  	unsigned long addr, unsigned long len, pgoff_t pgoff,
3322  	bool *need_rmap_locks)
3323  {
3324  	struct vm_area_struct *vma = *vmap;
3325  	unsigned long vma_start = vma->vm_start;
3326  	struct mm_struct *mm = vma->vm_mm;
3327  	struct vm_area_struct *new_vma, *prev;
3328  	bool faulted_in_anon_vma = true;
3329  	VMA_ITERATOR(vmi, mm, addr);
3330  
3331  	/*
3332  	 * If anonymous vma has not yet been faulted, update new pgoff
3333  	 * to match new location, to increase its chance of merging.
3334  	 */
3335  	if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3336  		pgoff = addr >> PAGE_SHIFT;
3337  		faulted_in_anon_vma = false;
3338  	}
3339  
3340  	new_vma = find_vma_prev(mm, addr, &prev);
3341  	if (new_vma && new_vma->vm_start < addr + len)
3342  		return NULL;	/* should never get here */
3343  
3344  	new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags,
3345  			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3346  			    vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3347  	if (new_vma) {
3348  		/*
3349  		 * Source vma may have been merged into new_vma
3350  		 */
3351  		if (unlikely(vma_start >= new_vma->vm_start &&
3352  			     vma_start < new_vma->vm_end)) {
3353  			/*
3354  			 * The only way we can get a vma_merge with
3355  			 * self during an mremap is if the vma hasn't
3356  			 * been faulted in yet and we were allowed to
3357  			 * reset the dst vma->vm_pgoff to the
3358  			 * destination address of the mremap to allow
3359  			 * the merge to happen. mremap must change the
3360  			 * vm_pgoff linearity between src and dst vmas
3361  			 * (in turn preventing a vma_merge) to be
3362  			 * safe. It is only safe to keep the vm_pgoff
3363  			 * linear if there are no pages mapped yet.
3364  			 */
3365  			VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3366  			*vmap = vma = new_vma;
3367  		}
3368  		*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3369  	} else {
3370  		new_vma = vm_area_dup(vma);
3371  		if (!new_vma)
3372  			goto out;
3373  		new_vma->vm_start = addr;
3374  		new_vma->vm_end = addr + len;
3375  		new_vma->vm_pgoff = pgoff;
3376  		if (vma_dup_policy(vma, new_vma))
3377  			goto out_free_vma;
3378  		if (anon_vma_clone(new_vma, vma))
3379  			goto out_free_mempol;
3380  		if (new_vma->vm_file)
3381  			get_file(new_vma->vm_file);
3382  		if (new_vma->vm_ops && new_vma->vm_ops->open)
3383  			new_vma->vm_ops->open(new_vma);
3384  		if (vma_link(mm, new_vma))
3385  			goto out_vma_link;
3386  		*need_rmap_locks = false;
3387  	}
3388  	return new_vma;
3389  
3390  out_vma_link:
3391  	if (new_vma->vm_ops && new_vma->vm_ops->close)
3392  		new_vma->vm_ops->close(new_vma);
3393  
3394  	if (new_vma->vm_file)
3395  		fput(new_vma->vm_file);
3396  
3397  	unlink_anon_vmas(new_vma);
3398  out_free_mempol:
3399  	mpol_put(vma_policy(new_vma));
3400  out_free_vma:
3401  	vm_area_free(new_vma);
3402  out:
3403  	return NULL;
3404  }
3405  
3406  /*
3407   * Return true if the calling process may expand its vm space by the passed
3408   * number of pages
3409   */
3410  bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3411  {
3412  	if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3413  		return false;
3414  
3415  	if (is_data_mapping(flags) &&
3416  	    mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3417  		/* Workaround for Valgrind */
3418  		if (rlimit(RLIMIT_DATA) == 0 &&
3419  		    mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3420  			return true;
3421  
3422  		pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
3423  			     current->comm, current->pid,
3424  			     (mm->data_vm + npages) << PAGE_SHIFT,
3425  			     rlimit(RLIMIT_DATA),
3426  			     ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3427  
3428  		if (!ignore_rlimit_data)
3429  			return false;
3430  	}
3431  
3432  	return true;
3433  }
3434  
3435  void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3436  {
3437  	WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
3438  
3439  	if (is_exec_mapping(flags))
3440  		mm->exec_vm += npages;
3441  	else if (is_stack_mapping(flags))
3442  		mm->stack_vm += npages;
3443  	else if (is_data_mapping(flags))
3444  		mm->data_vm += npages;
3445  }
3446  
3447  static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3448  
3449  /*
3450   * Having a close hook prevents vma merging regardless of flags.
3451   */
3452  static void special_mapping_close(struct vm_area_struct *vma)
3453  {
3454  }
3455  
3456  static const char *special_mapping_name(struct vm_area_struct *vma)
3457  {
3458  	return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3459  }
3460  
3461  static int special_mapping_mremap(struct vm_area_struct *new_vma)
3462  {
3463  	struct vm_special_mapping *sm = new_vma->vm_private_data;
3464  
3465  	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3466  		return -EFAULT;
3467  
3468  	if (sm->mremap)
3469  		return sm->mremap(sm, new_vma);
3470  
3471  	return 0;
3472  }
3473  
3474  static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
3475  {
3476  	/*
3477  	 * Forbid splitting special mappings - the kernel has expectations
3478  	 * about the number of pages in the mapping. Together with
3479  	 * VM_DONTEXPAND, this keeps the size of the vma constant over the
3480  	 * special mapping's lifetime.
3481  	 */
3482  	return -EINVAL;
3483  }
3484  
3485  static const struct vm_operations_struct special_mapping_vmops = {
3486  	.close = special_mapping_close,
3487  	.fault = special_mapping_fault,
3488  	.mremap = special_mapping_mremap,
3489  	.name = special_mapping_name,
3490  	/* The vDSO code relies on VVAR not being accessible remotely */
3491  	.access = NULL,
3492  	.may_split = special_mapping_split,
3493  };
3494  
3495  static const struct vm_operations_struct legacy_special_mapping_vmops = {
3496  	.close = special_mapping_close,
3497  	.fault = special_mapping_fault,
3498  };
3499  
3500  static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
3501  {
3502  	struct vm_area_struct *vma = vmf->vma;
3503  	pgoff_t pgoff;
3504  	struct page **pages;
3505  
3506  	if (vma->vm_ops == &legacy_special_mapping_vmops) {
3507  		pages = vma->vm_private_data;
3508  	} else {
3509  		struct vm_special_mapping *sm = vma->vm_private_data;
3510  
3511  		if (sm->fault)
3512  			return sm->fault(sm, vmf->vma, vmf);
3513  
3514  		pages = sm->pages;
3515  	}
3516  
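	/*
	 * Walk the NULL-terminated pages array to find the entry for
	 * vmf->pgoff; running off the end of the array means SIGBUS below.
	 */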
3517  	for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
3518  		pgoff--;
3519  
3520  	if (*pages) {
3521  		struct page *page = *pages;
3522  		get_page(page);
3523  		vmf->page = page;
3524  		return 0;
3525  	}
3526  
3527  	return VM_FAULT_SIGBUS;
3528  }
3529  
3530  static struct vm_area_struct *__install_special_mapping(
3531  	struct mm_struct *mm,
3532  	unsigned long addr, unsigned long len,
3533  	unsigned long vm_flags, void *priv,
3534  	const struct vm_operations_struct *ops)
3535  {
3536  	int ret;
3537  	struct vm_area_struct *vma;
3538  
3539  	vma = vm_area_alloc(mm);
3540  	if (unlikely(vma == NULL))
3541  		return ERR_PTR(-ENOMEM);
3542  
3543  	vma->vm_start = addr;
3544  	vma->vm_end = addr + len;
3545  
3546  	vm_flags_init(vma, (vm_flags | mm->def_flags |
3547  		      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
3548  	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3549  
3550  	vma->vm_ops = ops;
3551  	vma->vm_private_data = priv;
3552  
3553  	ret = insert_vm_struct(mm, vma);
3554  	if (ret)
3555  		goto out;
3556  
3557  	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3558  
3559  	perf_event_mmap(vma);
3560  
3561  	return vma;
3562  
3563  out:
3564  	vm_area_free(vma);
3565  	return ERR_PTR(ret);
3566  }
3567  
3568  bool vma_is_special_mapping(const struct vm_area_struct *vma,
3569  	const struct vm_special_mapping *sm)
3570  {
3571  	return vma->vm_private_data == sm &&
3572  		(vma->vm_ops == &special_mapping_vmops ||
3573  		 vma->vm_ops == &legacy_special_mapping_vmops);
3574  }
3575  
3576  /*
3577   * Called with mm->mmap_lock held for writing.
3578   * Insert a new vma covering the given region, with the given flags.
3579   * Its pages are supplied by the given array of struct page *.
3580   * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3581   * The region past the last page supplied will always produce SIGBUS.
3582   * The array pointer and the pages it points to are assumed to stay alive
3583   * for as long as this mapping might exist.
3584   */
3585  struct vm_area_struct *_install_special_mapping(
3586  	struct mm_struct *mm,
3587  	unsigned long addr, unsigned long len,
3588  	unsigned long vm_flags, const struct vm_special_mapping *spec)
3589  {
3590  	return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
3591  					&special_mapping_vmops);
3592  }
3593  
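/*
 * For illustration, a rough sketch of a hypothetical caller (an arch vDSO
 * setup path is the typical kind of user); "example_mapping" and
 * "example_pages" are made-up names, and the mmap_lock must be held for
 * writing as noted above:
 *
 *	static struct vm_special_mapping example_mapping = {
 *		.name  = "[example]",
 *		.pages = example_pages,		(NULL-terminated array)
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD,
 *				       &example_mapping);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */
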
3594  int install_special_mapping(struct mm_struct *mm,
3595  			    unsigned long addr, unsigned long len,
3596  			    unsigned long vm_flags, struct page **pages)
3597  {
3598  	struct vm_area_struct *vma = __install_special_mapping(
3599  		mm, addr, len, vm_flags, (void *)pages,
3600  		&legacy_special_mapping_vmops);
3601  
3602  	return PTR_ERR_OR_ZERO(vma);
3603  }
3604  
3605  static DEFINE_MUTEX(mm_all_locks_mutex);
3606  
3607  static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3608  {
3609  	if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3610  		/*
3611  		 * The LSB of head.next can't change from under us
3612  		 * because we hold the mm_all_locks_mutex.
3613  		 */
3614  		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
3615  		/*
3616  		 * We can safely modify head.next after taking the
3617  		 * anon_vma->root->rwsem. If some other vma in this mm shares
3618  		 * the same anon_vma we won't take it again.
3619  		 *
3620  		 * No need for atomic instructions here, head.next
3621  		 * can't change from under us thanks to the
3622  		 * anon_vma->root->rwsem.
3623  		 */
3624  		if (__test_and_set_bit(0, (unsigned long *)
3625  				       &anon_vma->root->rb_root.rb_root.rb_node))
3626  			BUG();
3627  	}
3628  }
3629  
3630  static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3631  {
3632  	if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3633  		/*
3634  		 * AS_MM_ALL_LOCKS can't change from under us because
3635  		 * we hold the mm_all_locks_mutex.
3636  		 *
3637  		 * Operations on ->flags have to be atomic because
3638  		 * even if AS_MM_ALL_LOCKS is stable thanks to the
3639  		 * mm_all_locks_mutex, there may be other cpus
3640  		 * changing other bitflags in parallel to us.
3641  		 */
3642  		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3643  			BUG();
3644  		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
3645  	}
3646  }
3647  
3648  /*
3649   * This operation locks against the VM for all pte/vma/mm related
3650   * operations that could ever happen on a certain mm. This includes
3651   * vmtruncate, try_to_unmap, and all page faults.
3652   *
3653   * The caller must take the mmap_lock in write mode before calling
3654   * mm_take_all_locks(). The caller isn't allowed to release the
3655   * mmap_lock until mm_drop_all_locks() returns.
3656   *
3657   * mmap_lock in write mode is required in order to block all operations
3658   * that could modify pagetables and free pages without needing to
3659   * alter the vma layout. It's also needed in write mode to prevent new
3660   * anon_vmas from being associated with existing vmas.
3661   *
3662   * A single task can't take more than one mm_take_all_locks() in a row
3663   * or it would deadlock.
3664   *
3665   * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3666   * mapping->flags avoid taking the same lock twice, if more than one
3667   * vma in this mm is backed by the same anon_vma or address_space.
3668   *
3669   * We take locks in the following order, according to the comment at the
3670   * beginning of mm/rmap.c:
3671   *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3672   *     hugetlb mapping);
3673   *   - all vmas marked locked;
3674   *   - all i_mmap_rwsem locks;
3675   *   - all anon_vma->rwsem locks.
3676   *
3677   * Within each of these types we can take the locks in any order because
3678   * the VM code doesn't nest them, and we are protected from parallel
3679   * mm_take_all_locks() by mm_all_locks_mutex.
3680   *
3681   * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
3682   * that may have to take thousands of locks.
3683   *
3684   * mm_take_all_locks() can fail if it's interrupted by signals.
3685   */
3686  int mm_take_all_locks(struct mm_struct *mm)
3687  {
3688  	struct vm_area_struct *vma;
3689  	struct anon_vma_chain *avc;
3690  	MA_STATE(mas, &mm->mm_mt, 0, 0);
3691  
3692  	mmap_assert_write_locked(mm);
3693  
3694  	mutex_lock(&mm_all_locks_mutex);
3695  
3696  	/*
3697  	 * vma_start_write() does not have a complement in mm_drop_all_locks()
3698  	 * because vma_start_write() is always asymmetrical; it marks a VMA as
3699  	 * being written to until mmap_write_unlock() or mmap_write_downgrade()
3700  	 * is reached.
3701  	 */
3702  	mas_for_each(&mas, vma, ULONG_MAX) {
3703  		if (signal_pending(current))
3704  			goto out_unlock;
3705  		vma_start_write(vma);
3706  	}
3707  
3708  	mas_set(&mas, 0);
3709  	mas_for_each(&mas, vma, ULONG_MAX) {
3710  		if (signal_pending(current))
3711  			goto out_unlock;
3712  		if (vma->vm_file && vma->vm_file->f_mapping &&
3713  				is_vm_hugetlb_page(vma))
3714  			vm_lock_mapping(mm, vma->vm_file->f_mapping);
3715  	}
3716  
3717  	mas_set(&mas, 0);
3718  	mas_for_each(&mas, vma, ULONG_MAX) {
3719  		if (signal_pending(current))
3720  			goto out_unlock;
3721  		if (vma->vm_file && vma->vm_file->f_mapping &&
3722  				!is_vm_hugetlb_page(vma))
3723  			vm_lock_mapping(mm, vma->vm_file->f_mapping);
3724  	}
3725  
3726  	mas_set(&mas, 0);
3727  	mas_for_each(&mas, vma, ULONG_MAX) {
3728  		if (signal_pending(current))
3729  			goto out_unlock;
3730  		if (vma->anon_vma)
3731  			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3732  				vm_lock_anon_vma(mm, avc->anon_vma);
3733  	}
3734  
3735  	return 0;
3736  
3737  out_unlock:
3738  	mm_drop_all_locks(mm);
3739  	return -EINTR;
3740  }
3741  
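/*
 * For illustration, the pattern a caller (mmu notifier registration is
 * the classic user) is expected to follow. Note that a failed
 * mm_take_all_locks() has already dropped whatever locks it took, so on
 * error only the mmap_lock is left to release:
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);
 *	if (ret)
 *		goto out;	(ret is -EINTR: a signal was pending)
 *	... work with every rmap lock in the mm held ...
 *	mm_drop_all_locks(mm);
 * out:
 *	mmap_write_unlock(mm);
 */
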
3742  static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3743  {
3744  	if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3745  		/*
3746  		 * The LSB of head.next can't change to 0 from under
3747  		 * us because we hold the mm_all_locks_mutex.
3748  		 *
3749  		 * We must however clear the bitflag before unlocking
3750  		 * the vma so the users using the anon_vma->rb_root will
3751  		 * never see our bitflag.
3752  		 *
3753  		 * No need for atomic instructions here, head.next
3754  		 * can't change from under us until we release the
3755  		 * anon_vma->root->rwsem.
3756  		 */
3757  		if (!__test_and_clear_bit(0, (unsigned long *)
3758  					  &anon_vma->root->rb_root.rb_root.rb_node))
3759  			BUG();
3760  		anon_vma_unlock_write(anon_vma);
3761  	}
3762  }
3763  
3764  static void vm_unlock_mapping(struct address_space *mapping)
3765  {
3766  	if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3767  		/*
3768  		 * AS_MM_ALL_LOCKS can't change to 0 from under us
3769  		 * because we hold the mm_all_locks_mutex.
3770  		 */
3771  		i_mmap_unlock_write(mapping);
3772  		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3773  					&mapping->flags))
3774  			BUG();
3775  	}
3776  }
3777  
3778  /*
3779   * The mmap_lock cannot be released by the caller until
3780   * mm_drop_all_locks() returns.
3781   */
3782  void mm_drop_all_locks(struct mm_struct *mm)
3783  {
3784  	struct vm_area_struct *vma;
3785  	struct anon_vma_chain *avc;
3786  	MA_STATE(mas, &mm->mm_mt, 0, 0);
3787  
3788  	mmap_assert_write_locked(mm);
3789  	BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3790  
3791  	mas_for_each(&mas, vma, ULONG_MAX) {
3792  		if (vma->anon_vma)
3793  			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3794  				vm_unlock_anon_vma(avc->anon_vma);
3795  		if (vma->vm_file && vma->vm_file->f_mapping)
3796  			vm_unlock_mapping(vma->vm_file->f_mapping);
3797  	}
3798  
3799  	mutex_unlock(&mm_all_locks_mutex);
3800  }
3801  
3802  /*
3803   * initialise the percpu counter for VM
3804   */
3805  void __init mmap_init(void)
3806  {
3807  	int ret;
3808  
3809  	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3810  	VM_BUG_ON(ret);
3811  }
3812  
3813  /*
3814   * Initialise sysctl_user_reserve_kbytes.
3815   *
3816   * This is intended to prevent a user from starting a single memory hogging
3817   * process that would leave them unable to recover (kill the hog) in
3818   * OVERCOMMIT_NEVER mode.
3819   *
3820   * The default value is min(3% of free memory, 128MB).
3821   * 128MB is enough to recover with sshd/login, bash, and top/kill.
3822   */
3823  static int init_user_reserve(void)
3824  {
3825  	unsigned long free_kbytes;
3826  
3827  	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3828  
3829  	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3830  	return 0;
3831  }
3832  subsys_initcall(init_user_reserve);
3833  
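/*
 * Worked example of the formula above: with 4GB free, free_kbytes is
 * 4194304, so free_kbytes / 32 is 131072 kB, which is exactly the
 * 1UL << 17 (128MB) cap; with 1GB free the reserve becomes 32768 kB
 * (32MB) instead.
 */
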
3834  /*
3835   * Initialise sysctl_admin_reserve_kbytes.
3836   *
3837   * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3838   * to log in and kill a memory hogging process.
3839   *
3840   * Systems with more than 256MB will reserve 8MB, enough to recover
3841   * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3842   * only reserve 3% of free pages by default.
3843   */
3844  static int init_admin_reserve(void)
3845  {
3846  	unsigned long free_kbytes;
3847  
3848  	free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3849  
3850  	sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3851  	return 0;
3852  }
3853  subsys_initcall(init_admin_reserve);
3854  
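/*
 * Likewise for the admin reserve: the 1UL << 13 cap is 8192 kB (8MB),
 * and free_kbytes / 32 only drops below that on systems with less than
 * 256MB free (262144 kB / 32 = 8192 kB).
 */
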
3855  /*
3856   * Reinitialise user and admin reserves if memory is added or removed.
3857   *
3858   * The default user reserve max is 128MB, and the default max for the
3859   * admin reserve is 8MB. These are usually, but not always, enough to
3860   * enable recovery from a memory hogging process using login/sshd, a shell,
3861   * and tools like top. It may make sense to increase or even disable the
3862   * reserve depending on the existence of swap or variations in the recovery
3863   * tools. So, the admin may have changed them.
3864   *
3865   * If memory is added and the reserves have been eliminated or increased above
3866   * the default max, then we'll trust the admin.
3867   *
3868   * If memory is removed and there isn't enough free memory, then we
3869   * need to reset the reserves.
3870   *
3871   * Otherwise keep the reserve set by the admin.
3872   */
3873  static int reserve_mem_notifier(struct notifier_block *nb,
3874  			     unsigned long action, void *data)
3875  {
3876  	unsigned long tmp, free_kbytes;
3877  
3878  	switch (action) {
3879  	case MEM_ONLINE:
3880  		/* Default max is 128MB. Leave alone if modified by operator. */
3881  		tmp = sysctl_user_reserve_kbytes;
3882  		if (0 < tmp && tmp < (1UL << 17))
3883  			init_user_reserve();
3884  
3885  		/* Default max is 8MB.  Leave alone if modified by operator. */
3886  		tmp = sysctl_admin_reserve_kbytes;
3887  		if (0 < tmp && tmp < (1UL << 13))
3888  			init_admin_reserve();
3889  
3890  		break;
3891  	case MEM_OFFLINE:
3892  		free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3893  
3894  		if (sysctl_user_reserve_kbytes > free_kbytes) {
3895  			init_user_reserve();
3896  			pr_info("vm.user_reserve_kbytes reset to %lu\n",
3897  				sysctl_user_reserve_kbytes);
3898  		}
3899  
3900  		if (sysctl_admin_reserve_kbytes > free_kbytes) {
3901  			init_admin_reserve();
3902  			pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3903  				sysctl_admin_reserve_kbytes);
3904  		}
3905  		break;
3906  	default:
3907  		break;
3908  	}
3909  	return NOTIFY_OK;
3910  }
3911  
3912  static int __meminit init_reserve_notifier(void)
3913  {
3914  	if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
3915  		pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3916  
3917  	return 0;
3918  }
3919  subsys_initcall(init_reserve_notifier);
3920