1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * mm/mmap.c
4 *
5 * Written by obz.
6 *
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
8 */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
16 #include <linux/mm_inline.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/swap.h>
21 #include <linux/syscalls.h>
22 #include <linux/capability.h>
23 #include <linux/init.h>
24 #include <linux/file.h>
25 #include <linux/fs.h>
26 #include <linux/personality.h>
27 #include <linux/security.h>
28 #include <linux/hugetlb.h>
29 #include <linux/shmem_fs.h>
30 #include <linux/profile.h>
31 #include <linux/export.h>
32 #include <linux/mount.h>
33 #include <linux/mempolicy.h>
34 #include <linux/rmap.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/mmdebug.h>
37 #include <linux/perf_event.h>
38 #include <linux/audit.h>
39 #include <linux/khugepaged.h>
40 #include <linux/uprobes.h>
41 #include <linux/notifier.h>
42 #include <linux/memory.h>
43 #include <linux/printk.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/moduleparam.h>
46 #include <linux/pkeys.h>
47 #include <linux/oom.h>
48 #include <linux/sched/mm.h>
49 #include <linux/ksm.h>
50
51 #include <linux/uaccess.h>
52 #include <asm/cacheflush.h>
53 #include <asm/tlb.h>
54 #include <asm/mmu_context.h>
55
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/mmap.h>
58
59 #include "internal.h"
60
61 #ifndef arch_mmap_check
62 #define arch_mmap_check(addr, len, flags) (0)
63 #endif
64
65 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
66 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
67 const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
68 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
69 #endif
70 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
71 const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
72 const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
73 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
74 #endif
75
76 static bool ignore_rlimit_data;
77 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
78
79 static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
80 struct vm_area_struct *vma, struct vm_area_struct *prev,
81 struct vm_area_struct *next, unsigned long start,
82 unsigned long end, unsigned long tree_end, bool mm_wr_locked);
83
84 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
85 {
86 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
87 }
88
89 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
90 void vma_set_page_prot(struct vm_area_struct *vma)
91 {
92 unsigned long vm_flags = vma->vm_flags;
93 pgprot_t vm_page_prot;
94
95 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
96 if (vma_wants_writenotify(vma, vm_page_prot)) {
97 vm_flags &= ~VM_SHARED;
98 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
99 }
100 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
101 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
102 }
103
104 /*
105 * Requires inode->i_mapping->i_mmap_rwsem
106 */
107 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
108 struct file *file, struct address_space *mapping)
109 {
110 if (vma->vm_flags & VM_SHARED)
111 mapping_unmap_writable(mapping);
112
113 flush_dcache_mmap_lock(mapping);
114 vma_interval_tree_remove(vma, &mapping->i_mmap);
115 flush_dcache_mmap_unlock(mapping);
116 }
117
118 /*
119 * Unlink a file-based vm structure from its interval tree, to hide
120 * vma from rmap and vmtruncate before freeing its page tables.
121 */
122 void unlink_file_vma(struct vm_area_struct *vma)
123 {
124 struct file *file = vma->vm_file;
125
126 if (file) {
127 struct address_space *mapping = file->f_mapping;
128 i_mmap_lock_write(mapping);
129 __remove_shared_vm_struct(vma, file, mapping);
130 i_mmap_unlock_write(mapping);
131 }
132 }
133
134 /*
135 * Close a vm structure and free it.
136 */
137 static void remove_vma(struct vm_area_struct *vma, bool unreachable)
138 {
139 might_sleep();
140 vma_close(vma);
141 if (vma->vm_file)
142 fput(vma->vm_file);
143 mpol_put(vma_policy(vma));
144 if (unreachable)
145 __vm_area_free(vma);
146 else
147 vm_area_free(vma);
148 }
149
150 static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
151 unsigned long min)
152 {
153 return mas_prev(&vmi->mas, min);
154 }
155
156 /*
157 * check_brk_limits() - Use platform specific check of range & verify mlock
158 * limits.
159 * @addr: The address to check
160 * @len: The size of increase.
161 *
162 * Return: 0 on success.
163 */
164 static int check_brk_limits(unsigned long addr, unsigned long len)
165 {
166 unsigned long mapped_addr;
167
168 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
169 if (IS_ERR_VALUE(mapped_addr))
170 return mapped_addr;
171
172 return mlock_future_ok(current->mm, current->mm->def_flags, len)
173 ? 0 : -EAGAIN;
174 }
175 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
176 unsigned long addr, unsigned long request, unsigned long flags);
177 SYSCALL_DEFINE1(brk, unsigned long, brk)
178 {
179 unsigned long newbrk, oldbrk, origbrk;
180 struct mm_struct *mm = current->mm;
181 struct vm_area_struct *brkvma, *next = NULL;
182 unsigned long min_brk;
183 bool populate = false;
184 LIST_HEAD(uf);
185 struct vma_iterator vmi;
186
187 if (mmap_write_lock_killable(mm))
188 return -EINTR;
189
190 origbrk = mm->brk;
191
192 #ifdef CONFIG_COMPAT_BRK
193 /*
194 * CONFIG_COMPAT_BRK can still be overridden by setting
195 * randomize_va_space to 2, which will still cause mm->start_brk
196 * to be arbitrarily shifted
197 */
198 if (current->brk_randomized)
199 min_brk = mm->start_brk;
200 else
201 min_brk = mm->end_data;
202 #else
203 min_brk = mm->start_brk;
204 #endif
205 if (brk < min_brk)
206 goto out;
207
208 /*
209 * Check against rlimit here. If this check is done later after the test
210 * of oldbrk with newbrk then it can escape the test and let the data
211 segment grow beyond its set limit in the case where the limit is
212 * not page aligned -Ram Gupta
213 */
214 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
215 mm->end_data, mm->start_data))
216 goto out;
217
218 newbrk = PAGE_ALIGN(brk);
219 oldbrk = PAGE_ALIGN(mm->brk);
220 if (oldbrk == newbrk) {
221 mm->brk = brk;
222 goto success;
223 }
224
225 /* Always allow shrinking brk. */
226 if (brk <= mm->brk) {
227 /* Search one past newbrk */
228 vma_iter_init(&vmi, mm, newbrk);
229 brkvma = vma_find(&vmi, oldbrk);
230 if (!brkvma || brkvma->vm_start >= oldbrk)
231 goto out; /* mapping intersects with an existing non-brk vma. */
232 /*
233 * mm->brk must be protected by write mmap_lock.
234 * do_vma_munmap() will drop the lock on success, so update it
235 * before calling do_vma_munmap().
236 */
237 mm->brk = brk;
238 if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
239 goto out;
240
241 goto success_unlocked;
242 }
243
244 if (check_brk_limits(oldbrk, newbrk - oldbrk))
245 goto out;
246
247 /*
248 * Only check if the next VMA is within the stack_guard_gap of the
249 * expansion area
250 */
251 vma_iter_init(&vmi, mm, oldbrk);
252 next = vma_find(&vmi, newbrk + PAGE_SIZE + stack_guard_gap);
253 if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
254 goto out;
255
256 brkvma = vma_prev_limit(&vmi, mm->start_brk);
257 /* Ok, looks good - let it rip. */
258 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
259 goto out;
260
261 mm->brk = brk;
262 if (mm->def_flags & VM_LOCKED)
263 populate = true;
264
265 success:
266 mmap_write_unlock(mm);
267 success_unlocked:
268 userfaultfd_unmap_complete(mm, &uf);
269 if (populate)
270 mm_populate(oldbrk, newbrk - oldbrk);
271 return brk;
272
273 out:
274 mm->brk = origbrk;
275 mmap_write_unlock(mm);
276 return origbrk;
277 }
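/*
 * Editor's sketch (not part of the original source): a typical userspace
 * sequence that exercises both paths above, assuming a libc whose sbrk()
 * is built on the brk syscall:
 *
 *	void *old = sbrk(0);	// query: brk(0) fails the min_brk check and returns mm->brk
 *	sbrk(64 * 1024);	// grow: takes the do_brk_flags() path
 *	brk(old);		// shrink: takes the do_vma_munmap() path
 *
 * Growing only succeeds if check_brk_limits() and the stack-guard-gap
 * check pass; shrinking the break is always allowed.
 */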
278
279 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
280 static void validate_mm(struct mm_struct *mm)
281 {
282 int bug = 0;
283 int i = 0;
284 struct vm_area_struct *vma;
285 VMA_ITERATOR(vmi, mm, 0);
286
287 mt_validate(&mm->mm_mt);
288 for_each_vma(vmi, vma) {
289 #ifdef CONFIG_DEBUG_VM_RB
290 struct anon_vma *anon_vma = vma->anon_vma;
291 struct anon_vma_chain *avc;
292 #endif
293 unsigned long vmi_start, vmi_end;
294 bool warn = 0;
295
296 vmi_start = vma_iter_addr(&vmi);
297 vmi_end = vma_iter_end(&vmi);
298 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
299 warn = 1;
300
301 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
302 warn = 1;
303
304 if (warn) {
305 pr_emerg("issue in %s\n", current->comm);
306 dump_stack();
307 dump_vma(vma);
308 pr_emerg("tree range: %px start %lx end %lx\n", vma,
309 vmi_start, vmi_end - 1);
310 vma_iter_dump_tree(&vmi);
311 }
312
313 #ifdef CONFIG_DEBUG_VM_RB
314 if (anon_vma) {
315 anon_vma_lock_read(anon_vma);
316 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
317 anon_vma_interval_tree_verify(avc);
318 anon_vma_unlock_read(anon_vma);
319 }
320 #endif
321 i++;
322 }
323 if (i != mm->map_count) {
324 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
325 bug = 1;
326 }
327 VM_BUG_ON_MM(bug, mm);
328 }
329
330 #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
331 #define validate_mm(mm) do { } while (0)
332 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
333
334 /*
335 * vma has some anon_vma assigned, and is already inserted on that
336 * anon_vma's interval trees.
337 *
338 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
339 * vma must be removed from the anon_vma's interval trees using
340 * anon_vma_interval_tree_pre_update_vma().
341 *
342 * After the update, the vma will be reinserted using
343 * anon_vma_interval_tree_post_update_vma().
344 *
345 * The entire update must be protected by exclusive mmap_lock and by
346 * the root anon_vma's mutex.
347 */
348 static inline void
349 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
350 {
351 struct anon_vma_chain *avc;
352
353 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
354 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
355 }
356
357 static inline void
358 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
359 {
360 struct anon_vma_chain *avc;
361
362 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
363 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
364 }
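/*
 * Editor's note: the expected calling pattern, as used by vma_prepare()
 * and vma_complete() below, is roughly:
 *
 *	anon_vma_lock_write(anon_vma);
 *	anon_vma_interval_tree_pre_update_vma(vma);
 *	vma->vm_start = new_start;	// and/or vm_end / vm_pgoff
 *	anon_vma_interval_tree_post_update_vma(vma);
 *	anon_vma_unlock_write(anon_vma);
 *
 * all while holding the mmap_lock for writing, as the comment above states.
 */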
365
366 static unsigned long count_vma_pages_range(struct mm_struct *mm,
367 unsigned long addr, unsigned long end)
368 {
369 VMA_ITERATOR(vmi, mm, addr);
370 struct vm_area_struct *vma;
371 unsigned long nr_pages = 0;
372
373 for_each_vma_range(vmi, vma, end) {
374 unsigned long vm_start = max(addr, vma->vm_start);
375 unsigned long vm_end = min(end, vma->vm_end);
376
377 nr_pages += PHYS_PFN(vm_end - vm_start);
378 }
379
380 return nr_pages;
381 }
382
383 static void __vma_link_file(struct vm_area_struct *vma,
384 struct address_space *mapping)
385 {
386 if (vma->vm_flags & VM_SHARED)
387 mapping_allow_writable(mapping);
388
389 flush_dcache_mmap_lock(mapping);
390 vma_interval_tree_insert(vma, &mapping->i_mmap);
391 flush_dcache_mmap_unlock(mapping);
392 }
393
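/*
 * Editorial comment: vma_link() inserts @vma into @mm's maple tree and,
 * for file-backed mappings, into the file's i_mmap interval tree. It
 * preallocates maple tree nodes so the store itself cannot fail,
 * write-locks the VMA, and bumps mm->map_count. Returns 0 or -ENOMEM.
 */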
394 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
395 {
396 VMA_ITERATOR(vmi, mm, 0);
397 struct address_space *mapping = NULL;
398
399 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
400 if (vma_iter_prealloc(&vmi, vma))
401 return -ENOMEM;
402
403 vma_start_write(vma);
404
405 vma_iter_store(&vmi, vma);
406
407 if (vma->vm_file) {
408 mapping = vma->vm_file->f_mapping;
409 i_mmap_lock_write(mapping);
410 __vma_link_file(vma, mapping);
411 i_mmap_unlock_write(mapping);
412 }
413
414 mm->map_count++;
415 validate_mm(mm);
416 return 0;
417 }
418
419 /*
420 * init_multi_vma_prep() - Initializer for struct vma_prepare
421 * @vp: The vma_prepare struct
422 * @vma: The vma that will be altered once locked
423 * @next: The next vma if it is to be adjusted
424 * @remove: The first vma to be removed
425 * @remove2: The second vma to be removed
426 */
427 static inline void init_multi_vma_prep(struct vma_prepare *vp,
428 struct vm_area_struct *vma, struct vm_area_struct *next,
429 struct vm_area_struct *remove, struct vm_area_struct *remove2)
430 {
431 memset(vp, 0, sizeof(struct vma_prepare));
432 vp->vma = vma;
433 vp->anon_vma = vma->anon_vma;
434 vp->remove = remove;
435 vp->remove2 = remove2;
436 vp->adj_next = next;
437 if (!vp->anon_vma && next)
438 vp->anon_vma = next->anon_vma;
439
440 vp->file = vma->vm_file;
441 if (vp->file)
442 vp->mapping = vma->vm_file->f_mapping;
443
444 }
445
446 /*
447 * init_vma_prep() - Initializer wrapper for vma_prepare struct
448 * @vp: The vma_prepare struct
449 * @vma: The vma that will be altered once locked
450 */
451 static inline void init_vma_prep(struct vma_prepare *vp,
452 struct vm_area_struct *vma)
453 {
454 init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
455 }
456
457
458 /*
459 * vma_prepare() - Helper function for handling locking VMAs prior to altering
460 * @vp: The initialized vma_prepare struct
461 */
462 static inline void vma_prepare(struct vma_prepare *vp)
463 {
464 if (vp->file) {
465 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
466
467 if (vp->adj_next)
468 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
469 vp->adj_next->vm_end);
470
471 i_mmap_lock_write(vp->mapping);
472 if (vp->insert && vp->insert->vm_file) {
473 /*
474 * Put into interval tree now, so instantiated pages
475 * are visible to arm/parisc __flush_dcache_page
476 * throughout; but we cannot insert into address
477 * space until vma start or end is updated.
478 */
479 __vma_link_file(vp->insert,
480 vp->insert->vm_file->f_mapping);
481 }
482 }
483
484 if (vp->anon_vma) {
485 anon_vma_lock_write(vp->anon_vma);
486 anon_vma_interval_tree_pre_update_vma(vp->vma);
487 if (vp->adj_next)
488 anon_vma_interval_tree_pre_update_vma(vp->adj_next);
489 }
490
491 if (vp->file) {
492 flush_dcache_mmap_lock(vp->mapping);
493 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
494 if (vp->adj_next)
495 vma_interval_tree_remove(vp->adj_next,
496 &vp->mapping->i_mmap);
497 }
498
499 }
500
501 /*
502 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
503 * or for inserting a VMA.
504 *
505 * @vp: The vma_prepare struct
506 * @vmi: The vma iterator
507 * @mm: The mm_struct
508 */
509 static inline void vma_complete(struct vma_prepare *vp,
510 struct vma_iterator *vmi, struct mm_struct *mm)
511 {
512 if (vp->file) {
513 if (vp->adj_next)
514 vma_interval_tree_insert(vp->adj_next,
515 &vp->mapping->i_mmap);
516 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
517 flush_dcache_mmap_unlock(vp->mapping);
518 }
519
520 if (vp->remove && vp->file) {
521 __remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
522 if (vp->remove2)
523 __remove_shared_vm_struct(vp->remove2, vp->file,
524 vp->mapping);
525 } else if (vp->insert) {
526 /*
527 * split_vma has split insert from vma, and needs
528 * us to insert it before dropping the locks
529 * (it may either follow vma or precede it).
530 */
531 vma_iter_store(vmi, vp->insert);
532 mm->map_count++;
533 }
534
535 if (vp->anon_vma) {
536 anon_vma_interval_tree_post_update_vma(vp->vma);
537 if (vp->adj_next)
538 anon_vma_interval_tree_post_update_vma(vp->adj_next);
539 anon_vma_unlock_write(vp->anon_vma);
540 }
541
542 if (vp->file) {
543 i_mmap_unlock_write(vp->mapping);
544 uprobe_mmap(vp->vma);
545
546 if (vp->adj_next)
547 uprobe_mmap(vp->adj_next);
548 }
549
550 if (vp->remove) {
551 again:
552 vma_mark_detached(vp->remove, true);
553 if (vp->file) {
554 uprobe_munmap(vp->remove, vp->remove->vm_start,
555 vp->remove->vm_end);
556 fput(vp->file);
557 }
558 if (vp->remove->anon_vma)
559 anon_vma_merge(vp->vma, vp->remove);
560 mm->map_count--;
561 mpol_put(vma_policy(vp->remove));
562 if (!vp->remove2)
563 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
564 vm_area_free(vp->remove);
565
566 /*
567 * In mprotect's case 6 (see comments on vma_merge),
568 * we are removing both mid and next vmas
569 */
570 if (vp->remove2) {
571 vp->remove = vp->remove2;
572 vp->remove2 = NULL;
573 goto again;
574 }
575 }
576 if (vp->insert && vp->file)
577 uprobe_mmap(vp->insert);
578 validate_mm(mm);
579 }
580
581 /*
582 * dup_anon_vma() - Helper function to duplicate anon_vma
583 * @dst: The destination VMA
584 * @src: The source VMA
585 * @dup: Pointer to the destination VMA when successful.
586 *
587 * Returns: 0 on success.
588 */
589 static inline int dup_anon_vma(struct vm_area_struct *dst,
590 struct vm_area_struct *src, struct vm_area_struct **dup)
591 {
592 /*
593 * Easily overlooked: when mprotect shifts the boundary, make sure the
594 * expanding vma has anon_vma set if the shrinking vma had, to cover any
595 * anon pages imported.
596 */
597 if (src->anon_vma && !dst->anon_vma) {
598 int ret;
599
600 vma_assert_write_locked(dst);
601 dst->anon_vma = src->anon_vma;
602 ret = anon_vma_clone(dst, src);
603 if (ret)
604 return ret;
605
606 *dup = dst;
607 }
608
609 return 0;
610 }
611
612 /*
613 * vma_expand - Expand an existing VMA
614 *
615 * @vmi: The vma iterator
616 * @vma: The vma to expand
617 * @start: The start of the vma
618 * @end: The exclusive end of the vma
619 * @pgoff: The page offset of vma
620 * @next: The next vma after @vma, if it is to be expanded over.
621 *
622 * Expand @vma to @start and @end. Can expand off the start and end. Will
623 * expand over @next if it's different from @vma and @end == @next->vm_end.
624 * Checking if the @vma can expand and merge with @next needs to be handled by
625 * the caller.
626 *
627 * Returns: 0 on success
628 */
629 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
630 unsigned long start, unsigned long end, pgoff_t pgoff,
631 struct vm_area_struct *next)
632 {
633 struct vm_area_struct *anon_dup = NULL;
634 bool remove_next = false;
635 struct vma_prepare vp;
636
637 vma_start_write(vma);
638 if (next && (vma != next) && (end == next->vm_end)) {
639 int ret;
640
641 remove_next = true;
642 vma_start_write(next);
643 ret = dup_anon_vma(vma, next, &anon_dup);
644 if (ret)
645 return ret;
646 }
647
648 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
649 /* Not merging but overwriting any part of next is not handled. */
650 VM_WARN_ON(next && !vp.remove &&
651 next != vma && end > next->vm_start);
652 /* Only handles expanding */
653 VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
654
655 /* Note: vma iterator must be pointing to 'start' */
656 vma_iter_config(vmi, start, end);
657 if (vma_iter_prealloc(vmi, vma))
658 goto nomem;
659
660 vma_prepare(&vp);
661 vma_adjust_trans_huge(vma, start, end, 0);
662 vma->vm_start = start;
663 vma->vm_end = end;
664 vma->vm_pgoff = pgoff;
665 vma_iter_store(vmi, vma);
666
667 vma_complete(&vp, vmi, vma->vm_mm);
668 return 0;
669
670 nomem:
671 if (anon_dup)
672 unlink_anon_vmas(anon_dup);
673 return -ENOMEM;
674 }
675
676 /*
677 * vma_shrink() - Reduce an existing VMAs memory area
678 * @vmi: The vma iterator
679 * @vma: The VMA to modify
680 * @start: The new start
681 * @end: The new end
682 *
683 * Returns: 0 on success, -ENOMEM otherwise
684 */
685 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
686 unsigned long start, unsigned long end, pgoff_t pgoff)
687 {
688 struct vma_prepare vp;
689
690 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
691
692 if (vma->vm_start < start)
693 vma_iter_config(vmi, vma->vm_start, start);
694 else
695 vma_iter_config(vmi, end, vma->vm_end);
696
697 if (vma_iter_prealloc(vmi, NULL))
698 return -ENOMEM;
699
700 vma_start_write(vma);
701
702 init_vma_prep(&vp, vma);
703 vma_prepare(&vp);
704 vma_adjust_trans_huge(vma, start, end, 0);
705
706 vma_iter_clear(vmi);
707 vma->vm_start = start;
708 vma->vm_end = end;
709 vma->vm_pgoff = pgoff;
710 vma_complete(&vp, vmi, vma->vm_mm);
711 return 0;
712 }
713
714 /*
715 * If the vma has a ->close operation then the driver probably needs to release
716 * per-vma resources, so we don't attempt to merge those if the caller indicates
717 * the current vma may be removed as part of the merge.
718 */
719 static inline bool is_mergeable_vma(struct vm_area_struct *vma,
720 struct file *file, unsigned long vm_flags,
721 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
722 struct anon_vma_name *anon_name, bool may_remove_vma)
723 {
724 /*
725 * VM_SOFTDIRTY should not prevent from VMA merging, if we
726 * match the flags but dirty bit -- the caller should mark
727 * merged VMA as dirty. If dirty bit won't be excluded from
728 * comparison, we increase pressure on the memory system forcing
729 * the kernel to generate new VMAs when old one could be
730 * extended instead.
731 */
732 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
733 return false;
734 if (vma->vm_file != file)
735 return false;
736 if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
737 return false;
738 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
739 return false;
740 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
741 return false;
742 return true;
743 }
744
745 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
746 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
747 {
748 /*
749 * The list_is_singular() test is to avoid merging VMA cloned from
750 * parents. This can improve scalability by reducing anon_vma lock contention.
751 */
752 if ((!anon_vma1 || !anon_vma2) && (!vma ||
753 list_is_singular(&vma->anon_vma_chain)))
754 return true;
755 return anon_vma1 == anon_vma2;
756 }
757
758 /*
759 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
760 * in front of (at a lower virtual address and file offset than) the vma.
761 *
762 * We cannot merge two vmas if they have differently assigned (non-NULL)
763 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
764 *
765 * We don't check here for the merged mmap wrapping around the end of pagecache
766 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
767 * wrap, nor mmaps which cover the final page at index -1UL.
768 *
769 * We assume the vma may be removed as part of the merge.
770 */
771 static bool
772 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
773 struct anon_vma *anon_vma, struct file *file,
774 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
775 struct anon_vma_name *anon_name)
776 {
777 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
778 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
779 if (vma->vm_pgoff == vm_pgoff)
780 return true;
781 }
782 return false;
783 }
784
785 /*
786 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
787 * beyond (at a higher virtual address and file offset than) the vma.
788 *
789 * We cannot merge two vmas if they have differently assigned (non-NULL)
790 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
791 *
792 * We assume that vma is not removed as part of the merge.
793 */
794 static bool
795 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
796 struct anon_vma *anon_vma, struct file *file,
797 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
798 struct anon_vma_name *anon_name)
799 {
800 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
801 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
802 pgoff_t vm_pglen;
803 vm_pglen = vma_pages(vma);
804 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
805 return true;
806 }
807 return false;
808 }
809
810 /*
811 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
812 * figure out whether that can be merged with its predecessor or its
813 * successor. Or both (it neatly fills a hole).
814 *
815 * In most cases - when called for mmap, brk or mremap - [addr,end) is
816 * certain not to be mapped by the time vma_merge is called; but when
817 * called for mprotect, it is certain to be already mapped (either at
818 * an offset within prev, or at the start of next), and the flags of
819 * this area are about to be changed to vm_flags - and the no-change
820 * case has already been eliminated.
821 *
822 * The following mprotect cases have to be considered, where **** is
823 * the area passed down from mprotect_fixup, never extending beyond one
824 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
825 * at the same address as **** and is of the same or larger span, and
826 * NNNN the next vma after ****:
827 *
828 * **** **** ****
829 * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC
830 * cannot merge might become might become
831 * PPNNNNNNNNNN PPPPPPPPPPCC
832 * mmap, brk or case 4 below case 5 below
833 * mremap move:
834 * **** ****
835 * PPPP NNNN PPPPCCCCNNNN
836 * might become might become
837 * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or
838 * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or
839 * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8
840 *
841 * It is important for case 8 that the vma CCCC overlapping the
842 * region **** is never going to be extended over NNNN. Instead NNNN must
843 * be extended in region **** and CCCC must be removed. This way in
844 * all cases where vma_merge succeeds, the moment vma_merge drops the
845 * rmap_locks, the properties of the merged vma will be already
846 * correct for the whole merged range. Some of those properties like
847 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
848 * be correct for the whole merged range immediately after the
849 * rmap_locks are released. Otherwise if NNNN would be removed and
850 * CCCC would be extended over the NNNN range, remove_migration_ptes
851 * or other rmap walkers (if working on addresses beyond the "end"
852 * parameter) may establish ptes with the wrong permissions of CCCC
853 * instead of the right permissions of NNNN.
854 *
855 * In the code below:
856 * PPPP is represented by *prev
857 * CCCC is represented by *curr or not represented at all (NULL)
858 * NNNN is represented by *next or not represented at all (NULL)
859 * **** is not represented - it will be merged and the vma containing the
860 * area is returned, or the function will return NULL
861 */
862 struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
863 struct vm_area_struct *prev, unsigned long addr,
864 unsigned long end, unsigned long vm_flags,
865 struct anon_vma *anon_vma, struct file *file,
866 pgoff_t pgoff, struct mempolicy *policy,
867 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
868 struct anon_vma_name *anon_name)
869 {
870 struct vm_area_struct *curr, *next, *res;
871 struct vm_area_struct *vma, *adjust, *remove, *remove2;
872 struct vm_area_struct *anon_dup = NULL;
873 struct vma_prepare vp;
874 pgoff_t vma_pgoff;
875 int err = 0;
876 bool merge_prev = false;
877 bool merge_next = false;
878 bool vma_expanded = false;
879 unsigned long vma_start = addr;
880 unsigned long vma_end = end;
881 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
882 long adj_start = 0;
883
884 /*
885 * We later require that vma->vm_flags == vm_flags,
886 * so this tests vma->vm_flags & VM_SPECIAL, too.
887 */
888 if (vm_flags & VM_SPECIAL)
889 return NULL;
890
891 /* Does the input range span an existing VMA? (cases 5 - 8) */
892 curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
893
894 if (!curr || /* cases 1 - 4 */
895 end == curr->vm_end) /* cases 6 - 8, adjacent VMA */
896 next = vma_lookup(mm, end);
897 else
898 next = NULL; /* case 5 */
899
900 if (prev) {
901 vma_start = prev->vm_start;
902 vma_pgoff = prev->vm_pgoff;
903
904 /* Can we merge the predecessor? */
905 if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
906 && can_vma_merge_after(prev, vm_flags, anon_vma, file,
907 pgoff, vm_userfaultfd_ctx, anon_name)) {
908 merge_prev = true;
909 vma_prev(vmi);
910 }
911 }
912
913 /* Can we merge the successor? */
914 if (next && mpol_equal(policy, vma_policy(next)) &&
915 can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
916 vm_userfaultfd_ctx, anon_name)) {
917 merge_next = true;
918 }
919
920 /* Verify some invariants that must be enforced by the caller. */
921 VM_WARN_ON(prev && addr <= prev->vm_start);
922 VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
923 VM_WARN_ON(addr >= end);
924
925 if (!merge_prev && !merge_next)
926 return NULL; /* Not mergeable. */
927
928 if (merge_prev)
929 vma_start_write(prev);
930
931 res = vma = prev;
932 remove = remove2 = adjust = NULL;
933
934 /* Can we merge both the predecessor and the successor? */
935 if (merge_prev && merge_next &&
936 is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
937 vma_start_write(next);
938 remove = next; /* case 1 */
939 vma_end = next->vm_end;
940 err = dup_anon_vma(prev, next, &anon_dup);
941 if (curr) { /* case 6 */
942 vma_start_write(curr);
943 remove = curr;
944 remove2 = next;
945 if (!next->anon_vma)
946 err = dup_anon_vma(prev, curr, &anon_dup);
947 }
948 } else if (merge_prev) { /* case 2 */
949 if (curr) {
950 vma_start_write(curr);
951 if (end == curr->vm_end) { /* case 7 */
952 /*
953 * can_vma_merge_after() assumed we would not be
954 * removing prev vma, so it skipped the check
955 * for vm_ops->close, but we are removing curr
956 */
957 if (curr->vm_ops && curr->vm_ops->close)
958 err = -EINVAL;
959 remove = curr;
960 } else { /* case 5 */
961 adjust = curr;
962 adj_start = (end - curr->vm_start);
963 }
964 if (!err)
965 err = dup_anon_vma(prev, curr, &anon_dup);
966 }
967 } else { /* merge_next */
968 vma_start_write(next);
969 res = next;
970 if (prev && addr < prev->vm_end) { /* case 4 */
971 vma_start_write(prev);
972 vma_end = addr;
973 adjust = next;
974 adj_start = -(prev->vm_end - addr);
975 err = dup_anon_vma(next, prev, &anon_dup);
976 } else {
977 /*
978 * Note that cases 3 and 8 are the ONLY ones where prev
979 * is permitted to be (but is not necessarily) NULL.
980 */
981 vma = next; /* case 3 */
982 vma_start = addr;
983 vma_end = next->vm_end;
984 vma_pgoff = next->vm_pgoff - pglen;
985 if (curr) { /* case 8 */
986 vma_pgoff = curr->vm_pgoff;
987 vma_start_write(curr);
988 remove = curr;
989 err = dup_anon_vma(next, curr, &anon_dup);
990 }
991 }
992 }
993
994 /* Error in anon_vma clone. */
995 if (err)
996 goto anon_vma_fail;
997
998 if (vma_start < vma->vm_start || vma_end > vma->vm_end)
999 vma_expanded = true;
1000
1001 if (vma_expanded) {
1002 vma_iter_config(vmi, vma_start, vma_end);
1003 } else {
1004 vma_iter_config(vmi, adjust->vm_start + adj_start,
1005 adjust->vm_end);
1006 }
1007
1008 if (vma_iter_prealloc(vmi, vma))
1009 goto prealloc_fail;
1010
1011 init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
1012 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
1013 vp.anon_vma != adjust->anon_vma);
1014
1015 vma_prepare(&vp);
1016 vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
1017
1018 vma->vm_start = vma_start;
1019 vma->vm_end = vma_end;
1020 vma->vm_pgoff = vma_pgoff;
1021
1022 if (vma_expanded)
1023 vma_iter_store(vmi, vma);
1024
1025 if (adj_start) {
1026 adjust->vm_start += adj_start;
1027 adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
1028 if (adj_start < 0) {
1029 WARN_ON(vma_expanded);
1030 vma_iter_store(vmi, next);
1031 }
1032 }
1033
1034 vma_complete(&vp, vmi, mm);
1035 khugepaged_enter_vma(res, vm_flags);
1036 return res;
1037
1038 prealloc_fail:
1039 if (anon_dup)
1040 unlink_anon_vmas(anon_dup);
1041
1042 anon_vma_fail:
1043 vma_iter_set(vmi, addr);
1044 vma_iter_load(vmi);
1045 return NULL;
1046 }
1047
1048 /*
1049 * Rough compatibility check to quickly see if it's even worth looking
1050 * at sharing an anon_vma.
1051 *
1052 * They need to have the same vm_file, and the flags can only differ
1053 * in things that mprotect may change.
1054 *
1055 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1056 * we can merge the two vma's. For example, we refuse to merge a vma if
1057 * there is a vm_ops->close() function, because that indicates that the
1058 * driver is doing some kind of reference counting. But that doesn't
1059 * really matter for the anon_vma sharing case.
1060 */
1061 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1062 {
1063 return a->vm_end == b->vm_start &&
1064 mpol_equal(vma_policy(a), vma_policy(b)) &&
1065 a->vm_file == b->vm_file &&
1066 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1067 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1068 }
1069
1070 /*
1071 * Do some basic sanity checking to see if we can re-use the anon_vma
1072 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1073 * the same as 'old', the other will be the new one that is trying
1074 * to share the anon_vma.
1075 *
1076 * NOTE! This runs with mmap_lock held for reading, so it is possible that
1077 * the anon_vma of 'old' is concurrently in the process of being set up
1078 * by another page fault trying to merge _that_. But that's ok: if it
1079 * is being set up, that automatically means that it will be a singleton
1080 * acceptable for merging, so we can do all of this optimistically. But
1081 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1082 *
1083 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1084 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1085 * is to return an anon_vma that is "complex" due to having gone through
1086 * a fork).
1087 *
1088 * We also make sure that the two vma's are compatible (adjacent,
1089 * and with the same memory policies). That's all stable, even with just
1090 * a read lock on the mmap_lock.
1091 */
1092 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1093 {
1094 if (anon_vma_compatible(a, b)) {
1095 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1096
1097 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1098 return anon_vma;
1099 }
1100 return NULL;
1101 }
1102
1103 /*
1104 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1105 * neighbouring vmas for a suitable anon_vma, before it goes off
1106 * to allocate a new anon_vma. It checks because a repetitive
1107 * sequence of mprotects and faults may otherwise lead to distinct
1108 * anon_vmas being allocated, preventing vma merge in subsequent
1109 * mprotect.
1110 */
1111 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1112 {
1113 MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
1114 struct anon_vma *anon_vma = NULL;
1115 struct vm_area_struct *prev, *next;
1116
1117 /* Try next first. */
1118 next = mas_walk(&mas);
1119 if (next) {
1120 anon_vma = reusable_anon_vma(next, vma, next);
1121 if (anon_vma)
1122 return anon_vma;
1123 }
1124
1125 prev = mas_prev(&mas, 0);
1126 VM_BUG_ON_VMA(prev != vma, vma);
1127 prev = mas_prev(&mas, 0);
1128 /* Try prev next. */
1129 if (prev)
1130 anon_vma = reusable_anon_vma(prev, prev, vma);
1131
1132 /*
1133 * We might reach here with anon_vma == NULL if we can't find
1134 * any reusable anon_vma.
1135 * There's no absolute need to look only at touching neighbours:
1136 * we could search further afield for "compatible" anon_vmas.
1137 * But it would probably just be a waste of time searching,
1138 * or lead to too many vmas hanging off the same anon_vma.
1139 * We're trying to allow mprotect remerging later on,
1140 * not trying to minimize memory used for anon_vmas.
1141 */
1142 return anon_vma;
1143 }
1144
1145 /*
1146 * If a hint addr is less than mmap_min_addr change hint to be as
1147 * low as possible but still greater than mmap_min_addr
1148 */
1149 static inline unsigned long round_hint_to_min(unsigned long hint)
1150 {
1151 hint &= PAGE_MASK;
1152 if (((void *)hint != NULL) &&
1153 (hint < mmap_min_addr))
1154 return PAGE_ALIGN(mmap_min_addr);
1155 return hint;
1156 }
1157
1158 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
1159 unsigned long bytes)
1160 {
1161 unsigned long locked_pages, limit_pages;
1162
1163 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
1164 return true;
1165
1166 locked_pages = bytes >> PAGE_SHIFT;
1167 locked_pages += mm->locked_vm;
1168
1169 limit_pages = rlimit(RLIMIT_MEMLOCK);
1170 limit_pages >>= PAGE_SHIFT;
1171
1172 return locked_pages <= limit_pages;
1173 }
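/*
 * Editor's example (illustrative only): with 4 KiB pages, an mm that
 * already has locked_vm = 10 pages and RLIMIT_MEMLOCK = 64 KiB
 * (limit_pages = 16), a new 32 KiB VM_LOCKED request adds 8 pages:
 * 10 + 8 = 18 > 16, so this returns false and the caller fails with
 * -EAGAIN. Note the limit is rounded down to whole pages by the shift.
 */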
1174
1175 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1176 {
1177 if (S_ISREG(inode->i_mode))
1178 return MAX_LFS_FILESIZE;
1179
1180 if (S_ISBLK(inode->i_mode))
1181 return MAX_LFS_FILESIZE;
1182
1183 if (S_ISSOCK(inode->i_mode))
1184 return MAX_LFS_FILESIZE;
1185
1186 /* Special "we do even unsigned file positions" case */
1187 if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1188 return 0;
1189
1190 /* Yes, random drivers might want more. But I'm tired of buggy drivers */
1191 return ULONG_MAX;
1192 }
1193
1194 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
1195 unsigned long pgoff, unsigned long len)
1196 {
1197 u64 maxsize = file_mmap_size_max(file, inode);
1198
1199 if (maxsize && len > maxsize)
1200 return false;
1201 maxsize -= len;
1202 if (pgoff > maxsize >> PAGE_SHIFT)
1203 return false;
1204 return true;
1205 }
1206
1207 /*
1208 * The caller must write-lock current->mm->mmap_lock.
1209 */
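/*
 * Editor's sketch of the expected caller pattern (cf. vm_mmap_pgoff(),
 * which lives outside this file); details hedged, error handling elided:
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_mmap(file, addr, len, prot, flags, 0, pgoff, &populate, &uf);
 *	mmap_write_unlock(mm);
 *	userfaultfd_unmap_complete(mm, &uf);
 *	if (populate)
 *		mm_populate(ret, populate);
 */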
1210 unsigned long do_mmap(struct file *file, unsigned long addr,
1211 unsigned long len, unsigned long prot,
1212 unsigned long flags, vm_flags_t vm_flags,
1213 unsigned long pgoff, unsigned long *populate,
1214 struct list_head *uf)
1215 {
1216 struct mm_struct *mm = current->mm;
1217 int pkey = 0;
1218
1219 *populate = 0;
1220
1221 if (!len)
1222 return -EINVAL;
1223
1224 /*
1225 * Does the application expect PROT_READ to imply PROT_EXEC?
1226 *
1227 * (the exception is when the underlying filesystem is noexec
1228 * mounted, in which case we dont add PROT_EXEC.)
1229 */
1230 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1231 if (!(file && path_noexec(&file->f_path)))
1232 prot |= PROT_EXEC;
1233
1234 /* force arch specific MAP_FIXED handling in get_unmapped_area */
1235 if (flags & MAP_FIXED_NOREPLACE)
1236 flags |= MAP_FIXED;
1237
1238 if (!(flags & MAP_FIXED))
1239 addr = round_hint_to_min(addr);
1240
1241 /* Careful about overflows.. */
1242 len = PAGE_ALIGN(len);
1243 if (!len)
1244 return -ENOMEM;
1245
1246 /* offset overflow? */
1247 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1248 return -EOVERFLOW;
1249
1250 /* Too many mappings? */
1251 if (mm->map_count > sysctl_max_map_count)
1252 return -ENOMEM;
1253
1254 /* Obtain the address to map to. we verify (or select) it and ensure
1255 * that it represents a valid section of the address space.
1256 */
1257 addr = get_unmapped_area(file, addr, len, pgoff, flags);
1258 if (IS_ERR_VALUE(addr))
1259 return addr;
1260
1261 if (flags & MAP_FIXED_NOREPLACE) {
1262 if (find_vma_intersection(mm, addr, addr + len))
1263 return -EEXIST;
1264 }
1265
1266 if (prot == PROT_EXEC) {
1267 pkey = execute_only_pkey(mm);
1268 if (pkey < 0)
1269 pkey = 0;
1270 }
1271
1272 /* Do simple checking here so the lower-level routines won't have
1273 * to. we assume access permissions have been handled by the open
1274 * of the memory object, so we don't do any here.
1275 */
1276 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
1277 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1278
1279 if (flags & MAP_LOCKED)
1280 if (!can_do_mlock())
1281 return -EPERM;
1282
1283 if (!mlock_future_ok(mm, vm_flags, len))
1284 return -EAGAIN;
1285
1286 if (file) {
1287 struct inode *inode = file_inode(file);
1288 unsigned long flags_mask;
1289
1290 if (!file_mmap_ok(file, inode, pgoff, len))
1291 return -EOVERFLOW;
1292
1293 flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
1294
1295 switch (flags & MAP_TYPE) {
1296 case MAP_SHARED:
1297 /*
1298 * Force use of MAP_SHARED_VALIDATE with non-legacy
1299 * flags. E.g. MAP_SYNC is dangerous to use with
1300 * MAP_SHARED as you don't know which consistency model
1301 * you will get. We silently ignore unsupported flags
1302 * with MAP_SHARED to preserve backward compatibility.
1303 */
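/*
 * Editor's example: mmap(NULL, len, PROT_READ|PROT_WRITE,
 * MAP_SHARED | MAP_SYNC, fd, 0) has MAP_SYNC silently masked off
 * here, whereas MAP_SHARED_VALIDATE | MAP_SYNC falls through to the
 * flags_mask check below and fails with -EOPNOTSUPP unless the
 * file's ->mmap_supported_flags includes MAP_SYNC.
 */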
1304 flags &= LEGACY_MAP_MASK;
1305 fallthrough;
1306 case MAP_SHARED_VALIDATE:
1307 if (flags & ~flags_mask)
1308 return -EOPNOTSUPP;
1309 if (prot & PROT_WRITE) {
1310 if (!(file->f_mode & FMODE_WRITE))
1311 return -EACCES;
1312 if (IS_SWAPFILE(file->f_mapping->host))
1313 return -ETXTBSY;
1314 }
1315
1316 /*
1317 * Make sure we don't allow writing to an append-only
1318 * file..
1319 */
1320 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1321 return -EACCES;
1322
1323 vm_flags |= VM_SHARED | VM_MAYSHARE;
1324 if (!(file->f_mode & FMODE_WRITE))
1325 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1326 fallthrough;
1327 case MAP_PRIVATE:
1328 if (!(file->f_mode & FMODE_READ))
1329 return -EACCES;
1330 if (path_noexec(&file->f_path)) {
1331 if (vm_flags & VM_EXEC)
1332 return -EPERM;
1333 vm_flags &= ~VM_MAYEXEC;
1334 }
1335
1336 if (!file->f_op->mmap)
1337 return -ENODEV;
1338 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1339 return -EINVAL;
1340 break;
1341
1342 default:
1343 return -EINVAL;
1344 }
1345 } else {
1346 switch (flags & MAP_TYPE) {
1347 case MAP_SHARED:
1348 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1349 return -EINVAL;
1350 /*
1351 * Ignore pgoff.
1352 */
1353 pgoff = 0;
1354 vm_flags |= VM_SHARED | VM_MAYSHARE;
1355 break;
1356 case MAP_PRIVATE:
1357 /*
1358 * Set pgoff according to addr for anon_vma.
1359 */
1360 pgoff = addr >> PAGE_SHIFT;
1361 break;
1362 default:
1363 return -EINVAL;
1364 }
1365 }
1366
1367 /*
1368 * Set 'VM_NORESERVE' if we should not account for the
1369 * memory use of this mapping.
1370 */
1371 if (flags & MAP_NORESERVE) {
1372 /* We honor MAP_NORESERVE if allowed to overcommit */
1373 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1374 vm_flags |= VM_NORESERVE;
1375
1376 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1377 if (file && is_file_hugepages(file))
1378 vm_flags |= VM_NORESERVE;
1379 }
1380
1381 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
1382 if (!IS_ERR_VALUE(addr) &&
1383 ((vm_flags & VM_LOCKED) ||
1384 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1385 *populate = len;
1386 return addr;
1387 }
1388
1389 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1390 unsigned long prot, unsigned long flags,
1391 unsigned long fd, unsigned long pgoff)
1392 {
1393 struct file *file = NULL;
1394 unsigned long retval;
1395
1396 if (!(flags & MAP_ANONYMOUS)) {
1397 audit_mmap_fd(fd, flags);
1398 file = fget(fd);
1399 if (!file)
1400 return -EBADF;
1401 if (is_file_hugepages(file)) {
1402 len = ALIGN(len, huge_page_size(hstate_file(file)));
1403 } else if (unlikely(flags & MAP_HUGETLB)) {
1404 retval = -EINVAL;
1405 goto out_fput;
1406 }
1407 } else if (flags & MAP_HUGETLB) {
1408 struct hstate *hs;
1409
1410 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1411 if (!hs)
1412 return -EINVAL;
1413
1414 len = ALIGN(len, huge_page_size(hs));
1415 /*
1416 * VM_NORESERVE is used because the reservations will be
1417 * taken when vm_ops->mmap() is called
1418 */
1419 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1420 VM_NORESERVE,
1421 HUGETLB_ANONHUGE_INODE,
1422 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1423 if (IS_ERR(file))
1424 return PTR_ERR(file);
1425 }
1426
1427 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1428 out_fput:
1429 if (file)
1430 fput(file);
1431 return retval;
1432 }
1433
1434 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1435 unsigned long, prot, unsigned long, flags,
1436 unsigned long, fd, unsigned long, pgoff)
1437 {
1438 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1439 }
1440
1441 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1442 struct mmap_arg_struct {
1443 unsigned long addr;
1444 unsigned long len;
1445 unsigned long prot;
1446 unsigned long flags;
1447 unsigned long fd;
1448 unsigned long offset;
1449 };
1450
1451 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1452 {
1453 struct mmap_arg_struct a;
1454
1455 if (copy_from_user(&a, arg, sizeof(a)))
1456 return -EFAULT;
1457 if (offset_in_page(a.offset))
1458 return -EINVAL;
1459
1460 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1461 a.offset >> PAGE_SHIFT);
1462 }
1463 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1464
1465 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1466 {
1467 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1468 }
1469
1470 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1471 {
1472 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1473 (VM_WRITE | VM_SHARED);
1474 }
1475
1476 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1477 {
1478 /* No managed pages to writeback. */
1479 if (vma->vm_flags & VM_PFNMAP)
1480 return false;
1481
1482 return vma->vm_file && vma->vm_file->f_mapping &&
1483 mapping_can_writeback(vma->vm_file->f_mapping);
1484 }
1485
1486 /*
1487 * Does this VMA require the underlying folios to have their dirty state
1488 * tracked?
1489 */
1490 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1491 {
1492 /* Only shared, writable VMAs require dirty tracking. */
1493 if (!vma_is_shared_writable(vma))
1494 return false;
1495
1496 /* Does the filesystem need to be notified? */
1497 if (vm_ops_needs_writenotify(vma->vm_ops))
1498 return true;
1499
1500 /*
1501 * Even if the filesystem doesn't indicate a need for writenotify, if it
1502 * can writeback, dirty tracking is still required.
1503 */
1504 return vma_fs_can_writeback(vma);
1505 }
1506
1507 /*
1508 * Some shared mappings will want the pages marked read-only
1509 * to track write events. If so, we'll downgrade vm_page_prot
1510 * to the private version (using protection_map[] without the
1511 * VM_SHARED bit).
1512 */
1513 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1514 {
1515 /* If it was private or non-writable, the write bit is already clear */
1516 if (!vma_is_shared_writable(vma))
1517 return 0;
1518
1519 /* The backer wishes to know when pages are first written to? */
1520 if (vm_ops_needs_writenotify(vma->vm_ops))
1521 return 1;
1522
1523 /* The open routine did something to the protections that pgprot_modify
1524 * won't preserve? */
1525 if (pgprot_val(vm_page_prot) !=
1526 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1527 return 0;
1528
1529 /*
1530 * Do we need to track softdirty? hugetlb does not support softdirty
1531 * tracking yet.
1532 */
1533 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1534 return 1;
1535
1536 /* Do we need write faults for uffd-wp tracking? */
1537 if (userfaultfd_wp(vma))
1538 return 1;
1539
1540 /* Can the mapping track the dirty pages? */
1541 return vma_fs_can_writeback(vma);
1542 }
1543
1544 /*
1545 * We account for memory if it's a private writeable mapping,
1546 * not hugepages and VM_NORESERVE wasn't set.
1547 */
1548 static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1549 {
1550 /*
1551 * hugetlb has its own accounting separate from the core VM
1552 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1553 */
1554 if (file && is_file_hugepages(file))
1555 return 0;
1556
1557 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1558 }
1559
1560 /**
1561 * unmapped_area() - Find an area between the low_limit and the high_limit with
1562 * the correct alignment and offset, all from @info. Note: current->mm is used
1563 * for the search.
1564 *
1565 * @info: The unmapped area information including the range [low_limit -
1566 * high_limit), the alignment offset and mask.
1567 *
1568 * Return: A memory address or -ENOMEM.
1569 */
1570 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1571 {
1572 unsigned long length, gap;
1573 unsigned long low_limit, high_limit;
1574 struct vm_area_struct *tmp;
1575
1576 MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1577
1578 /* Adjust search length to account for worst case alignment overhead */
1579 length = info->length + info->align_mask;
1580 if (length < info->length)
1581 return -ENOMEM;
1582
1583 low_limit = info->low_limit;
1584 if (low_limit < mmap_min_addr)
1585 low_limit = mmap_min_addr;
1586 high_limit = info->high_limit;
1587 retry:
1588 if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
1589 return -ENOMEM;
1590
1591 gap = mas.index;
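/*
 * Editor's note: the next line rounds @gap up so that
 * (gap & align_mask) == (align_offset & align_mask). For example, with
 * align_mask = 0xffff (64 KiB alignment), align_offset = 0 and a gap
 * starting at 0x12345000, gap becomes 0x12350000.
 */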
1592 gap += (info->align_offset - gap) & info->align_mask;
1593 tmp = mas_next(&mas, ULONG_MAX);
1594 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1595 if (vm_start_gap(tmp) < gap + length - 1) {
1596 low_limit = tmp->vm_end;
1597 mas_reset(&mas);
1598 goto retry;
1599 }
1600 } else {
1601 tmp = mas_prev(&mas, 0);
1602 if (tmp && vm_end_gap(tmp) > gap) {
1603 low_limit = vm_end_gap(tmp);
1604 mas_reset(&mas);
1605 goto retry;
1606 }
1607 }
1608
1609 return gap;
1610 }
1611
1612 /**
1613 * unmapped_area_topdown() - Find an area between the low_limit and the
1614 * high_limit with the correct alignment and offset at the highest available
1615 * address, all from @info. Note: current->mm is used for the search.
1616 *
1617 * @info: The unmapped area information including the range [low_limit -
1618 * high_limit), the alignment offset and mask.
1619 *
1620 * Return: A memory address or -ENOMEM.
1621 */
1622 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1623 {
1624 unsigned long length, gap, gap_end;
1625 unsigned long low_limit, high_limit;
1626 struct vm_area_struct *tmp;
1627
1628 MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1629 /* Adjust search length to account for worst case alignment overhead */
1630 length = info->length + info->align_mask;
1631 if (length < info->length)
1632 return -ENOMEM;
1633
1634 low_limit = info->low_limit;
1635 if (low_limit < mmap_min_addr)
1636 low_limit = mmap_min_addr;
1637 high_limit = info->high_limit;
1638 retry:
1639 if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
1640 return -ENOMEM;
1641
1642 gap = mas.last + 1 - info->length;
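/*
 * Editor's note: here the candidate address is rounded *down* so that
 * (gap & align_mask) == (align_offset & align_mask), mirroring the
 * round-up in unmapped_area() above, since allocation proceeds from
 * the top of the gap.
 */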
1643 gap -= (gap - info->align_offset) & info->align_mask;
1644 gap_end = mas.last;
1645 tmp = mas_next(&mas, ULONG_MAX);
1646 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1647 if (vm_start_gap(tmp) <= gap_end) {
1648 high_limit = vm_start_gap(tmp);
1649 mas_reset(&mas);
1650 goto retry;
1651 }
1652 } else {
1653 tmp = mas_prev(&mas, 0);
1654 if (tmp && vm_end_gap(tmp) > gap) {
1655 high_limit = tmp->vm_start;
1656 mas_reset(&mas);
1657 goto retry;
1658 }
1659 }
1660
1661 return gap;
1662 }
1663
1664 /*
1665 * Search for an unmapped address range.
1666 *
1667 * We are looking for a range that:
1668 * - does not intersect with any VMA;
1669 * - is contained within the [low_limit, high_limit) interval;
1670 * - is at least the desired size.
1671 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
1672 */
1673 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
1674 {
1675 unsigned long addr;
1676
1677 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
1678 addr = unmapped_area_topdown(info);
1679 else
1680 addr = unmapped_area(info);
1681
1682 trace_vm_unmapped_area(addr, info);
1683 return addr;
1684 }
1685
1686 /* Get an address range which is currently unmapped.
1687 * For shmat() with addr=0.
1688 *
1689 * Ugly calling convention alert:
1690 * Return value with the low bits set means error value,
1691 * ie
1692 * if (ret & ~PAGE_MASK)
1693 * error = ret;
1694 *
1695 * This function "knows" that -ENOMEM has the bits set.
1696 */
1697 unsigned long
1698 generic_get_unmapped_area(struct file *filp, unsigned long addr,
1699 unsigned long len, unsigned long pgoff,
1700 unsigned long flags)
1701 {
1702 struct mm_struct *mm = current->mm;
1703 struct vm_area_struct *vma, *prev;
1704 struct vm_unmapped_area_info info;
1705 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1706
1707 if (len > mmap_end - mmap_min_addr)
1708 return -ENOMEM;
1709
1710 if (flags & MAP_FIXED)
1711 return addr;
1712
1713 if (addr) {
1714 addr = PAGE_ALIGN(addr);
1715 vma = find_vma_prev(mm, addr, &prev);
1716 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1717 (!vma || addr + len <= vm_start_gap(vma)) &&
1718 (!prev || addr >= vm_end_gap(prev)))
1719 return addr;
1720 }
1721
1722 info.flags = 0;
1723 info.length = len;
1724 info.low_limit = mm->mmap_base;
1725 info.high_limit = mmap_end;
1726 info.align_mask = 0;
1727 info.align_offset = 0;
1728 return vm_unmapped_area(&info);
1729 }
1730
1731 #ifndef HAVE_ARCH_UNMAPPED_AREA
1732 unsigned long
1733 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1734 unsigned long len, unsigned long pgoff,
1735 unsigned long flags)
1736 {
1737 return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
1738 }
1739 #endif
1740
1741 /*
1742 * This mmap-allocator allocates new areas top-down from below the
1743 * stack's low limit (the base):
1744 */
1745 unsigned long
1746 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1747 unsigned long len, unsigned long pgoff,
1748 unsigned long flags)
1749 {
1750 struct vm_area_struct *vma, *prev;
1751 struct mm_struct *mm = current->mm;
1752 struct vm_unmapped_area_info info;
1753 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1754
1755 /* requested length too big for entire address space */
1756 if (len > mmap_end - mmap_min_addr)
1757 return -ENOMEM;
1758
1759 if (flags & MAP_FIXED)
1760 return addr;
1761
1762 /* requesting a specific address */
1763 if (addr) {
1764 addr = PAGE_ALIGN(addr);
1765 vma = find_vma_prev(mm, addr, &prev);
1766 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1767 (!vma || addr + len <= vm_start_gap(vma)) &&
1768 (!prev || addr >= vm_end_gap(prev)))
1769 return addr;
1770 }
1771
1772 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1773 info.length = len;
1774 info.low_limit = PAGE_SIZE;
1775 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
1776 info.align_mask = 0;
1777 info.align_offset = 0;
1778 addr = vm_unmapped_area(&info);
1779
1780 /*
1781 * A failed mmap() very likely causes application failure,
1782 * so fall back to the bottom-up function here. This scenario
1783 * can happen with large stack limits and large mmap()
1784 * allocations.
1785 */
1786 if (offset_in_page(addr)) {
1787 VM_BUG_ON(addr != -ENOMEM);
1788 info.flags = 0;
1789 info.low_limit = TASK_UNMAPPED_BASE;
1790 info.high_limit = mmap_end;
1791 addr = vm_unmapped_area(&info);
1792 }
1793
1794 return addr;
1795 }
1796
1797 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1798 unsigned long
1799 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1800 unsigned long len, unsigned long pgoff,
1801 unsigned long flags)
1802 {
1803 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
1804 }
1805 #endif
1806
1807 unsigned long
1808 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1809 unsigned long pgoff, unsigned long flags)
1810 {
1811 unsigned long (*get_area)(struct file *, unsigned long,
1812 unsigned long, unsigned long, unsigned long);
1813
1814 unsigned long error = arch_mmap_check(addr, len, flags);
1815 if (error)
1816 return error;
1817
1818 /* Careful about overflows.. */
1819 if (len > TASK_SIZE)
1820 return -ENOMEM;
1821
1822 get_area = current->mm->get_unmapped_area;
1823 if (file) {
1824 if (file->f_op->get_unmapped_area)
1825 get_area = file->f_op->get_unmapped_area;
1826 } else if (flags & MAP_SHARED) {
1827 /*
1828 * mmap_region() will call shmem_zero_setup() to create a file,
1829 * so use shmem's get_unmapped_area in case it can be huge.
1830 * do_mmap() will clear pgoff, so match alignment.
1831 */
1832 pgoff = 0;
1833 get_area = shmem_get_unmapped_area;
1834 }
1835
1836 addr = get_area(file, addr, len, pgoff, flags);
1837 if (IS_ERR_VALUE(addr))
1838 return addr;
1839
1840 if (addr > TASK_SIZE - len)
1841 return -ENOMEM;
1842 if (offset_in_page(addr))
1843 return -EINVAL;
1844
1845 error = security_mmap_addr(addr);
1846 return error ? error : addr;
1847 }
1848
1849 EXPORT_SYMBOL(get_unmapped_area);
1850
1851 /**
1852 * find_vma_intersection() - Look up the first VMA which intersects the interval
1853 * @mm: The process address space.
1854 * @start_addr: The inclusive start user address.
1855 * @end_addr: The exclusive end user address.
1856 *
1857 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
1858 * start_addr < end_addr.
1859 */
1860 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
1861 unsigned long start_addr,
1862 unsigned long end_addr)
1863 {
1864 unsigned long index = start_addr;
1865
1866 mmap_assert_locked(mm);
1867 return mt_find(&mm->mm_mt, &index, end_addr - 1);
1868 }
1869 EXPORT_SYMBOL(find_vma_intersection);
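
/*
 * Illustrative sketch (hypothetical caller, start/len assumed): with the
 * mmap_lock held, find_vma_intersection() answers "is anything mapped in
 * [start, start + len)?".
 *
 *	mmap_read_lock(mm);
 *	if (find_vma_intersection(mm, start, start + len))
 *		pr_debug("range is already (partially) mapped\n");
 *	mmap_read_unlock(mm);
 */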
1870
1871 /**
1872 * find_vma() - Find the VMA for a given address, or the next VMA.
1873 * @mm: The mm_struct to check
1874 * @addr: The address
1875 *
1876 * Returns: The VMA associated with addr, or the next VMA.
1877 * May return %NULL in the case of no VMA at addr or above.
1878 */
1879 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1880 {
1881 unsigned long index = addr;
1882
1883 mmap_assert_locked(mm);
1884 return mt_find(&mm->mm_mt, &index, ULONG_MAX);
1885 }
1886 EXPORT_SYMBOL(find_vma);
1887
1888 /**
1889 * find_vma_prev() - Find the VMA for a given address, or the next vma and
1890 * set %pprev to the previous VMA, if any.
1891 * @mm: The mm_struct to check
1892 * @addr: The address
1893 * @pprev: The pointer to set to the previous VMA
1894 *
1895 * Note that RCU lock is missing here since the external mmap_lock() is used
1896 * instead.
1897 *
1898 * Returns: The VMA associated with @addr, or the next vma.
1899 * May return %NULL in the case of no vma at addr or above.
1900 */
1901 struct vm_area_struct *
1902 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1903 struct vm_area_struct **pprev)
1904 {
1905 struct vm_area_struct *vma;
1906 MA_STATE(mas, &mm->mm_mt, addr, addr);
1907
1908 vma = mas_walk(&mas);
1909 *pprev = mas_prev(&mas, 0);
1910 if (!vma)
1911 vma = mas_next(&mas, ULONG_MAX);
1912 return vma;
1913 }
1914
1915 /*
1916 * Verify that the stack growth is acceptable and
1917 * update accounting. This is shared with both the
1918 * grow-up and grow-down cases.
1919 */
1920 static int acct_stack_growth(struct vm_area_struct *vma,
1921 unsigned long size, unsigned long grow)
1922 {
1923 struct mm_struct *mm = vma->vm_mm;
1924 unsigned long new_start;
1925
1926 /* address space limit tests */
1927 if (!may_expand_vm(mm, vma->vm_flags, grow))
1928 return -ENOMEM;
1929
1930 /* Stack limit test */
1931 if (size > rlimit(RLIMIT_STACK))
1932 return -ENOMEM;
1933
1934 /* mlock limit tests */
1935 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1936 return -ENOMEM;
1937
1938 /* Check to ensure the stack will not grow into a hugetlb-only region */
1939 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1940 vma->vm_end - size;
1941 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1942 return -EFAULT;
1943
1944 /*
1945 * Overcommit.. This must be the final test, as it will
1946 * update security statistics.
1947 */
1948 if (security_vm_enough_memory_mm(mm, grow))
1949 return -ENOMEM;
1950
1951 return 0;
1952 }
1953
1954 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1955 /*
1956 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1957 * vma is the last one with address > vma->vm_end. Have to extend vma.
1958 */
1959 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1960 {
1961 struct mm_struct *mm = vma->vm_mm;
1962 struct vm_area_struct *next;
1963 unsigned long gap_addr;
1964 int error = 0;
1965 MA_STATE(mas, &mm->mm_mt, vma->vm_start, address);
1966
1967 if (!(vma->vm_flags & VM_GROWSUP))
1968 return -EFAULT;
1969
1970 /* Guard against exceeding limits of the address space. */
1971 address &= PAGE_MASK;
1972 if (address >= (TASK_SIZE & PAGE_MASK))
1973 return -ENOMEM;
1974 address += PAGE_SIZE;
1975
1976 /* Enforce stack_guard_gap */
1977 gap_addr = address + stack_guard_gap;
1978
1979 /* Guard against overflow */
1980 if (gap_addr < address || gap_addr > TASK_SIZE)
1981 gap_addr = TASK_SIZE;
1982
1983 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1984 if (next && vma_is_accessible(next)) {
1985 if (!(next->vm_flags & VM_GROWSUP))
1986 return -ENOMEM;
1987 /* Check that both stack segments have the same anon_vma? */
1988 }
1989
1990 if (next)
1991 mas_prev_range(&mas, address);
1992
1993 __mas_set_range(&mas, vma->vm_start, address - 1);
1994 if (mas_preallocate(&mas, vma, GFP_KERNEL))
1995 return -ENOMEM;
1996
1997 /* We must make sure the anon_vma is allocated. */
1998 if (unlikely(anon_vma_prepare(vma))) {
1999 mas_destroy(&mas);
2000 return -ENOMEM;
2001 }
2002
2003 /* Lock the VMA before expanding to prevent concurrent page faults */
2004 vma_start_write(vma);
2005 /*
2006 * vma->vm_start/vm_end cannot change under us because the caller
2007 * is required to hold the mmap_lock in read mode. We need the
2008 * anon_vma lock to serialize against concurrent expand_stacks.
2009 */
2010 anon_vma_lock_write(vma->anon_vma);
2011
2012 /* Somebody else might have raced and expanded it already */
2013 if (address > vma->vm_end) {
2014 unsigned long size, grow;
2015
2016 size = address - vma->vm_start;
2017 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2018
2019 error = -ENOMEM;
2020 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2021 error = acct_stack_growth(vma, size, grow);
2022 if (!error) {
2023 /*
2024 * We only hold a shared mmap_lock here, so
2025 * we need to protect against concurrent vma
2026 * expansions. anon_vma_lock_write() doesn't
2027 * help here, as we don't guarantee that all
2028 * growable vmas in a mm share the same root
2029 * anon vma. So, we reuse mm->page_table_lock
2030 * to guard against concurrent vma expansions.
2031 */
2032 spin_lock(&mm->page_table_lock);
2033 if (vma->vm_flags & VM_LOCKED)
2034 mm->locked_vm += grow;
2035 vm_stat_account(mm, vma->vm_flags, grow);
2036 anon_vma_interval_tree_pre_update_vma(vma);
2037 vma->vm_end = address;
2038 /* Overwrite old entry in mtree. */
2039 mas_store_prealloc(&mas, vma);
2040 anon_vma_interval_tree_post_update_vma(vma);
2041 spin_unlock(&mm->page_table_lock);
2042
2043 perf_event_mmap(vma);
2044 }
2045 }
2046 }
2047 anon_vma_unlock_write(vma->anon_vma);
2048 khugepaged_enter_vma(vma, vma->vm_flags);
2049 mas_destroy(&mas);
2050 validate_mm(mm);
2051 return error;
2052 }
2053 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2054
2055 /*
2056 * vma is the first one with address < vma->vm_start. Have to extend vma.
2057 * mmap_lock held for writing.
2058 */
2059 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2060 {
2061 struct mm_struct *mm = vma->vm_mm;
2062 MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
2063 struct vm_area_struct *prev;
2064 int error = 0;
2065
2066 if (!(vma->vm_flags & VM_GROWSDOWN))
2067 return -EFAULT;
2068
2069 address &= PAGE_MASK;
2070 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
2071 return -EPERM;
2072
2073 /* Enforce stack_guard_gap */
2074 prev = mas_prev(&mas, 0);
2075 /* Check that both stack segments have the same anon_vma? */
2076 if (prev) {
2077 if (!(prev->vm_flags & VM_GROWSDOWN) &&
2078 vma_is_accessible(prev) &&
2079 (address - prev->vm_end < stack_guard_gap))
2080 return -ENOMEM;
2081 }
2082
2083 if (prev)
2084 mas_next_range(&mas, vma->vm_start);
2085
2086 __mas_set_range(&mas, address, vma->vm_end - 1);
2087 if (mas_preallocate(&mas, vma, GFP_KERNEL))
2088 return -ENOMEM;
2089
2090 /* We must make sure the anon_vma is allocated. */
2091 if (unlikely(anon_vma_prepare(vma))) {
2092 mas_destroy(&mas);
2093 return -ENOMEM;
2094 }
2095
2096 /* Lock the VMA before expanding to prevent concurrent page faults */
2097 vma_start_write(vma);
2098 /*
2099 * vma->vm_start/vm_end cannot change under us because the caller
2100 * is required to hold the mmap_lock in read mode. We need the
2101 * anon_vma lock to serialize against concurrent expand_stacks.
2102 */
2103 anon_vma_lock_write(vma->anon_vma);
2104
2105 /* Somebody else might have raced and expanded it already */
2106 if (address < vma->vm_start) {
2107 unsigned long size, grow;
2108
2109 size = vma->vm_end - address;
2110 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2111
2112 error = -ENOMEM;
2113 if (grow <= vma->vm_pgoff) {
2114 error = acct_stack_growth(vma, size, grow);
2115 if (!error) {
2116 /*
2117 * We only hold a shared mmap_lock here, so
2118 * we need to protect against concurrent vma
2119 * expansions. anon_vma_lock_write() doesn't
2120 * help here, as we don't guarantee that all
2121 * growable vmas in a mm share the same root
2122 * anon vma. So, we reuse mm->page_table_lock
2123 * to guard against concurrent vma expansions.
2124 */
2125 spin_lock(&mm->page_table_lock);
2126 if (vma->vm_flags & VM_LOCKED)
2127 mm->locked_vm += grow;
2128 vm_stat_account(mm, vma->vm_flags, grow);
2129 anon_vma_interval_tree_pre_update_vma(vma);
2130 vma->vm_start = address;
2131 vma->vm_pgoff -= grow;
2132 /* Overwrite old entry in mtree. */
2133 mas_store_prealloc(&mas, vma);
2134 anon_vma_interval_tree_post_update_vma(vma);
2135 spin_unlock(&mm->page_table_lock);
2136
2137 perf_event_mmap(vma);
2138 }
2139 }
2140 }
2141 anon_vma_unlock_write(vma->anon_vma);
2142 khugepaged_enter_vma(vma, vma->vm_flags);
2143 mas_destroy(&mas);
2144 validate_mm(mm);
2145 return error;
2146 }
2147
2148 /* enforced gap between the expanding stack and other mappings. */
2149 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2150
2151 static int __init cmdline_parse_stack_guard_gap(char *p)
2152 {
2153 unsigned long val;
2154 char *endptr;
2155
2156 val = simple_strtoul(p, &endptr, 10);
2157 if (!*endptr)
2158 stack_guard_gap = val << PAGE_SHIFT;
2159
2160 return 1;
2161 }
2162 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
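
/*
 * Illustrative note: the boot parameter is given in pages. With 4 KiB pages
 * the default of 256 pages is 1 MiB; booting with, for example,
 *
 *	stack_guard_gap=1024
 *
 * would enlarge the gap to 4 MiB, while stack_guard_gap=0 disables it.
 */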
2163
2164 #ifdef CONFIG_STACK_GROWSUP
2165 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2166 {
2167 return expand_upwards(vma, address);
2168 }
2169
2170 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2171 {
2172 struct vm_area_struct *vma, *prev;
2173
2174 addr &= PAGE_MASK;
2175 vma = find_vma_prev(mm, addr, &prev);
2176 if (vma && (vma->vm_start <= addr))
2177 return vma;
2178 if (!prev)
2179 return NULL;
2180 if (expand_stack_locked(prev, addr))
2181 return NULL;
2182 if (prev->vm_flags & VM_LOCKED)
2183 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2184 return prev;
2185 }
2186 #else
2187 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2188 {
2189 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
2190 return -EINVAL;
2191 return expand_downwards(vma, address);
2192 }
2193
2194 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2195 {
2196 struct vm_area_struct *vma;
2197 unsigned long start;
2198
2199 addr &= PAGE_MASK;
2200 vma = find_vma(mm, addr);
2201 if (!vma)
2202 return NULL;
2203 if (vma->vm_start <= addr)
2204 return vma;
2205 start = vma->vm_start;
2206 if (expand_stack_locked(vma, addr))
2207 return NULL;
2208 if (vma->vm_flags & VM_LOCKED)
2209 populate_vma_page_range(vma, addr, start, NULL);
2210 return vma;
2211 }
2212 #endif
2213
2214 /*
2215 * IA64 has some horrid mapping rules: it can expand both up and down,
2216 * but with various special rules.
2217 *
2218 * We'll get rid of this architecture eventually, so the ugliness is
2219 * temporary.
2220 */
2221 #ifdef CONFIG_IA64
2222 static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
2223 {
2224 return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
2225 REGION_OFFSET(addr) < RGN_MAP_LIMIT;
2226 }
2227
2228 /*
2229 * IA64 stacks grow down, but there's a special register backing store
2230 * that can grow up. Only sequentially, though, so the new address must
2231 * match vm_end.
2232 */
2233 static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
2234 {
2235 if (!vma_expand_ok(vma, addr))
2236 return -EFAULT;
2237 if (vma->vm_end != (addr & PAGE_MASK))
2238 return -EFAULT;
2239 return expand_upwards(vma, addr);
2240 }
2241
2242 static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
2243 {
2244 if (!vma_expand_ok(vma, addr))
2245 return -EFAULT;
2246 return expand_downwards(vma, addr);
2247 }
2248
2249 #elif defined(CONFIG_STACK_GROWSUP)
2250
2251 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
2252 #define vma_expand_down(vma, addr) (-EFAULT)
2253
2254 #else
2255
2256 #define vma_expand_up(vma,addr) (-EFAULT)
2257 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
2258
2259 #endif
2260
2261 /*
2262 * expand_stack(): legacy interface for page faulting. Don't use unless
2263 * you have to.
2264 *
2265 * This is called with the mm locked for reading, drops the lock, takes
2266 * the lock for writing, tries to look up a vma again, expands it if
2267 * necessary, and downgrades the lock to reading again.
2268 *
2269 * If no vma is found or it can't be expanded, it returns NULL and has
2270 * dropped the lock.
2271 */
2272 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
2273 {
2274 struct vm_area_struct *vma, *prev;
2275
2276 mmap_read_unlock(mm);
2277 if (mmap_write_lock_killable(mm))
2278 return NULL;
2279
2280 vma = find_vma_prev(mm, addr, &prev);
2281 if (vma && vma->vm_start <= addr)
2282 goto success;
2283
2284 if (prev && !vma_expand_up(prev, addr)) {
2285 vma = prev;
2286 goto success;
2287 }
2288
2289 if (vma && !vma_expand_down(vma, addr))
2290 goto success;
2291
2292 mmap_write_unlock(mm);
2293 return NULL;
2294
2295 success:
2296 mmap_write_downgrade(mm);
2297 return vma;
2298 }
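
/*
 * Illustrative sketch (hypothetical fault handler, return values assumed):
 * a legacy path that still uses expand_stack() looks roughly like this.
 * On success the mm is read-locked again; on failure the lock has already
 * been dropped.
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, addr);
 *	if (!vma || vma->vm_start > addr) {
 *		vma = expand_stack(mm, addr);	// drops and retakes the lock
 *		if (!vma)
 *			return VM_FAULT_SIGSEGV;	// lock already dropped
 *	}
 *	// ... handle the fault, then mmap_read_unlock(mm) ...
 */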
2299
2300 /*
2301 * Ok - we have the memory areas we should free on a maple tree so release them,
2302 * and do the vma updates.
2303 *
2304 * Called with the mm semaphore held.
2305 */
2306 static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
2307 {
2308 unsigned long nr_accounted = 0;
2309 struct vm_area_struct *vma;
2310
2311 /* Update high watermark before we lower total_vm */
2312 update_hiwater_vm(mm);
2313 mas_for_each(mas, vma, ULONG_MAX) {
2314 long nrpages = vma_pages(vma);
2315
2316 if (vma->vm_flags & VM_ACCOUNT)
2317 nr_accounted += nrpages;
2318 vm_stat_account(mm, vma->vm_flags, -nrpages);
2319 remove_vma(vma, false);
2320 }
2321 vm_unacct_memory(nr_accounted);
2322 }
2323
2324 /*
2325 * Get rid of page table information in the indicated region.
2326 *
2327 * Called with the mm semaphore held.
2328 */
2329 static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
2330 struct vm_area_struct *vma, struct vm_area_struct *prev,
2331 struct vm_area_struct *next, unsigned long start,
2332 unsigned long end, unsigned long tree_end, bool mm_wr_locked)
2333 {
2334 struct mmu_gather tlb;
2335 unsigned long mt_start = mas->index;
2336
2337 lru_add_drain();
2338 tlb_gather_mmu(&tlb, mm);
2339 update_hiwater_rss(mm);
2340 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
2341 mas_set(mas, mt_start);
2342 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2343 next ? next->vm_start : USER_PGTABLES_CEILING,
2344 mm_wr_locked);
2345 tlb_finish_mmu(&tlb);
2346 }
2347
2348 /*
2349 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
2350 * has already been checked or doesn't make sense to fail.
2351 * VMA Iterator will point to the end VMA.
2352 */
2353 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2354 unsigned long addr, int new_below)
2355 {
2356 struct vma_prepare vp;
2357 struct vm_area_struct *new;
2358 int err;
2359
2360 WARN_ON(vma->vm_start >= addr);
2361 WARN_ON(vma->vm_end <= addr);
2362
2363 if (vma->vm_ops && vma->vm_ops->may_split) {
2364 err = vma->vm_ops->may_split(vma, addr);
2365 if (err)
2366 return err;
2367 }
2368
2369 new = vm_area_dup(vma);
2370 if (!new)
2371 return -ENOMEM;
2372
2373 if (new_below) {
2374 new->vm_end = addr;
2375 } else {
2376 new->vm_start = addr;
2377 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2378 }
2379
2380 err = -ENOMEM;
2381 vma_iter_config(vmi, new->vm_start, new->vm_end);
2382 if (vma_iter_prealloc(vmi, new))
2383 goto out_free_vma;
2384
2385 err = vma_dup_policy(vma, new);
2386 if (err)
2387 goto out_free_vmi;
2388
2389 err = anon_vma_clone(new, vma);
2390 if (err)
2391 goto out_free_mpol;
2392
2393 if (new->vm_file)
2394 get_file(new->vm_file);
2395
2396 if (new->vm_ops && new->vm_ops->open)
2397 new->vm_ops->open(new);
2398
2399 vma_start_write(vma);
2400 vma_start_write(new);
2401
2402 init_vma_prep(&vp, vma);
2403 vp.insert = new;
2404 vma_prepare(&vp);
2405 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
2406
2407 if (new_below) {
2408 vma->vm_start = addr;
2409 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
2410 } else {
2411 vma->vm_end = addr;
2412 }
2413
2414 /* vma_complete stores the new vma */
2415 vma_complete(&vp, vmi, vma->vm_mm);
2416
2417 /* Success. */
2418 if (new_below)
2419 vma_next(vmi);
2420 return 0;
2421
2422 out_free_mpol:
2423 mpol_put(vma_policy(new));
2424 out_free_vmi:
2425 vma_iter_free(vmi);
2426 out_free_vma:
2427 vm_area_free(new);
2428 return err;
2429 }
2430
2431 /*
2432 * Split a vma into two pieces at address 'addr', a new vma is allocated
2433 * either for the first part or the tail.
2434 */
2435 int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2436 unsigned long addr, int new_below)
2437 {
2438 if (vma->vm_mm->map_count >= sysctl_max_map_count)
2439 return -ENOMEM;
2440
2441 return __split_vma(vmi, vma, addr, new_below);
2442 }
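
/*
 * Illustrative sketch (hypothetical caller, variables assumed to be set up
 * as in the callers below): code operating on an arbitrary [start, end)
 * range usually splits the boundary VMAs first so that only whole VMAs
 * remain inside the range, as do_vmi_align_munmap() does with __split_vma().
 * Error handling is omitted.
 *
 *	if (start > vma->vm_start)
 *		error = split_vma(vmi, vma, start, 1);	// new vma keeps the part below start
 *	...
 *	if (vma->vm_end > end)
 *		error = split_vma(vmi, vma, end, 0);	// new vma keeps the part above end
 */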
2443
2444 /*
2445 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
2446 * @vmi: The vma iterator
2447 * @vma: The starting vm_area_struct
2448 * @mm: The mm_struct
2449 * @start: The aligned start address to munmap.
2450 * @end: The aligned end address to munmap.
2451 * @uf: The userfaultfd list_head
2452 * @unlock: Set to true to drop the mmap_lock. Unlocking only happens on
2453 * success.
2454 *
2455 * Return: 0 on success and drops the lock if so directed, error and leaves the
2456 * lock held otherwise.
2457 */
2458 static int
2459 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
2460 struct mm_struct *mm, unsigned long start,
2461 unsigned long end, struct list_head *uf, bool unlock)
2462 {
2463 struct vm_area_struct *prev, *next = NULL;
2464 struct maple_tree mt_detach;
2465 int count = 0;
2466 int error = -ENOMEM;
2467 unsigned long locked_vm = 0;
2468 MA_STATE(mas_detach, &mt_detach, 0, 0);
2469 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2470 mt_on_stack(mt_detach);
2471
2472 /*
2473 * If we need to split any vma, do it now to save pain later.
2474 *
2475 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2476 * unmapped vm_area_struct will remain in use: so lower split_vma
2477 * places tmp vma above, and higher split_vma places tmp vma below.
2478 */
2479
2480 /* Does it split the first one? */
2481 if (start > vma->vm_start) {
2482
2483 /*
2484 * Make sure that map_count on return from munmap() will
2485 * not exceed its limit; but let map_count go just above
2486 * its limit temporarily, to help free resources as expected.
2487 */
2488 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2489 goto map_count_exceeded;
2490
2491 error = __split_vma(vmi, vma, start, 1);
2492 if (error)
2493 goto start_split_failed;
2494 }
2495
2496 /*
2497 * Detach a range of VMAs from the mm. Using next as a temp variable as
2498 * it is always overwritten.
2499 */
2500 next = vma;
2501 do {
2502 /* Does it split the end? */
2503 if (next->vm_end > end) {
2504 error = __split_vma(vmi, next, end, 0);
2505 if (error)
2506 goto end_split_failed;
2507 }
2508 vma_start_write(next);
2509 mas_set(&mas_detach, count);
2510 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
2511 if (error)
2512 goto munmap_gather_failed;
2513 vma_mark_detached(next, true);
2514 if (next->vm_flags & VM_LOCKED)
2515 locked_vm += vma_pages(next);
2516
2517 count++;
2518 if (unlikely(uf)) {
2519 /*
2520 * If userfaultfd_unmap_prep returns an error the vmas
2521 * will remain split, but userland will get a
2522 * highly unexpected error anyway. This is no
2523 * different than the case where the first of the two
2524 * __split_vma fails, but we don't undo the first
2525 * split, even though we could. This failure is
2526 * unlikely enough that it's not worth optimizing for.
2527 */
2528 error = userfaultfd_unmap_prep(next, start, end, uf);
2529
2530 if (error)
2531 goto userfaultfd_error;
2532 }
2533 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
2534 BUG_ON(next->vm_start < start);
2535 BUG_ON(next->vm_start > end);
2536 #endif
2537 } for_each_vma_range(*vmi, next, end);
2538
2539 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
2540 /* Make sure no VMAs are about to be lost. */
2541 {
2542 MA_STATE(test, &mt_detach, 0, 0);
2543 struct vm_area_struct *vma_mas, *vma_test;
2544 int test_count = 0;
2545
2546 vma_iter_set(vmi, start);
2547 rcu_read_lock();
2548 vma_test = mas_find(&test, count - 1);
2549 for_each_vma_range(*vmi, vma_mas, end) {
2550 BUG_ON(vma_mas != vma_test);
2551 test_count++;
2552 vma_test = mas_next(&test, count - 1);
2553 }
2554 rcu_read_unlock();
2555 BUG_ON(count != test_count);
2556 }
2557 #endif
2558
2559 while (vma_iter_addr(vmi) > start)
2560 vma_iter_prev_range(vmi);
2561
2562 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
2563 if (error)
2564 goto clear_tree_failed;
2565
2566 /* Point of no return */
2567 mm->locked_vm -= locked_vm;
2568 mm->map_count -= count;
2569 if (unlock)
2570 mmap_write_downgrade(mm);
2571
2572 prev = vma_iter_prev_range(vmi);
2573 next = vma_next(vmi);
2574 if (next)
2575 vma_iter_prev_range(vmi);
2576
2577 /*
2578 * We can free page tables without write-locking mmap_lock because VMAs
2579 * were isolated before we downgraded mmap_lock.
2580 */
2581 mas_set(&mas_detach, 1);
2582 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
2583 !unlock);
2584 /* Statistics and freeing VMAs */
2585 mas_set(&mas_detach, 0);
2586 remove_mt(mm, &mas_detach);
2587 validate_mm(mm);
2588 if (unlock)
2589 mmap_read_unlock(mm);
2590
2591 __mt_destroy(&mt_detach);
2592 return 0;
2593
2594 clear_tree_failed:
2595 userfaultfd_error:
2596 munmap_gather_failed:
2597 end_split_failed:
2598 mas_set(&mas_detach, 0);
2599 mas_for_each(&mas_detach, next, end)
2600 vma_mark_detached(next, false);
2601
2602 __mt_destroy(&mt_detach);
2603 start_split_failed:
2604 map_count_exceeded:
2605 validate_mm(mm);
2606 return error;
2607 }
2608
2609 /*
2610 * do_vmi_munmap() - munmap a given range.
2611 * @vmi: The vma iterator
2612 * @mm: The mm_struct
2613 * @start: The start address to munmap
2614 * @len: The length of the range to munmap
2615 * @uf: The userfaultfd list_head
2616 * @unlock: set to true if the user wants to drop the mmap_lock on success
2617 *
2618 * This function takes a @vmi that is either pointing to the previous VMA or set
2619 * to MA_START and sets it up to remove the mapping(s). The @len will be
2620 * aligned and any arch_unmap work will be performed.
2621 *
2622 * Return: 0 on success and drops the lock if so directed, error and leaves the
2623 * lock held otherwise.
2624 */
2625 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
2626 unsigned long start, size_t len, struct list_head *uf,
2627 bool unlock)
2628 {
2629 unsigned long end;
2630 struct vm_area_struct *vma;
2631
2632 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2633 return -EINVAL;
2634
2635 end = start + PAGE_ALIGN(len);
2636 if (end == start)
2637 return -EINVAL;
2638
2639 /* arch_unmap() might do unmaps itself. */
2640 arch_unmap(mm, start, end);
2641
2642 /* Find the first overlapping VMA */
2643 vma = vma_find(vmi, end);
2644 if (!vma) {
2645 if (unlock)
2646 mmap_write_unlock(mm);
2647 return 0;
2648 }
2649
2650 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
2651 }
2652
2653 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
2654 * @mm: The mm_struct
2655 * @start: The start address to munmap
2656 * @len: The length to be munmapped.
2657 * @uf: The userfaultfd list_head
2658 *
2659 * Return: 0 on success, error otherwise.
2660 */
2661 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2662 struct list_head *uf)
2663 {
2664 VMA_ITERATOR(vmi, mm, start);
2665
2666 return do_vmi_munmap(&vmi, mm, start, len, uf, false);
2667 }
2668
2669 static unsigned long __mmap_region(struct file *file, unsigned long addr,
2670 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2671 struct list_head *uf)
2672 {
2673 struct mm_struct *mm = current->mm;
2674 struct vm_area_struct *vma = NULL;
2675 struct vm_area_struct *next, *prev, *merge;
2676 pgoff_t pglen = PHYS_PFN(len);
2677 unsigned long charged = 0;
2678 unsigned long end = addr + len;
2679 unsigned long merge_start = addr, merge_end = end;
2680 pgoff_t vm_pgoff;
2681 int error;
2682 VMA_ITERATOR(vmi, mm, addr);
2683
2684 /* Check against address space limit. */
2685 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
2686 unsigned long nr_pages;
2687
2688 /*
2689 * MAP_FIXED may remove pages of mappings that intersects with
2690 * requested mapping. Account for the pages it would unmap.
2691 */
2692 nr_pages = count_vma_pages_range(mm, addr, end);
2693
2694 if (!may_expand_vm(mm, vm_flags,
2695 (len >> PAGE_SHIFT) - nr_pages))
2696 return -ENOMEM;
2697 }
2698
2699 /* Unmap any existing mapping in the area */
2700 if (do_vmi_munmap(&vmi, mm, addr, len, uf, false))
2701 return -ENOMEM;
2702
2703 /*
2704 * Private writable mapping: check memory availability
2705 */
2706 if (accountable_mapping(file, vm_flags)) {
2707 charged = len >> PAGE_SHIFT;
2708 if (security_vm_enough_memory_mm(mm, charged))
2709 return -ENOMEM;
2710 vm_flags |= VM_ACCOUNT;
2711 }
2712
2713 next = vma_next(&vmi);
2714 prev = vma_prev(&vmi);
2715 if (vm_flags & VM_SPECIAL) {
2716 if (prev)
2717 vma_iter_next_range(&vmi);
2718 goto cannot_expand;
2719 }
2720
2721 /* Attempt to expand an old mapping */
2722 /* Check next */
2723 if (next && next->vm_start == end && !vma_policy(next) &&
2724 can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
2725 NULL_VM_UFFD_CTX, NULL)) {
2726 merge_end = next->vm_end;
2727 vma = next;
2728 vm_pgoff = next->vm_pgoff - pglen;
2729 }
2730
2731 /* Check prev */
2732 if (prev && prev->vm_end == addr && !vma_policy(prev) &&
2733 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
2734 pgoff, vma->vm_userfaultfd_ctx, NULL) :
2735 can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
2736 NULL_VM_UFFD_CTX, NULL))) {
2737 merge_start = prev->vm_start;
2738 vma = prev;
2739 vm_pgoff = prev->vm_pgoff;
2740 } else if (prev) {
2741 vma_iter_next_range(&vmi);
2742 }
2743
2744 /* Actually expand, if possible */
2745 if (vma &&
2746 !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
2747 khugepaged_enter_vma(vma, vm_flags);
2748 goto expanded;
2749 }
2750
2751 if (vma == prev)
2752 vma_iter_set(&vmi, addr);
2753 cannot_expand:
2754
2755 /*
2756 * Determine the object being mapped and call the appropriate
2757 * specific mapper. The address has already been validated but
2758 * not unmapped; the old mappings have been removed from the list.
2759 */
2760 vma = vm_area_alloc(mm);
2761 if (!vma) {
2762 error = -ENOMEM;
2763 goto unacct_error;
2764 }
2765
2766 vma_iter_config(&vmi, addr, end);
2767 vma->vm_start = addr;
2768 vma->vm_end = end;
2769 vm_flags_init(vma, vm_flags);
2770 vma->vm_page_prot = vm_get_page_prot(vm_flags);
2771 vma->vm_pgoff = pgoff;
2772
2773 if (vma_iter_prealloc(&vmi, vma)) {
2774 error = -ENOMEM;
2775 goto free_vma;
2776 }
2777
2778 if (file) {
2779 vma->vm_file = get_file(file);
2780 error = mmap_file(file, vma);
2781 if (error)
2782 goto unmap_and_free_file_vma;
2783
2784 /* Drivers cannot alter the address of the VMA. */
2785 WARN_ON_ONCE(addr != vma->vm_start);
2786 /*
2787 * Drivers should not permit writability when previously it was
2788 * disallowed.
2789 */
2790 VM_WARN_ON_ONCE(vm_flags != vma->vm_flags &&
2791 !(vm_flags & VM_MAYWRITE) &&
2792 (vma->vm_flags & VM_MAYWRITE));
2793
2794 vma_iter_config(&vmi, addr, end);
2795 /*
2796 * If vm_flags changed after mmap_file(), we should try merge
2797 * vma again as we may succeed this time.
2798 */
2799 if (unlikely(vm_flags != vma->vm_flags && prev)) {
2800 merge = vma_merge(&vmi, mm, prev, vma->vm_start,
2801 vma->vm_end, vma->vm_flags, NULL,
2802 vma->vm_file, vma->vm_pgoff, NULL,
2803 NULL_VM_UFFD_CTX, NULL);
2804
2805 if (merge) {
2806 /*
2807 * ->mmap() can change vma->vm_file and fput
2808 * the original file. So fput the vma->vm_file
2809 * here or we would add an extra fput for file
2810 * and cause general protection fault
2811 * ultimately.
2812 */
2813 fput(vma->vm_file);
2814 vm_area_free(vma);
2815 vma = merge;
2816 /* Update vm_flags to pick up the change. */
2817 vm_flags = vma->vm_flags;
2818 goto file_expanded;
2819 }
2820 }
2821
2822 vm_flags = vma->vm_flags;
2823 } else if (vm_flags & VM_SHARED) {
2824 error = shmem_zero_setup(vma);
2825 if (error)
2826 goto free_iter_vma;
2827 } else {
2828 vma_set_anonymous(vma);
2829 }
2830
2831 #ifdef CONFIG_SPARC64
2832 /* TODO: Fix SPARC ADI! */
2833 WARN_ON_ONCE(!arch_validate_flags(vm_flags));
2834 #endif
2835
2836 /* Lock the VMA since it is modified after insertion into VMA tree */
2837 vma_start_write(vma);
2838 vma_iter_store(&vmi, vma);
2839 mm->map_count++;
2840 if (vma->vm_file) {
2841 i_mmap_lock_write(vma->vm_file->f_mapping);
2842 if (vma->vm_flags & VM_SHARED)
2843 mapping_allow_writable(vma->vm_file->f_mapping);
2844
2845 flush_dcache_mmap_lock(vma->vm_file->f_mapping);
2846 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
2847 flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
2848 i_mmap_unlock_write(vma->vm_file->f_mapping);
2849 }
2850
2851 /*
2852 * vma_merge() calls khugepaged_enter_vma() as well; the call
2853 * below covers the non-merge case.
2854 */
2855 khugepaged_enter_vma(vma, vma->vm_flags);
2856
2857 file_expanded:
2858 file = vma->vm_file;
2859 ksm_add_vma(vma);
2860 expanded:
2861 perf_event_mmap(vma);
2862
2863 vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
2864 if (vm_flags & VM_LOCKED) {
2865 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2866 is_vm_hugetlb_page(vma) ||
2867 vma == get_gate_vma(current->mm))
2868 vm_flags_clear(vma, VM_LOCKED_MASK);
2869 else
2870 mm->locked_vm += (len >> PAGE_SHIFT);
2871 }
2872
2873 if (file)
2874 uprobe_mmap(vma);
2875
2876 /*
2877 * A new (or expanded) vma always gets soft-dirty status.
2878 * Otherwise the user-space soft-dirty page tracker could not
2879 * distinguish the situation where a vma is unmapped and a new
2880 * one is then mapped in place (which must be treated as a
2881 * completely new data area).
2882 */
2883 vm_flags_set(vma, VM_SOFTDIRTY);
2884
2885 vma_set_page_prot(vma);
2886
2887 return addr;
2888
2889 unmap_and_free_file_vma:
2890 fput(vma->vm_file);
2891 vma->vm_file = NULL;
2892
2893 vma_iter_set(&vmi, vma->vm_end);
2894 /* Undo any partial mapping done by a device driver. */
2895 unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
2896 vma->vm_end, vma->vm_end, true);
2897 free_iter_vma:
2898 vma_iter_free(&vmi);
2899 free_vma:
2900 vm_area_free(vma);
2901 unacct_error:
2902 if (charged)
2903 vm_unacct_memory(charged);
2904 return error;
2905 }
2906
2907 unsigned long mmap_region(struct file *file, unsigned long addr,
2908 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2909 struct list_head *uf)
2910 {
2911 unsigned long ret;
2912 bool writable_file_mapping = false;
2913
2914 /* Check to see if MDWE is applicable. */
2915 if (map_deny_write_exec(vm_flags, vm_flags))
2916 return -EACCES;
2917
2918 /* Allow architectures to sanity-check the vm_flags. */
2919 if (!arch_validate_flags(vm_flags))
2920 return -EINVAL;
2921
2922 /* Map writable and ensure this isn't a sealed memfd. */
2923 if (file && (vm_flags & VM_SHARED)) {
2924 int error = mapping_map_writable(file->f_mapping);
2925
2926 if (error)
2927 return error;
2928 writable_file_mapping = true;
2929 }
2930
2931 ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
2932
2933 /* Clear our write mapping regardless of error. */
2934 if (writable_file_mapping)
2935 mapping_unmap_writable(file->f_mapping);
2936
2937 validate_mm(current->mm);
2938 return ret;
2939 }
2940
2941 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
2942 {
2943 int ret;
2944 struct mm_struct *mm = current->mm;
2945 LIST_HEAD(uf);
2946 VMA_ITERATOR(vmi, mm, start);
2947
2948 if (mmap_write_lock_killable(mm))
2949 return -EINTR;
2950
2951 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2952 if (ret || !unlock)
2953 mmap_write_unlock(mm);
2954
2955 userfaultfd_unmap_complete(mm, &uf);
2956 return ret;
2957 }
2958
2959 int vm_munmap(unsigned long start, size_t len)
2960 {
2961 return __vm_munmap(start, len, false);
2962 }
2963 EXPORT_SYMBOL(vm_munmap);
2964
2965 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2966 {
2967 addr = untagged_addr(addr);
2968 return __vm_munmap(addr, len, true);
2969 }
2970
2971
2972 /*
2973 * Emulation of deprecated remap_file_pages() syscall.
2974 */
2975 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2976 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2977 {
2978
2979 struct mm_struct *mm = current->mm;
2980 struct vm_area_struct *vma;
2981 unsigned long populate = 0;
2982 unsigned long ret = -EINVAL;
2983 struct file *file;
2984
2985 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
2986 current->comm, current->pid);
2987
2988 if (prot)
2989 return ret;
2990 start = start & PAGE_MASK;
2991 size = size & PAGE_MASK;
2992
2993 if (start + size <= start)
2994 return ret;
2995
2996 /* Does pgoff wrap? */
2997 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2998 return ret;
2999
3000 if (mmap_write_lock_killable(mm))
3001 return -EINTR;
3002
3003 vma = vma_lookup(mm, start);
3004
3005 if (!vma || !(vma->vm_flags & VM_SHARED))
3006 goto out;
3007
3008 if (start + size > vma->vm_end) {
3009 VMA_ITERATOR(vmi, mm, vma->vm_end);
3010 struct vm_area_struct *next, *prev = vma;
3011
3012 for_each_vma_range(vmi, next, start + size) {
3013 /* hole between vmas ? */
3014 if (next->vm_start != prev->vm_end)
3015 goto out;
3016
3017 if (next->vm_file != vma->vm_file)
3018 goto out;
3019
3020 if (next->vm_flags != vma->vm_flags)
3021 goto out;
3022
3023 if (start + size <= next->vm_end)
3024 break;
3025
3026 prev = next;
3027 }
3028
3029 if (!next)
3030 goto out;
3031 }
3032
3033 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
3034 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
3035 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
3036
3037 flags &= MAP_NONBLOCK;
3038 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
3039 if (vma->vm_flags & VM_LOCKED)
3040 flags |= MAP_LOCKED;
3041
3042 file = get_file(vma->vm_file);
3043 ret = security_mmap_file(vma->vm_file, prot, flags);
3044 if (ret)
3045 goto out_fput;
3046 ret = do_mmap(vma->vm_file, start, size,
3047 prot, flags, 0, pgoff, &populate, NULL);
3048 out_fput:
3049 fput(file);
3050 out:
3051 mmap_write_unlock(mm);
3052 if (populate)
3053 mm_populate(ret, populate);
3054 if (!IS_ERR_VALUE(ret))
3055 ret = 0;
3056 return ret;
3057 }
3058
3059 /*
3060 * do_vma_munmap() - Unmap a full or partial vma.
3061 * @vmi: The vma iterator pointing at the vma
3062 * @vma: The first vma to be munmapped
3063 * @start: the start of the address to unmap
3064 * @end: The end of the address to unmap
3065 * @uf: The userfaultfd list_head
3066 * @unlock: Drop the lock on success
3067 *
3068 * Unmaps a VMA mapping when the vma iterator is already in position.
3069 * Does not handle alignment.
3070 *
3071 * Return: 0 on success and drops the lock if so directed; on failure, returns an
3072 * error and still holds the lock.
3073 */
3074 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3075 unsigned long start, unsigned long end, struct list_head *uf,
3076 bool unlock)
3077 {
3078 struct mm_struct *mm = vma->vm_mm;
3079
3080 arch_unmap(mm, start, end);
3081 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
3082 }
3083
3084 /*
3085 * do_brk_flags() - Increase the brk vma if the flags match.
3086 * @vmi: The vma iterator
3087 * @addr: The start address
3088 * @len: The length of the increase
3089 * @vma: The vma,
3090 * @flags: The VMA Flags
3091 *
3092 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
3093 * do not match then create a new anonymous VMA. Eventually we may be able to
3094 * do some brk-specific accounting here.
3095 */
3096 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
3097 unsigned long addr, unsigned long len, unsigned long flags)
3098 {
3099 struct mm_struct *mm = current->mm;
3100 struct vma_prepare vp;
3101
3102 /*
3103 * Check against address space limits by the changed size
3104 * Note: This happens *after* clearing old mappings in some code paths.
3105 */
3106 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
3107 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
3108 return -ENOMEM;
3109
3110 if (mm->map_count > sysctl_max_map_count)
3111 return -ENOMEM;
3112
3113 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
3114 return -ENOMEM;
3115
3116 /*
3117 * Expand the existing vma if possible; note that singular lists do not
3118 * occur after forking, so the expand will only happen on new VMAs.
3119 */
3120 if (vma && vma->vm_end == addr && !vma_policy(vma) &&
3121 can_vma_merge_after(vma, flags, NULL, NULL,
3122 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
3123 vma_iter_config(vmi, vma->vm_start, addr + len);
3124 if (vma_iter_prealloc(vmi, vma))
3125 goto unacct_fail;
3126
3127 vma_start_write(vma);
3128
3129 init_vma_prep(&vp, vma);
3130 vma_prepare(&vp);
3131 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
3132 vma->vm_end = addr + len;
3133 vm_flags_set(vma, VM_SOFTDIRTY);
3134 vma_iter_store(vmi, vma);
3135
3136 vma_complete(&vp, vmi, mm);
3137 khugepaged_enter_vma(vma, flags);
3138 goto out;
3139 }
3140
3141 if (vma)
3142 vma_iter_next_range(vmi);
3143 /* create a vma struct for an anonymous mapping */
3144 vma = vm_area_alloc(mm);
3145 if (!vma)
3146 goto unacct_fail;
3147
3148 vma_set_anonymous(vma);
3149 vma->vm_start = addr;
3150 vma->vm_end = addr + len;
3151 vma->vm_pgoff = addr >> PAGE_SHIFT;
3152 vm_flags_init(vma, flags);
3153 vma->vm_page_prot = vm_get_page_prot(flags);
3154 vma_start_write(vma);
3155 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
3156 goto mas_store_fail;
3157
3158 mm->map_count++;
3159 validate_mm(mm);
3160 ksm_add_vma(vma);
3161 out:
3162 perf_event_mmap(vma);
3163 mm->total_vm += len >> PAGE_SHIFT;
3164 mm->data_vm += len >> PAGE_SHIFT;
3165 if (flags & VM_LOCKED)
3166 mm->locked_vm += (len >> PAGE_SHIFT);
3167 vm_flags_set(vma, VM_SOFTDIRTY);
3168 return 0;
3169
3170 mas_store_fail:
3171 vm_area_free(vma);
3172 unacct_fail:
3173 vm_unacct_memory(len >> PAGE_SHIFT);
3174 return -ENOMEM;
3175 }
3176
3177 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3178 {
3179 struct mm_struct *mm = current->mm;
3180 struct vm_area_struct *vma = NULL;
3181 unsigned long len;
3182 int ret;
3183 bool populate;
3184 LIST_HEAD(uf);
3185 VMA_ITERATOR(vmi, mm, addr);
3186
3187 len = PAGE_ALIGN(request);
3188 if (len < request)
3189 return -ENOMEM;
3190 if (!len)
3191 return 0;
3192
3193 /* Until we need other flags, refuse anything except VM_EXEC. */
3194 if ((flags & (~VM_EXEC)) != 0)
3195 return -EINVAL;
3196
3197 if (mmap_write_lock_killable(mm))
3198 return -EINTR;
3199
3200 ret = check_brk_limits(addr, len);
3201 if (ret)
3202 goto limits_failed;
3203
3204 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
3205 if (ret)
3206 goto munmap_failed;
3207
3208 vma = vma_prev(&vmi);
3209 ret = do_brk_flags(&vmi, vma, addr, len, flags);
3210 populate = ((mm->def_flags & VM_LOCKED) != 0);
3211 mmap_write_unlock(mm);
3212 userfaultfd_unmap_complete(mm, &uf);
3213 if (populate && !ret)
3214 mm_populate(addr, len);
3215 return ret;
3216
3217 munmap_failed:
3218 limits_failed:
3219 mmap_write_unlock(mm);
3220 return ret;
3221 }
3222 EXPORT_SYMBOL(vm_brk_flags);
3223
3224 int vm_brk(unsigned long addr, unsigned long len)
3225 {
3226 return vm_brk_flags(addr, len, 0);
3227 }
3228 EXPORT_SYMBOL(vm_brk);
3229
3230 /* Release all mmaps. */
3231 void exit_mmap(struct mm_struct *mm)
3232 {
3233 struct mmu_gather tlb;
3234 struct vm_area_struct *vma;
3235 unsigned long nr_accounted = 0;
3236 MA_STATE(mas, &mm->mm_mt, 0, 0);
3237 int count = 0;
3238
3239 /* mm's last user has gone, and it's about to be pulled down */
3240 mmu_notifier_release(mm);
3241
3242 mmap_read_lock(mm);
3243 arch_exit_mmap(mm);
3244
3245 vma = mas_find(&mas, ULONG_MAX);
3246 if (!vma) {
3247 /* Can happen if dup_mmap() received an OOM */
3248 mmap_read_unlock(mm);
3249 return;
3250 }
3251
3252 lru_add_drain();
3253 flush_cache_mm(mm);
3254 tlb_gather_mmu_fullmm(&tlb, mm);
3255 /* update_hiwater_rss(mm) here? but nobody should be looking */
3256 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
3257 unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
3258 mmap_read_unlock(mm);
3259
3260 /*
3261 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
3262 * because the memory has been already freed.
3263 */
3264 set_bit(MMF_OOM_SKIP, &mm->flags);
3265 mmap_write_lock(mm);
3266 mt_clear_in_rcu(&mm->mm_mt);
3267 mas_set(&mas, vma->vm_end);
3268 free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS,
3269 USER_PGTABLES_CEILING, true);
3270 tlb_finish_mmu(&tlb);
3271
3272 /*
3273 * Walk the list again, actually closing and freeing it, with preemption
3274 * enabled, without holding any MM locks besides the unreachable
3275 * mmap_write_lock.
3276 */
3277 mas_set(&mas, vma->vm_end);
3278 do {
3279 if (vma->vm_flags & VM_ACCOUNT)
3280 nr_accounted += vma_pages(vma);
3281 remove_vma(vma, true);
3282 count++;
3283 cond_resched();
3284 } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
3285
3286 BUG_ON(count != mm->map_count);
3287
3288 trace_exit_mmap(mm);
3289 __mt_destroy(&mm->mm_mt);
3290 mmap_write_unlock(mm);
3291 vm_unacct_memory(nr_accounted);
3292 }
3293
3294 /* Insert vm structure into process list sorted by address
3295 * and into the inode's i_mmap tree. If vm_file is non-NULL
3296 * then i_mmap_rwsem is taken here.
3297 */
3298 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3299 {
3300 unsigned long charged = vma_pages(vma);
3301
3302
3303 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3304 return -ENOMEM;
3305
3306 if ((vma->vm_flags & VM_ACCOUNT) &&
3307 security_vm_enough_memory_mm(mm, charged))
3308 return -ENOMEM;
3309
3310 /*
3311 * The vm_pgoff of a purely anonymous vma should be irrelevant
3312 * until its first write fault, when page's anon_vma and index
3313 * are set. But now set the vm_pgoff it will almost certainly
3314 * end up with (unless mremap moves it elsewhere before that
3315 * first write fault), so /proc/pid/maps tells a consistent story.
3316 *
3317 * By setting it to reflect the virtual start address of the
3318 * vma, merges and splits can happen in a seamless way, just
3319 * using the existing file pgoff checks and manipulations.
3320 * Similarly in do_mmap and in do_brk_flags.
3321 */
3322 if (vma_is_anonymous(vma)) {
3323 BUG_ON(vma->anon_vma);
3324 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3325 }
3326
3327 if (vma_link(mm, vma)) {
3328 vm_unacct_memory(charged);
3329 return -ENOMEM;
3330 }
3331
3332 return 0;
3333 }
3334
3335 /*
3336 * Copy the vma structure to a new location in the same mm,
3337 * prior to moving page table entries, to effect an mremap move.
3338 */
3339 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3340 unsigned long addr, unsigned long len, pgoff_t pgoff,
3341 bool *need_rmap_locks)
3342 {
3343 struct vm_area_struct *vma = *vmap;
3344 unsigned long vma_start = vma->vm_start;
3345 struct mm_struct *mm = vma->vm_mm;
3346 struct vm_area_struct *new_vma, *prev;
3347 bool faulted_in_anon_vma = true;
3348 VMA_ITERATOR(vmi, mm, addr);
3349
3350 /*
3351 * If anonymous vma has not yet been faulted, update new pgoff
3352 * to match new location, to increase its chance of merging.
3353 */
3354 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3355 pgoff = addr >> PAGE_SHIFT;
3356 faulted_in_anon_vma = false;
3357 }
3358
3359 new_vma = find_vma_prev(mm, addr, &prev);
3360 if (new_vma && new_vma->vm_start < addr + len)
3361 return NULL; /* should never get here */
3362
3363 new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags,
3364 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3365 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3366 if (new_vma) {
3367 /*
3368 * Source vma may have been merged into new_vma
3369 */
3370 if (unlikely(vma_start >= new_vma->vm_start &&
3371 vma_start < new_vma->vm_end)) {
3372 /*
3373 * The only way we can get a vma_merge with
3374 * self during an mremap is if the vma hasn't
3375 * been faulted in yet and we were allowed to
3376 * reset the dst vma->vm_pgoff to the
3377 * destination address of the mremap to allow
3378 * the merge to happen. mremap must change the
3379 * vm_pgoff linearity between src and dst vmas
3380 * (in turn preventing a vma_merge) to be
3381 * safe. It is only safe to keep the vm_pgoff
3382 * linear if there are no pages mapped yet.
3383 */
3384 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3385 *vmap = vma = new_vma;
3386 }
3387 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3388 } else {
3389 new_vma = vm_area_dup(vma);
3390 if (!new_vma)
3391 goto out;
3392 new_vma->vm_start = addr;
3393 new_vma->vm_end = addr + len;
3394 new_vma->vm_pgoff = pgoff;
3395 if (vma_dup_policy(vma, new_vma))
3396 goto out_free_vma;
3397 if (anon_vma_clone(new_vma, vma))
3398 goto out_free_mempol;
3399 if (new_vma->vm_file)
3400 get_file(new_vma->vm_file);
3401 if (new_vma->vm_ops && new_vma->vm_ops->open)
3402 new_vma->vm_ops->open(new_vma);
3403 if (vma_link(mm, new_vma))
3404 goto out_vma_link;
3405 *need_rmap_locks = false;
3406 }
3407 return new_vma;
3408
3409 out_vma_link:
3410 vma_close(new_vma);
3411
3412 if (new_vma->vm_file)
3413 fput(new_vma->vm_file);
3414
3415 unlink_anon_vmas(new_vma);
3416 out_free_mempol:
3417 mpol_put(vma_policy(new_vma));
3418 out_free_vma:
3419 vm_area_free(new_vma);
3420 out:
3421 return NULL;
3422 }
3423
3424 /*
3425 * Return true if the calling process may expand its vm space by the passed
3426 * number of pages
3427 */
3428 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3429 {
3430 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3431 return false;
3432
3433 if (is_data_mapping(flags) &&
3434 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3435 /* Workaround for Valgrind */
3436 if (rlimit(RLIMIT_DATA) == 0 &&
3437 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3438 return true;
3439
3440 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
3441 current->comm, current->pid,
3442 (mm->data_vm + npages) << PAGE_SHIFT,
3443 rlimit(RLIMIT_DATA),
3444 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3445
3446 if (!ignore_rlimit_data)
3447 return false;
3448 }
3449
3450 return true;
3451 }
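
/*
 * Illustrative note: rlimits are in bytes while the mm counters are in
 * pages, hence the ">> PAGE_SHIFT" conversions above. For example, with
 * 4 KiB pages an RLIMIT_DATA of 64 MiB allows 64 MiB >> 12 = 16384 data
 * pages.
 */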
3452
3453 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3454 {
3455 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
3456
3457 if (is_exec_mapping(flags))
3458 mm->exec_vm += npages;
3459 else if (is_stack_mapping(flags))
3460 mm->stack_vm += npages;
3461 else if (is_data_mapping(flags))
3462 mm->data_vm += npages;
3463 }
3464
3465 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3466
3467 /*
3468 * Having a close hook prevents vma merging regardless of flags.
3469 */
3470 static void special_mapping_close(struct vm_area_struct *vma)
3471 {
3472 }
3473
3474 static const char *special_mapping_name(struct vm_area_struct *vma)
3475 {
3476 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3477 }
3478
3479 static int special_mapping_mremap(struct vm_area_struct *new_vma)
3480 {
3481 struct vm_special_mapping *sm = new_vma->vm_private_data;
3482
3483 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3484 return -EFAULT;
3485
3486 if (sm->mremap)
3487 return sm->mremap(sm, new_vma);
3488
3489 return 0;
3490 }
3491
3492 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
3493 {
3494 /*
3495 * Forbid splitting special mappings - kernel has expectations over
3496 * the number of pages in mapping. Together with VM_DONTEXPAND
3497 * the size of vma should stay the same over the special mapping's
3498 * lifetime.
3499 */
3500 return -EINVAL;
3501 }
3502
static const struct vm_operations_struct special_mapping_vmops = {
        .close = special_mapping_close,
        .fault = special_mapping_fault,
        .mremap = special_mapping_mremap,
        .name = special_mapping_name,
        /* The vDSO code relies on VVAR not being accessible remotely */
        .access = NULL,
        .may_split = special_mapping_split,
};

static const struct vm_operations_struct legacy_special_mapping_vmops = {
        .close = special_mapping_close,
        .fault = special_mapping_fault,
};

static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        pgoff_t pgoff;
        struct page **pages;

        if (vma->vm_ops == &legacy_special_mapping_vmops) {
                pages = vma->vm_private_data;
        } else {
                struct vm_special_mapping *sm = vma->vm_private_data;

                if (sm->fault)
                        return sm->fault(sm, vmf->vma, vmf);

                pages = sm->pages;
        }

        for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
                pgoff--;

        if (*pages) {
                struct page *page = *pages;
                get_page(page);
                vmf->page = page;
                return 0;
        }

        return VM_FAULT_SIGBUS;
}

static struct vm_area_struct *__install_special_mapping(
        struct mm_struct *mm,
        unsigned long addr, unsigned long len,
        unsigned long vm_flags, void *priv,
        const struct vm_operations_struct *ops)
{
        int ret;
        struct vm_area_struct *vma;

        vma = vm_area_alloc(mm);
        if (unlikely(vma == NULL))
                return ERR_PTR(-ENOMEM);

        vma->vm_start = addr;
        vma->vm_end = addr + len;

        vm_flags_init(vma, (vm_flags | mm->def_flags |
                      VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        vma->vm_ops = ops;
        vma->vm_private_data = priv;

        ret = insert_vm_struct(mm, vma);
        if (ret)
                goto out;

        vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);

        perf_event_mmap(vma);

        return vma;

out:
        vm_area_free(vma);
        return ERR_PTR(ret);
}

bool vma_is_special_mapping(const struct vm_area_struct *vma,
        const struct vm_special_mapping *sm)
{
        return vma->vm_private_data == sm &&
                (vma->vm_ops == &special_mapping_vmops ||
                 vma->vm_ops == &legacy_special_mapping_vmops);
}
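
/*
 * Illustrative sketch (not part of this file): architectures typically use
 * vma_is_special_mapping() to recognise their own special vmas, for example
 * to decide whether a given vma is the vDSO. The vdso_mapping object and the
 * helper below are hypothetical.
 */
#if 0
static const struct vm_special_mapping example_vdso_mapping = {
        .name = "[vdso]",
};

static bool example_vma_is_vdso(const struct vm_area_struct *vma)
{
        return vma_is_special_mapping(vma, &example_vdso_mapping);
}
#endif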

/*
 * Called with mm->mmap_lock held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
 */
struct vm_area_struct *_install_special_mapping(
        struct mm_struct *mm,
        unsigned long addr, unsigned long len,
        unsigned long vm_flags, const struct vm_special_mapping *spec)
{
        return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
                                        &special_mapping_vmops);
}
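
/*
 * Illustrative sketch (not part of this file): a typical arch-level user,
 * such as vDSO setup code, describes the mapping with a static
 * vm_special_mapping and installs it while holding mmap_lock for writing.
 * The names, the page array and the chosen flags below are hypothetical.
 */
#if 0
/* Hypothetically filled with the vDSO image page at boot; NULL-terminated. */
static struct page *example_vdso_pages[2];

static const struct vm_special_mapping example_vdso_spec = {
        .name  = "[vdso]",
        .pages = example_vdso_pages,
};

static int example_map_vdso(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma;

        /* Caller holds mmap_lock for writing (e.g. via mmap_write_lock()). */
        vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
                                       &example_vdso_spec);
        return PTR_ERR_OR_ZERO(vma);
}
#endif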

int install_special_mapping(struct mm_struct *mm,
                            unsigned long addr, unsigned long len,
                            unsigned long vm_flags, struct page **pages)
{
        struct vm_area_struct *vma = __install_special_mapping(
                mm, addr, len, vm_flags, (void *)pages,
                &legacy_special_mapping_vmops);

        return PTR_ERR_OR_ZERO(vma);
}

static DEFINE_MUTEX(mm_all_locks_mutex);

static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
{
        if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
                /*
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
                down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
                /*
                 * We can safely modify head.next after taking the
                 * anon_vma->root->rwsem. If some other vma in this mm shares
                 * the same anon_vma we won't take it again.
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us thanks to the
                 * anon_vma->root->rwsem.
                 */
                if (__test_and_set_bit(0, (unsigned long *)
                                       &anon_vma->root->rb_root.rb_root.rb_node))
                        BUG();
        }
}

static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
{
        if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
                /*
                 * AS_MM_ALL_LOCKS can't change from under us because
                 * we hold the mm_all_locks_mutex.
                 *
                 * Operations on ->flags have to be atomic because
                 * even if AS_MM_ALL_LOCKS is stable thanks to the
                 * mm_all_locks_mutex, there may be other cpus
                 * changing other bitflags in parallel to us.
                 */
                if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                        BUG();
                down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
        }
}

/*
 * This operation locks against the VM for all pte/vma/mm related
 * operations that could ever happen on a certain mm. This includes
 * vmtruncate, try_to_unmap, and all page faults.
 *
 * The caller must take the mmap_lock in write mode before calling
 * mm_take_all_locks(). The caller isn't allowed to release the
 * mmap_lock until mm_drop_all_locks() returns.
 *
 * mmap_lock in write mode is required in order to block all operations
 * that could modify pagetables and free pages without need of
 * altering the vma layout. It's also needed in write mode to avoid new
 * anon_vmas being associated with existing vmas.
 *
 * A single task can't take more than one mm_take_all_locks() in a row
 * or it would deadlock.
 *
 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
 * mapping->flags avoid taking the same lock twice if more than one
 * vma in this mm is backed by the same anon_vma or address_space.
 *
 * We take the locks in the following order, according to the comment at
 * the beginning of mm/rmap.c:
 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
 *   hugetlb mapping);
 * - all vmas marked locked;
 * - all i_mmap_rwsem locks;
 * - all anon_vma->rwsem locks.
 *
 * We can take all locks within these types in any order because the VM code
 * doesn't nest them and we are protected from parallel mm_take_all_locks()
 * by mm_all_locks_mutex.
 *
 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
 * that may have to take thousands of locks.
 *
 * mm_take_all_locks() can fail if it's interrupted by signals.
 */
int mm_take_all_locks(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
        MA_STATE(mas, &mm->mm_mt, 0, 0);

        mmap_assert_write_locked(mm);

        mutex_lock(&mm_all_locks_mutex);

        /*
         * vma_start_write() does not have a complement in mm_drop_all_locks()
         * because vma_start_write() is always asymmetrical; it marks a VMA as
         * being written to until mmap_write_unlock() or mmap_write_downgrade()
         * is reached.
         */
        mas_for_each(&mas, vma, ULONG_MAX) {
                if (signal_pending(current))
                        goto out_unlock;
                vma_start_write(vma);
        }

        mas_set(&mas, 0);
        mas_for_each(&mas, vma, ULONG_MAX) {
                if (signal_pending(current))
                        goto out_unlock;
                if (vma->vm_file && vma->vm_file->f_mapping &&
                                is_vm_hugetlb_page(vma))
                        vm_lock_mapping(mm, vma->vm_file->f_mapping);
        }

        mas_set(&mas, 0);
        mas_for_each(&mas, vma, ULONG_MAX) {
                if (signal_pending(current))
                        goto out_unlock;
                if (vma->vm_file && vma->vm_file->f_mapping &&
                                !is_vm_hugetlb_page(vma))
                        vm_lock_mapping(mm, vma->vm_file->f_mapping);
        }

        mas_set(&mas, 0);
        mas_for_each(&mas, vma, ULONG_MAX) {
                if (signal_pending(current))
                        goto out_unlock;
                if (vma->anon_vma)
                        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                                vm_lock_anon_vma(mm, avc->anon_vma);
        }

        return 0;

out_unlock:
        mm_drop_all_locks(mm);
        return -EINTR;
}
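
/*
 * Illustrative sketch (not part of this file): mm_take_all_locks() is used by
 * callers that must exclude every page fault and rmap walk on the mm at once
 * (mmu notifier registration is the classic case). The surrounding locking
 * pattern is the one documented above; the work done in the middle is
 * hypothetical.
 */
#if 0
static int example_quiesce_mm(struct mm_struct *mm)
{
        int ret;

        mmap_write_lock(mm);
        ret = mm_take_all_locks(mm);    /* may fail with -EINTR on a signal */
        if (ret)
                goto out_unlock;

        /* ... operate while all i_mmap and anon_vma locks are held ... */

        mm_drop_all_locks(mm);
out_unlock:
        mmap_write_unlock(mm);
        return ret;
}
#endif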

static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
{
        if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
                /*
                 * The LSB of head.next can't change to 0 from under
                 * us because we hold the mm_all_locks_mutex.
                 *
                 * We must however clear the bitflag before unlocking
                 * the vma so the users using the anon_vma->rb_root will
                 * never see our bitflag.
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us until we release the
                 * anon_vma->root->rwsem.
                 */
                if (!__test_and_clear_bit(0, (unsigned long *)
                                          &anon_vma->root->rb_root.rb_root.rb_node))
                        BUG();
                anon_vma_unlock_write(anon_vma);
        }
}

static void vm_unlock_mapping(struct address_space *mapping)
{
        if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
                /*
                 * AS_MM_ALL_LOCKS can't change to 0 from under us
                 * because we hold the mm_all_locks_mutex.
                 */
                i_mmap_unlock_write(mapping);
                if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
                                        &mapping->flags))
                        BUG();
        }
}

/*
 * The mmap_lock cannot be released by the caller until
 * mm_drop_all_locks() returns.
 */
void mm_drop_all_locks(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
        MA_STATE(mas, &mm->mm_mt, 0, 0);

        mmap_assert_write_locked(mm);
        BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));

        mas_for_each(&mas, vma, ULONG_MAX) {
                if (vma->anon_vma)
                        list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                                vm_unlock_anon_vma(avc->anon_vma);
                if (vma->vm_file && vma->vm_file->f_mapping)
                        vm_unlock_mapping(vma->vm_file->f_mapping);
        }

        mutex_unlock(&mm_all_locks_mutex);
}

/*
 * initialise the percpu counter for VM
 */
void __init mmap_init(void)
{
        int ret;

        ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
        VM_BUG_ON(ret);
}

/*
 * Initialise sysctl_user_reserve_kbytes.
 *
 * This is intended to prevent a user from starting a single memory hogging
 * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
 * mode.
 *
 * The default value is min(3% of free memory, 128MB).
 * 128MB is enough to recover with sshd/login, bash, and top/kill.
 */
static int init_user_reserve(void)
{
        unsigned long free_kbytes;

        free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
        return 0;
}
subsys_initcall(init_user_reserve);
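
/*
 * Worked example for the default above: free_kbytes / 32 is roughly 3% of
 * free memory, and 1UL << 17 kilobytes is 131072 kB = 128 MB. With, say,
 * 8 GB free (8388608 kB) the 3% term would be 262144 kB, so the reserve is
 * clamped to the 128 MB ceiling; below about 4 GB free the 3% term wins.
 */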

/*
 * Initialise sysctl_admin_reserve_kbytes.
 *
 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
 * to log in and kill a memory hogging process.
 *
 * Systems with more than 256MB will reserve 8MB, enough to recover
 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
 * only reserve 3% of free pages by default.
 */
static int init_admin_reserve(void)
{
        unsigned long free_kbytes;

        free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
        return 0;
}
subsys_initcall(init_admin_reserve);
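
/*
 * Worked example for the default above: 1UL << 13 kilobytes is 8192 kB =
 * 8 MB, and free_kbytes / 32 reaches that value at 262144 kB (256 MB) of
 * free memory, which is where the "more than 256MB" rule in the comment
 * comes from.
 */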

/*
 * Reinitialise user and admin reserves if memory is added or removed.
 *
 * The default user reserve max is 128MB, and the default max for the
 * admin reserve is 8MB. These are usually, but not always, enough to
 * enable recovery from a memory hogging process using login/sshd, a shell,
 * and tools like top. It may make sense to increase or even disable the
 * reserve depending on the existence of swap or variations in the recovery
 * tools. So, the admin may have changed them.
 *
 * If memory is added and the reserves have been eliminated or increased above
 * the default max, then we'll trust the admin.
 *
 * If memory is removed and there isn't enough free memory, then we
 * need to reset the reserves.
 *
 * Otherwise keep the reserve set by the admin.
 */
static int reserve_mem_notifier(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        unsigned long tmp, free_kbytes;

        switch (action) {
        case MEM_ONLINE:
                /* Default max is 128MB. Leave alone if modified by operator. */
                tmp = sysctl_user_reserve_kbytes;
                if (0 < tmp && tmp < (1UL << 17))
                        init_user_reserve();

                /* Default max is 8MB. Leave alone if modified by operator. */
                tmp = sysctl_admin_reserve_kbytes;
                if (0 < tmp && tmp < (1UL << 13))
                        init_admin_reserve();

                break;
        case MEM_OFFLINE:
                free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));

                if (sysctl_user_reserve_kbytes > free_kbytes) {
                        init_user_reserve();
                        pr_info("vm.user_reserve_kbytes reset to %lu\n",
                                sysctl_user_reserve_kbytes);
                }

                if (sysctl_admin_reserve_kbytes > free_kbytes) {
                        init_admin_reserve();
                        pr_info("vm.admin_reserve_kbytes reset to %lu\n",
                                sysctl_admin_reserve_kbytes);
                }
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static int __meminit init_reserve_notifier(void)
{
        if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
                pr_err("Failed registering memory add/remove notifier for admin reserve\n");

        return 0;
}
subsys_initcall(init_reserve_notifier);