// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	return pud;
}

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = get_old_pud(mm, addr);
	if (!pud)
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}
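
/*
 * Both lookup helpers above walk the generic five-level layout
 * (pgd -> p4d -> pud -> pmd) and bail out with NULL as soon as a
 * level is empty or bad. On architectures with fewer paging levels
 * the intermediate steps (e.g. p4d_offset()) are compile-time no-ops,
 * so the same walk works everywhere.
 */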

static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;

	return pud_alloc(mm, p4d, addr);
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = alloc_new_pud(mm, vma, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}
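
/*
 * Note the asymmetry with get_old_pud()/get_old_pmd() above: the source
 * side must never allocate (a missing table simply means there is
 * nothing to move), while the destination side allocates tables on
 * demand so the moved entries have somewhere to land.
 */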

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set the soft-dirty bit so userspace can notice
	 * that the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}
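
/*
 * With CONFIG_MEM_SOFT_DIRTY, the soft-dirty bit set above is what
 * userspace observes as bit 55 of each /proc/<pid>/pagemap entry, so a
 * checkpointing tool (e.g. CRIU) treats a remapped page the same way as
 * a written-to page. See Documentation/admin-guide/mm/soft-dirty.rst.
 */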

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using vma_is_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_lock prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}
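
/*
 * The "- 1" in the pte_unmap() calls above compensates for the loop
 * post-incrementing both iterators one step past the last entry that
 * was processed; the unmap must be done on a pointer that still lies
 * within the mapped page-table page.
 */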

#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 *
	 * However, there's a case during execve() where we use mremap
	 * to move the initial stack, and in that case the target area
	 * may overlap the source area (always moving down).
	 *
	 * If everything is PMD-aligned, that works fine, as moving
	 * each pmd down will clear the source pmd. But if we first
	 * have a few 4kB-only pages that get moved down, and then
	 * hit the "now the rest is PMD-aligned, let's do everything
	 * one pmd at a time", we will still have the old (now empty
	 * of any 4kB pages, but still there) PMD in the page table
	 * tree.
	 *
	 * Warn on it once - because we really should try to figure
	 * out how to do this better - but then say "I won't move
	 * this pmd".
	 *
	 * One alternative might be to just unmap the target pmd at
	 * this point, and verify that it really is empty. We'll see.
	 */
	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pmd(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
		pmd_t *new_pmd)
{
	return false;
}
#endif
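
/*
 * The payoff of move_normal_pmd(): when source and destination are both
 * PMD-aligned, the whole pte table page is simply re-parented under the
 * destination pmd, instead of copying up to PTRS_PER_PTE (512 with
 * 4 KiB pages) individual ptes in move_ptes().
 */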

#ifdef CONFIG_HAVE_MOVE_PUD
static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pud_t pud;

	/*
	 * The destination pud shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON_ONCE(!pud_none(*new_pud)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_lock prevents deadlock.
	 */
	old_ptl = pud_lock(vma->vm_mm, old_pud);
	new_ptl = pud_lockptr(mm, new_pud);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the pud */
	pud = *old_pud;
	pud_clear(old_pud);

	VM_BUG_ON(!pud_none(*new_pud));

	/* Set the new pud */
	set_pud_at(mm, new_addr, new_pud, pud);
	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#else
static inline bool move_normal_pud(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
		pud_t *new_pud)
{
	return false;
}
#endif

enum pgt_entry {
	NORMAL_PMD,
	HPAGE_PMD,
	NORMAL_PUD,
};

/*
 * Returns an extent of the corresponding size for the given pgt_entry if the
 * full step is valid. Otherwise returns a smaller extent, bounded by the end
 * of the source range and by the next destination pgt_entry boundary.
 */
static __always_inline unsigned long get_extent(enum pgt_entry entry,
			unsigned long old_addr, unsigned long old_end,
			unsigned long new_addr)
{
	unsigned long next, extent, mask, size;

	switch (entry) {
	case HPAGE_PMD:
	case NORMAL_PMD:
		mask = PMD_MASK;
		size = PMD_SIZE;
		break;
	case NORMAL_PUD:
		mask = PUD_MASK;
		size = PUD_SIZE;
		break;
	default:
		BUILD_BUG();
		break;
	}

	next = (old_addr + size) & mask;
	/* even if next overflowed, extent below will be ok */
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}
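
/*
 * Worked example, assuming 2 MiB PMDs: for old_addr == 0x1200000,
 * old_end == 0x1600000 and new_addr == 0x2b00000, the source side would
 * allow a full 0x200000 step, but the destination hits its next PMD
 * boundary at 0x2c00000 after only 0x100000 bytes, so
 * get_extent(NORMAL_PMD, ...) returns 0x100000.
 */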

/*
 * Attempts to speed up the move by moving the entry at the level
 * corresponding to pgt_entry. Returns true if the move was successful,
 * else false.
 */
static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
			unsigned long old_addr, unsigned long new_addr,
			void *old_entry, void *new_entry, bool need_rmap_locks)
{
	bool moved = false;

	/* See comment in move_ptes() */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	switch (entry) {
	case NORMAL_PMD:
		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case NORMAL_PUD:
		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
					new_entry);
		break;
	case HPAGE_PMD:
		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
			move_huge_pmd(vma, old_addr, new_addr, old_entry,
				      new_entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	if (need_rmap_locks)
		drop_rmap_locks(vma);

	return moved;
}

unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		/*
		 * If extent is PUD-sized try to speed up the move by moving at the
		 * PUD level if possible.
		 */
		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
			pud_t *old_pud, *new_pud;

			old_pud = get_old_pud(vma->vm_mm, old_addr);
			if (!old_pud)
				continue;
			new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
			if (!new_pud)
				break;
			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
					   old_pud, new_pud, need_rmap_locks))
				continue;
		}

		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
		    pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE &&
			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
			   extent == PMD_SIZE) {
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
					   old_pmd, new_pmd, need_rmap_locks))
				continue;
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}
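
/*
 * Note that move_page_tables() returns the number of bytes actually
 * moved, which may be less than len if a page-table allocation for the
 * destination failed mid-walk. move_vma() relies on this to move the
 * already-relocated entries back before bailing out.
 */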

static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, unsigned long flags,
		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err = 0;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(), which
	 * may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (vma->vm_ops && vma->vm_ops->may_split) {
		if (vma->vm_start != old_addr)
			err = vma->vm_ops->may_split(vma, old_addr);
		if (!err && vma->vm_end != old_addr + old_len)
			err = vma->vm_ops->may_split(vma, old_addr + old_len);
		if (err)
			return err;
	}

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
		if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
			return -ENOMEM;
	}

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma) {
		if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
			vm_unacct_memory(new_len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma, flags);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from the new area to the old,
		 * which will succeed since the page tables are still there,
		 * and then proceed to unmap the new area instead of the old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the pfn tracking code that the pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
		vma->vm_flags &= VM_LOCKED_CLEAR_MASK;

		/* Because we won't unmap we don't need to touch locked_vm */
		return new_addr;
	}

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
			vm_acct_memory(new_len >> PAGE_SHIFT);
		excess = 0;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	return new_addr;
}
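
/*
 * A note on the MREMAP_DONTUNMAP path above: the old vma is kept in
 * place but its ptes have all been moved away, so the old range
 * effectively behaves like a freshly faulted anonymous area afterwards;
 * only VM_LOCKED/VM_LOCKONFAULT are cleared on it.
 */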

static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long flags,
	unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if (flags & MREMAP_DONTUNMAP && (!vma_is_anonymous(vma) ||
			vma->vm_flags & VM_SHARED))
		return ERR_PTR(-EINVAL);

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}
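
/*
 * On success with VM_ACCOUNT set, *p carries the number of pages that
 * were charged to the commit accounting; both callers are responsible
 * for undoing that charge with vm_unacct_memory() if the operation
 * fails later on.
 */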

static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		unsigned long flags, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags = 0;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario, we pre-compute whether the whole
	 * operation has a high chance of succeeding map-wise.
	 * The worst case is when both vmas (new_addr and old_addr) get
	 * split in 3 before being unmapped.
	 * That means 2 more maps (1 for each) on top of the ones we already
	 * hold. Check whether the current map count plus 2 still leaves us
	 * 4 maps below the threshold, otherwise return -ENOMEM here to be
	 * on the safe side.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	if (flags & MREMAP_FIXED) {
		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
		if (ret)
			goto out;
	}

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
	if (flags & MREMAP_DONTUNMAP &&
		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
		ret = -ENOMEM;
		goto out;
	}

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;

	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out1;

	/* We got a new mapping */
	if (!(flags & MREMAP_FIXED))
		new_addr = ret;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
		       uf_unmap);

	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;
	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}
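
/*
 * vma_expandable() checks three things for in-place growth: the new end
 * does not wrap around, the next vma does not intersect the grown
 * range, and get_unmapped_area() (with MAP_FIXED) lets the architecture
 * veto the enlarged mapping.
 */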

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	/*
	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
	 * in the process.
	 */
	if (flags & MREMAP_DONTUNMAP &&
			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case for the DOS-emu
	 * "duplicate shm area" thing. But a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (mmap_write_lock_killable(current->mm))
		return -EINTR;

	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, flags, &uf, &uf_unmap_early,
				&uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_lock to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				  &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_lock is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len extends exactly to the end of the area. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (IS_ERR_VALUE(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, flags, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		mmap_read_unlock(current->mm);
	else
		mmap_write_unlock(current->mm);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
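
/*
 * Userspace usage sketch (illustrative only; needs _GNU_SOURCE and
 * <sys/mman.h>): grow an anonymous mapping from one page to two,
 * allowing the kernel to relocate it if it cannot be expanded in place:
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 8192, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 */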
968