1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *	mm/mremap.c
4  *
5  *	(C) Copyright 1996 Linus Torvalds
6  *
7  *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
8  *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/mm_inline.h>
13 #include <linux/hugetlb.h>
14 #include <linux/shm.h>
15 #include <linux/ksm.h>
16 #include <linux/mman.h>
17 #include <linux/swap.h>
18 #include <linux/capability.h>
19 #include <linux/fs.h>
20 #include <linux/swapops.h>
21 #include <linux/highmem.h>
22 #include <linux/security.h>
23 #include <linux/syscalls.h>
24 #include <linux/mmu_notifier.h>
25 #include <linux/uaccess.h>
26 #include <linux/userfaultfd_k.h>
27 #include <linux/mempolicy.h>
28 
29 #include <asm/cacheflush.h>
30 #include <asm/tlb.h>
31 #include <asm/pgalloc.h>
32 
33 #include "internal.h"
34 
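/*
 * Walk the existing (source) page tables and return a pointer to the PUD
 * entry covering @addr, or NULL if any level is missing or bad.
 */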
35 static pud_t *get_old_pud(struct mm_struct *mm, unsigned long addr)
36 {
37 	pgd_t *pgd;
38 	p4d_t *p4d;
39 	pud_t *pud;
40 
41 	pgd = pgd_offset(mm, addr);
42 	if (pgd_none_or_clear_bad(pgd))
43 		return NULL;
44 
45 	p4d = p4d_offset(pgd, addr);
46 	if (p4d_none_or_clear_bad(p4d))
47 		return NULL;
48 
49 	pud = pud_offset(p4d, addr);
50 	if (pud_none_or_clear_bad(pud))
51 		return NULL;
52 
53 	return pud;
54 }
55 
56 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
57 {
58 	pud_t *pud;
59 	pmd_t *pmd;
60 
61 	pud = get_old_pud(mm, addr);
62 	if (!pud)
63 		return NULL;
64 
65 	pmd = pmd_offset(pud, addr);
66 	if (pmd_none(*pmd))
67 		return NULL;
68 
69 	return pmd;
70 }
71 
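/*
 * Allocate (if not already present) the P4D and PUD levels for @addr in the
 * destination page tables; returns NULL on allocation failure.
 */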
72 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
73 			    unsigned long addr)
74 {
75 	pgd_t *pgd;
76 	p4d_t *p4d;
77 
78 	pgd = pgd_offset(mm, addr);
79 	p4d = p4d_alloc(mm, pgd, addr);
80 	if (!p4d)
81 		return NULL;
82 
83 	return pud_alloc(mm, p4d, addr);
84 }
85 
86 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
87 			    unsigned long addr)
88 {
89 	pud_t *pud;
90 	pmd_t *pmd;
91 
92 	pud = alloc_new_pud(mm, vma, addr);
93 	if (!pud)
94 		return NULL;
95 
96 	pmd = pmd_alloc(mm, pud, addr);
97 	if (!pmd)
98 		return NULL;
99 
100 	VM_BUG_ON(pmd_trans_huge(*pmd));
101 
102 	return pmd;
103 }
104 
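/*
 * take_rmap_locks()/drop_rmap_locks() grab and release the file's
 * i_mmap_rwsem and the anon_vma lock so that rmap walks see either the old
 * or the new page tables while they are being moved (see the comment in
 * move_ptes()).
 */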
105 static void take_rmap_locks(struct vm_area_struct *vma)
106 {
107 	if (vma->vm_file)
108 		i_mmap_lock_write(vma->vm_file->f_mapping);
109 	if (vma->anon_vma)
110 		anon_vma_lock_write(vma->anon_vma);
111 }
112 
113 static void drop_rmap_locks(struct vm_area_struct *vma)
114 {
115 	if (vma->anon_vma)
116 		anon_vma_unlock_write(vma->anon_vma);
117 	if (vma->vm_file)
118 		i_mmap_unlock_write(vma->vm_file->f_mapping);
119 }
120 
121 static pte_t move_soft_dirty_pte(pte_t pte)
122 {
123 	/*
124 	 * Set the soft dirty bit so that userspace
125 	 * can notice that the ptes were moved.
126 	 */
127 #ifdef CONFIG_MEM_SOFT_DIRTY
128 	if (pte_present(pte))
129 		pte = pte_mksoft_dirty(pte);
130 	else if (is_swap_pte(pte))
131 		pte = pte_swp_mksoft_dirty(pte);
132 #endif
133 	return pte;
134 }
135 
136 static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
137 		unsigned long old_addr, unsigned long old_end,
138 		struct vm_area_struct *new_vma, pmd_t *new_pmd,
139 		unsigned long new_addr, bool need_rmap_locks)
140 {
141 	struct mm_struct *mm = vma->vm_mm;
142 	pte_t *old_pte, *new_pte, pte;
143 	spinlock_t *old_ptl, *new_ptl;
144 	bool force_flush = false;
145 	unsigned long len = old_end - old_addr;
146 	int err = 0;
147 
148 	/*
149 	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
150 	 * locks to ensure that rmap will always observe either the old or the
151 	 * new ptes. This is the easiest way to avoid races with
152 	 * truncate_pagecache(), page migration, etc...
153 	 *
154 	 * When need_rmap_locks is false, we use other ways to avoid
155 	 * such races:
156 	 *
157 	 * - During exec() shift_arg_pages(), we use a specially tagged vma
158 	 *   which rmap call sites look for using vma_is_temporary_stack().
159 	 *
160 	 * - During mremap(), new_vma is often known to be placed after vma
161 	 *   in rmap traversal order. This ensures rmap will always observe
162 	 *   either the old pte, or the new pte, or both (the page table locks
163 	 *   serialize access to individual ptes, but only rmap traversal
164 	 *   order guarantees that we won't miss both the old and new ptes).
165 	 */
166 	if (need_rmap_locks)
167 		take_rmap_locks(vma);
168 
169 	/*
170 	 * We don't have to worry about the ordering of src and dst
171 	 * pte locks because exclusive mmap_lock prevents deadlock.
172 	 */
173 	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
174 	if (!old_pte) {
175 		err = -EAGAIN;
176 		goto out;
177 	}
178 	new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl);
179 	if (!new_pte) {
180 		pte_unmap_unlock(old_pte, old_ptl);
181 		err = -EAGAIN;
182 		goto out;
183 	}
184 	if (new_ptl != old_ptl)
185 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
186 	flush_tlb_batched_pending(vma->vm_mm);
187 	arch_enter_lazy_mmu_mode();
188 
189 	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
190 				   new_pte++, new_addr += PAGE_SIZE) {
191 		if (pte_none(ptep_get(old_pte)))
192 			continue;
193 
194 		pte = ptep_get_and_clear(mm, old_addr, old_pte);
195 		/*
196 		 * If we are remapping a valid PTE, make sure
197 		 * to flush TLB before we drop the PTL for the
198 		 * PTE.
199 		 *
200 		 * NOTE! Both old and new PTL matter: the old one
201 		 * for racing with page_mkclean(), the new one to
202 		 * make sure the physical page stays valid until
203 		 * the TLB entry for the old mapping has been
204 		 * flushed.
205 		 */
206 		if (pte_present(pte))
207 			force_flush = true;
208 		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
209 		pte = move_soft_dirty_pte(pte);
210 		set_pte_at(mm, new_addr, new_pte, pte);
211 	}
212 
213 	arch_leave_lazy_mmu_mode();
214 	if (force_flush)
215 		flush_tlb_range(vma, old_end - len, old_end);
216 	if (new_ptl != old_ptl)
217 		spin_unlock(new_ptl);
218 	pte_unmap(new_pte - 1);
219 	pte_unmap_unlock(old_pte - 1, old_ptl);
220 out:
221 	if (need_rmap_locks)
222 		drop_rmap_locks(vma);
223 	return err;
224 }
225 
226 #ifndef arch_supports_page_table_move
227 #define arch_supports_page_table_move arch_supports_page_table_move
228 static inline bool arch_supports_page_table_move(void)
229 {
230 	return IS_ENABLED(CONFIG_HAVE_MOVE_PMD) ||
231 		IS_ENABLED(CONFIG_HAVE_MOVE_PUD);
232 }
233 #endif
234 
235 #ifdef CONFIG_HAVE_MOVE_PMD
236 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
237 		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
238 {
239 	spinlock_t *old_ptl, *new_ptl;
240 	struct mm_struct *mm = vma->vm_mm;
241 	bool res = false;
242 	pmd_t pmd;
243 
244 	if (!arch_supports_page_table_move())
245 		return false;
246 	/*
247 	 * The destination pmd shouldn't be established, free_pgtables()
248 	 * should have released it.
249 	 *
250 	 * However, there's a case during execve() where we use mremap
251 	 * to move the initial stack, and in that case the target area
252 	 * may overlap the source area (always moving down).
253 	 *
254 	 * If everything is PMD-aligned, that works fine, as moving
255 	 * each pmd down will clear the source pmd. But if we first
256 	 * have a few 4kB-only pages that get moved down, and then
257 	 * hit the "now the rest is PMD-aligned, let's do everything
258 	 * one pmd at a time", we will still have the old (now empty
259 	 * of any 4kB pages, but still there) PMD in the page table
260 	 * tree.
261 	 *
262 	 * Warn on it once - because we really should try to figure
263 	 * out how to do this better - but then say "I won't move
264 	 * this pmd".
265 	 *
266 	 * One alternative might be to just unmap the target pmd at
267 	 * this point, and verify that it really is empty. We'll see.
268 	 */
269 	if (WARN_ON_ONCE(!pmd_none(*new_pmd)))
270 		return false;
271 
272 	/*
273 	 * We don't have to worry about the ordering of src and dst
274 	 * ptlocks because exclusive mmap_lock prevents deadlock.
275 	 */
276 	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
277 	new_ptl = pmd_lockptr(mm, new_pmd);
278 	if (new_ptl != old_ptl)
279 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
280 
281 	pmd = *old_pmd;
282 
283 	/* Racing with collapse? */
284 	if (unlikely(!pmd_present(pmd) || pmd_leaf(pmd)))
285 		goto out_unlock;
286 	/* Clear the pmd */
287 	pmd_clear(old_pmd);
288 	res = true;
289 
290 	VM_BUG_ON(!pmd_none(*new_pmd));
291 
292 	pmd_populate(mm, new_pmd, pmd_pgtable(pmd));
293 	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
294 out_unlock:
295 	if (new_ptl != old_ptl)
296 		spin_unlock(new_ptl);
297 	spin_unlock(old_ptl);
298 
299 	return res;
300 }
301 #else
302 static inline bool move_normal_pmd(struct vm_area_struct *vma,
303 		unsigned long old_addr, unsigned long new_addr, pmd_t *old_pmd,
304 		pmd_t *new_pmd)
305 {
306 	return false;
307 }
308 #endif
309 
310 #if CONFIG_PGTABLE_LEVELS > 2 && defined(CONFIG_HAVE_MOVE_PUD)
311 static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
312 		  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
313 {
314 	spinlock_t *old_ptl, *new_ptl;
315 	struct mm_struct *mm = vma->vm_mm;
316 	pud_t pud;
317 
318 	if (!arch_supports_page_table_move())
319 		return false;
320 	/*
321 	 * The destination pud shouldn't be established, free_pgtables()
322 	 * should have released it.
323 	 */
324 	if (WARN_ON_ONCE(!pud_none(*new_pud)))
325 		return false;
326 
327 	/*
328 	 * We don't have to worry about the ordering of src and dst
329 	 * ptlocks because exclusive mmap_lock prevents deadlock.
330 	 */
331 	old_ptl = pud_lock(vma->vm_mm, old_pud);
332 	new_ptl = pud_lockptr(mm, new_pud);
333 	if (new_ptl != old_ptl)
334 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
335 
336 	/* Clear the pud */
337 	pud = *old_pud;
338 	pud_clear(old_pud);
339 
340 	VM_BUG_ON(!pud_none(*new_pud));
341 
342 	pud_populate(mm, new_pud, pud_pgtable(pud));
343 	flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
344 	if (new_ptl != old_ptl)
345 		spin_unlock(new_ptl);
346 	spin_unlock(old_ptl);
347 
348 	return true;
349 }
350 #else
351 static inline bool move_normal_pud(struct vm_area_struct *vma,
352 		unsigned long old_addr, unsigned long new_addr, pud_t *old_pud,
353 		pud_t *new_pud)
354 {
355 	return false;
356 }
357 #endif
358 
359 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
360 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
361 			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
362 {
363 	spinlock_t *old_ptl, *new_ptl;
364 	struct mm_struct *mm = vma->vm_mm;
365 	pud_t pud;
366 
367 	/*
368 	 * The destination pud shouldn't be established, free_pgtables()
369 	 * should have released it.
370 	 */
371 	if (WARN_ON_ONCE(!pud_none(*new_pud)))
372 		return false;
373 
374 	/*
375 	 * We don't have to worry about the ordering of src and dst
376 	 * ptlocks because exclusive mmap_lock prevents deadlock.
377 	 */
378 	old_ptl = pud_lock(vma->vm_mm, old_pud);
379 	new_ptl = pud_lockptr(mm, new_pud);
380 	if (new_ptl != old_ptl)
381 		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
382 
383 	/* Clear the pud */
384 	pud = *old_pud;
385 	pud_clear(old_pud);
386 
387 	VM_BUG_ON(!pud_none(*new_pud));
388 
389 	/* Set the new pud */
390 	/* mark soft_dirty when we add pud level soft dirty support */
391 	set_pud_at(mm, new_addr, new_pud, pud);
392 	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
393 	if (new_ptl != old_ptl)
394 		spin_unlock(new_ptl);
395 	spin_unlock(old_ptl);
396 
397 	return true;
398 }
399 #else
400 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
401 			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
402 {
403 	WARN_ON_ONCE(1);
404 	return false;
405 
406 }
407 #endif
408 
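/* Page table levels at which move_pgt_entry() can move an entry. */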
409 enum pgt_entry {
410 	NORMAL_PMD,
411 	HPAGE_PMD,
412 	NORMAL_PUD,
413 	HPAGE_PUD,
414 };
415 
416 /*
417  * Returns an extent of the size corresponding to the specified pgt_entry,
418  * if possible. Otherwise returns a smaller extent bounded by the end of the
419  * source range and/or the next destination pgt_entry boundary.
420  */
421 static __always_inline unsigned long get_extent(enum pgt_entry entry,
422 			unsigned long old_addr, unsigned long old_end,
423 			unsigned long new_addr)
424 {
425 	unsigned long next, extent, mask, size;
426 
427 	switch (entry) {
428 	case HPAGE_PMD:
429 	case NORMAL_PMD:
430 		mask = PMD_MASK;
431 		size = PMD_SIZE;
432 		break;
433 	case HPAGE_PUD:
434 	case NORMAL_PUD:
435 		mask = PUD_MASK;
436 		size = PUD_SIZE;
437 		break;
438 	default:
439 		BUILD_BUG();
440 		break;
441 	}
442 
443 	next = (old_addr + size) & mask;
444 	/* even if next overflowed, extent below will be ok */
445 	extent = next - old_addr;
446 	if (extent > old_end - old_addr)
447 		extent = old_end - old_addr;
448 	next = (new_addr + size) & mask;
449 	if (extent > next - new_addr)
450 		extent = next - new_addr;
451 	return extent;
452 }
453 
454 /*
455  * Attempts to speed up the move by moving the entry at the level
456  * corresponding to pgt_entry. Returns true if the move was successful, else false.
457  */
458 static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
459 			unsigned long old_addr, unsigned long new_addr,
460 			void *old_entry, void *new_entry, bool need_rmap_locks)
461 {
462 	bool moved = false;
463 
464 	/* See comment in move_ptes() */
465 	if (need_rmap_locks)
466 		take_rmap_locks(vma);
467 
468 	switch (entry) {
469 	case NORMAL_PMD:
470 		moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
471 					new_entry);
472 		break;
473 	case NORMAL_PUD:
474 		moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
475 					new_entry);
476 		break;
477 	case HPAGE_PMD:
478 		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
479 			move_huge_pmd(vma, old_addr, new_addr, old_entry,
480 				      new_entry);
481 		break;
482 	case HPAGE_PUD:
483 		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
484 			move_huge_pud(vma, old_addr, new_addr, old_entry,
485 				      new_entry);
486 		break;
487 
488 	default:
489 		WARN_ON_ONCE(1);
490 		break;
491 	}
492 
493 	if (need_rmap_locks)
494 		drop_rmap_locks(vma);
495 
496 	return moved;
497 }
498 
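/*
 * Move the page-table entries covering [old_addr, old_addr + len) from @vma
 * to @new_vma at @new_addr.  Tries PUD- and PMD-level moves where possible
 * and falls back to copying individual PTEs.  Returns the number of bytes
 * actually moved, which may be less than @len on allocation failure.
 */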
499 unsigned long move_page_tables(struct vm_area_struct *vma,
500 		unsigned long old_addr, struct vm_area_struct *new_vma,
501 		unsigned long new_addr, unsigned long len,
502 		bool need_rmap_locks)
503 {
504 	unsigned long extent, old_end;
505 	struct mmu_notifier_range range;
506 	pmd_t *old_pmd, *new_pmd;
507 	pud_t *old_pud, *new_pud;
508 
509 	if (!len)
510 		return 0;
511 
512 	old_end = old_addr + len;
513 
514 	if (is_vm_hugetlb_page(vma))
515 		return move_hugetlb_page_tables(vma, new_vma, old_addr,
516 						new_addr, len);
517 
518 	flush_cache_range(vma, old_addr, old_end);
519 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
520 				old_addr, old_end);
521 	mmu_notifier_invalidate_range_start(&range);
522 
523 	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
524 		cond_resched();
525 		/*
526 		 * If the extent is PUD-sized, try to speed up the move by
527 		 * moving at the PUD level if possible.
528 		 */
529 		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
530 
531 		old_pud = get_old_pud(vma->vm_mm, old_addr);
532 		if (!old_pud)
533 			continue;
534 		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
535 		if (!new_pud)
536 			break;
537 		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
538 			if (extent == HPAGE_PUD_SIZE) {
539 				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
540 					       old_pud, new_pud, need_rmap_locks);
541 				/* We ignore and continue on error? */
542 				continue;
543 			}
544 		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
545 
546 			if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
547 					   old_pud, new_pud, true))
548 				continue;
549 		}
550 
551 		extent = get_extent(NORMAL_PMD, old_addr, old_end, new_addr);
552 		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
553 		if (!old_pmd)
554 			continue;
555 		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
556 		if (!new_pmd)
557 			break;
558 again:
559 		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) ||
560 		    pmd_devmap(*old_pmd)) {
561 			if (extent == HPAGE_PMD_SIZE &&
562 			    move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
563 					   old_pmd, new_pmd, need_rmap_locks))
564 				continue;
565 			split_huge_pmd(vma, old_pmd, old_addr);
566 		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) &&
567 			   extent == PMD_SIZE) {
568 			/*
569 			 * If the extent is PMD-sized, try to speed up the move
570 			 * by moving at the PMD level if possible.
571 			 */
572 			if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
573 					   old_pmd, new_pmd, true))
574 				continue;
575 		}
576 		if (pmd_none(*old_pmd))
577 			continue;
578 		if (pte_alloc(new_vma->vm_mm, new_pmd))
579 			break;
580 		if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
581 			      new_vma, new_pmd, new_addr, need_rmap_locks) < 0)
582 			goto again;
583 	}
584 
585 	mmu_notifier_invalidate_range_end(&range);
586 
587 	return len + old_addr - old_end;	/* how much done */
588 }
589 
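/*
 * Move an existing mapping: set up @new_vma at @new_addr, move the page
 * tables across, fix up accounting and mlock state, and (unless
 * MREMAP_DONTUNMAP) unmap the old range.  Returns the new address or a
 * negative error value.
 */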
590 static unsigned long move_vma(struct vm_area_struct *vma,
591 		unsigned long old_addr, unsigned long old_len,
592 		unsigned long new_len, unsigned long new_addr,
593 		bool *locked, unsigned long flags,
594 		struct vm_userfaultfd_ctx *uf, struct list_head *uf_unmap)
595 {
596 	long to_account = new_len - old_len;
597 	struct mm_struct *mm = vma->vm_mm;
598 	struct vm_area_struct *new_vma;
599 	unsigned long vm_flags = vma->vm_flags;
600 	unsigned long new_pgoff;
601 	unsigned long moved_len;
602 	unsigned long account_start = 0;
603 	unsigned long account_end = 0;
604 	unsigned long hiwater_vm;
605 	int err = 0;
606 	bool need_rmap_locks;
607 	struct vma_iterator vmi;
608 
609 	/*
610 	 * We'd prefer to avoid failure later on in do_munmap(),
611 	 * which may split one vma into three before unmapping.
612 	 */
613 	if (mm->map_count >= sysctl_max_map_count - 3)
614 		return -ENOMEM;
615 
616 	if (unlikely(flags & MREMAP_DONTUNMAP))
617 		to_account = new_len;
618 
619 	if (vma->vm_ops && vma->vm_ops->may_split) {
620 		if (vma->vm_start != old_addr)
621 			err = vma->vm_ops->may_split(vma, old_addr);
622 		if (!err && vma->vm_end != old_addr + old_len)
623 			err = vma->vm_ops->may_split(vma, old_addr + old_len);
624 		if (err)
625 			return err;
626 	}
627 
628 	/*
629 	 * Advise KSM to break any KSM pages in the area to be moved:
630 	 * it would be confusing if they were to turn up at the new
631 	 * location, where they happen to coincide with different KSM
632 	 * pages recently unmapped.  But leave vma->vm_flags as it was,
633 	 * so KSM can come around to merge on vma and new_vma afterwards.
634 	 */
635 	err = ksm_madvise(vma, old_addr, old_addr + old_len,
636 						MADV_UNMERGEABLE, &vm_flags);
637 	if (err)
638 		return err;
639 
640 	if (vm_flags & VM_ACCOUNT) {
641 		if (security_vm_enough_memory_mm(mm, to_account >> PAGE_SHIFT))
642 			return -ENOMEM;
643 	}
644 
645 	vma_start_write(vma);
646 	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
647 	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
648 			   &need_rmap_locks);
649 	if (!new_vma) {
650 		if (vm_flags & VM_ACCOUNT)
651 			vm_unacct_memory(to_account >> PAGE_SHIFT);
652 		return -ENOMEM;
653 	}
654 
655 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
656 				     need_rmap_locks);
657 	if (moved_len < old_len) {
658 		err = -ENOMEM;
659 	} else if (vma->vm_ops && vma->vm_ops->mremap) {
660 		err = vma->vm_ops->mremap(new_vma);
661 	}
662 
663 	if (unlikely(err)) {
664 		/*
665 		 * On error, move entries back from new area to old,
666 		 * which will succeed since page tables still there,
667 		 * and then proceed to unmap new area instead of old.
668 		 */
669 		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
670 				 true);
671 		vma = new_vma;
672 		old_len = new_len;
673 		old_addr = new_addr;
674 		new_addr = err;
675 	} else {
676 		mremap_userfaultfd_prep(new_vma, uf);
677 	}
678 
679 	if (is_vm_hugetlb_page(vma)) {
680 		clear_vma_resv_huge_pages(vma);
681 	}
682 
683 	/* Conceal VM_ACCOUNT so old reservation is not undone */
684 	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
685 		vm_flags_clear(vma, VM_ACCOUNT);
686 		if (vma->vm_start < old_addr)
687 			account_start = vma->vm_start;
688 		if (vma->vm_end > old_addr + old_len)
689 			account_end = vma->vm_end;
690 	}
691 
692 	/*
693 	 * If we failed to move page tables we still do total_vm increment
694 	 * since do_munmap() will decrement it by old_len == new_len.
695 	 *
696 	 * Since total_vm is about to be raised artificially high for a
697 	 * moment, we need to restore high watermark afterwards: if stats
698 	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
699 	 * If this were a serious issue, we'd add a flag to do_munmap().
700 	 */
701 	hiwater_vm = mm->hiwater_vm;
702 	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
703 
704 	/* Tell the pfn tracking code that the pfnmap has moved from this vma */
705 	if (unlikely(vma->vm_flags & VM_PFNMAP))
706 		untrack_pfn_clear(vma);
707 
708 	if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
709 		/* We always clear VM_LOCKED[ONFAULT] on the old vma */
710 		vm_flags_clear(vma, VM_LOCKED_MASK);
711 
712 		/*
713 		 * The anon_vma links of the old vma are no longer needed after
714 		 * its page tables have been moved.
715 		 */
716 		if (new_vma != vma && vma->vm_start == old_addr &&
717 			vma->vm_end == (old_addr + old_len))
718 			unlink_anon_vmas(vma);
719 
720 		/* Because we won't unmap we don't need to touch locked_vm */
721 		return new_addr;
722 	}
723 
724 	vma_iter_init(&vmi, mm, old_addr);
725 	if (do_vmi_munmap(&vmi, mm, old_addr, old_len, uf_unmap, false) < 0) {
726 		/* OOM: unable to split vma, just get accounts right */
727 		if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
728 			vm_acct_memory(old_len >> PAGE_SHIFT);
729 		account_start = account_end = 0;
730 	}
731 
732 	if (vm_flags & VM_LOCKED) {
733 		mm->locked_vm += new_len >> PAGE_SHIFT;
734 		*locked = true;
735 	}
736 
737 	mm->hiwater_vm = hiwater_vm;
738 
739 	/* Restore VM_ACCOUNT if one or two pieces of vma left */
740 	if (account_start) {
741 		vma = vma_prev(&vmi);
742 		vm_flags_set(vma, VM_ACCOUNT);
743 	}
744 
745 	if (account_end) {
746 		vma = vma_next(&vmi);
747 		vm_flags_set(vma, VM_ACCOUNT);
748 	}
749 
750 	return new_addr;
751 }
752 
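/*
 * Look up and validate the vma at @addr for an mremap() resize/move.
 * Returns the vma on success or an ERR_PTR() on failure.
 */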
753 static struct vm_area_struct *vma_to_resize(unsigned long addr,
754 	unsigned long old_len, unsigned long new_len, unsigned long flags)
755 {
756 	struct mm_struct *mm = current->mm;
757 	struct vm_area_struct *vma;
758 	unsigned long pgoff;
759 
760 	vma = vma_lookup(mm, addr);
761 	if (!vma)
762 		return ERR_PTR(-EFAULT);
763 
764 	/*
765 	 * !old_len is a special case where an attempt is made to 'duplicate'
766 	 * a mapping.  This makes no sense for private mappings as it will
767 	 * instead create a fresh/new mapping unrelated to the original.  This
768 	 * is contrary to the basic idea of mremap which creates new mappings
769 	 * based on the original.  There are no known use cases for this
770 	 * behavior.  As a result, fail such attempts.
771 	 */
772 	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
773 		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n", current->comm, current->pid);
774 		return ERR_PTR(-EINVAL);
775 	}
776 
777 	if ((flags & MREMAP_DONTUNMAP) &&
778 			(vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
779 		return ERR_PTR(-EINVAL);
780 
781 	/* We can't remap across vm area boundaries */
782 	if (old_len > vma->vm_end - addr)
783 		return ERR_PTR(-EFAULT);
784 
785 	if (new_len == old_len)
786 		return vma;
787 
788 	/* Need to be careful about a growing mapping */
789 	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
790 	pgoff += vma->vm_pgoff;
791 	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
792 		return ERR_PTR(-EINVAL);
793 
794 	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
795 		return ERR_PTR(-EFAULT);
796 
797 	if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len))
798 		return ERR_PTR(-EAGAIN);
799 
800 	if (!may_expand_vm(mm, vma->vm_flags,
801 				(new_len - old_len) >> PAGE_SHIFT))
802 		return ERR_PTR(-ENOMEM);
803 
804 	return vma;
805 }
806 
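/*
 * Handle the MREMAP_FIXED / MREMAP_DONTUNMAP cases: validate the destination
 * range, unmap whatever is needed, find the new location and hand off to
 * move_vma().
 */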
807 static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
808 		unsigned long new_addr, unsigned long new_len, bool *locked,
809 		unsigned long flags, struct vm_userfaultfd_ctx *uf,
810 		struct list_head *uf_unmap_early,
811 		struct list_head *uf_unmap)
812 {
813 	struct mm_struct *mm = current->mm;
814 	struct vm_area_struct *vma;
815 	unsigned long ret = -EINVAL;
816 	unsigned long map_flags = 0;
817 
818 	if (offset_in_page(new_addr))
819 		goto out;
820 
821 	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
822 		goto out;
823 
824 	/* Ensure the old/new locations do not overlap */
825 	if (addr + old_len > new_addr && new_addr + new_len > addr)
826 		goto out;
827 
828 	/*
829 	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
830 	 * it will bail out at the very beginning.
831 	 * That is a problem if we have already unmapped the regions here
832 	 * (new_addr and old_addr), because userspace will not know the
833 	 * state of the vmas after it gets -ENOMEM.
834 	 * So, to avoid such a scenario, we pre-check whether the whole
835 	 * operation has a high chance of succeeding map-wise.
836 	 * The worst case is when both vmas (at new_addr and old_addr) get
837 	 * split in 3 before being unmapped.
838 	 * That means 2 more maps (1 for each) on top of the ones we already hold.
839 	 * Check whether the current map count plus 2 still leaves us 4 maps below
840 	 * the threshold, otherwise return -ENOMEM here to be on the safe side.
841 	 */
842 	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
843 		return -ENOMEM;
844 
845 	if (flags & MREMAP_FIXED) {
846 		ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
847 		if (ret)
848 			goto out;
849 	}
850 
851 	if (old_len > new_len) {
852 		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
853 		if (ret)
854 			goto out;
855 		old_len = new_len;
856 	}
857 
858 	vma = vma_to_resize(addr, old_len, new_len, flags);
859 	if (IS_ERR(vma)) {
860 		ret = PTR_ERR(vma);
861 		goto out;
862 	}
863 
864 	/* MREMAP_DONTUNMAP expands by old_len since old_len == new_len */
865 	if (flags & MREMAP_DONTUNMAP &&
866 		!may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
867 		ret = -ENOMEM;
868 		goto out;
869 	}
870 
871 	if (flags & MREMAP_FIXED)
872 		map_flags |= MAP_FIXED;
873 
874 	if (vma->vm_flags & VM_MAYSHARE)
875 		map_flags |= MAP_SHARED;
876 
877 	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
878 				((addr - vma->vm_start) >> PAGE_SHIFT),
879 				map_flags);
880 	if (IS_ERR_VALUE(ret))
881 		goto out;
882 
883 	/* We got a new mapping */
884 	if (!(flags & MREMAP_FIXED))
885 		new_addr = ret;
886 
887 	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
888 		       uf_unmap);
889 
890 out:
891 	return ret;
892 }
893 
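/*
 * Can @vma be grown in place by @delta bytes without colliding with the
 * next mapping or violating the address-space layout?
 */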
894 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
895 {
896 	unsigned long end = vma->vm_end + delta;
897 
898 	if (end < vma->vm_end) /* overflow */
899 		return 0;
900 	if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
901 		return 0;
902 	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
903 			      0, MAP_FIXED) & ~PAGE_MASK)
904 		return 0;
905 	return 1;
906 }
907 
908 /*
909  * Expand (or shrink) an existing mapping, potentially moving it at the
910  * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
911  *
912  * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
913  * This option implies MREMAP_MAYMOVE.
914  */
915 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
916 		unsigned long, new_len, unsigned long, flags,
917 		unsigned long, new_addr)
918 {
919 	struct mm_struct *mm = current->mm;
920 	struct vm_area_struct *vma;
921 	unsigned long ret = -EINVAL;
922 	bool locked = false;
923 	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
924 	LIST_HEAD(uf_unmap_early);
925 	LIST_HEAD(uf_unmap);
926 
927 	/*
928 	 * There is a deliberate asymmetry here: we strip the pointer tag
929 	 * from the old address but leave the new address alone. This is
930 	 * for consistency with mmap(), where we prevent the creation of
931 	 * aliasing mappings in userspace by leaving the tag bits of the
932 	 * mapping address intact. A non-zero tag will cause the subsequent
933 	 * range checks to reject the address as invalid.
934 	 *
935 	 * See Documentation/arch/arm64/tagged-address-abi.rst for more
936 	 * information.
937 	 */
938 	addr = untagged_addr(addr);
939 
940 	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE | MREMAP_DONTUNMAP))
941 		return ret;
942 
943 	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
944 		return ret;
945 
946 	/*
947 	 * MREMAP_DONTUNMAP is always a move and it does not allow resizing
948 	 * in the process.
949 	 */
950 	if (flags & MREMAP_DONTUNMAP &&
951 			(!(flags & MREMAP_MAYMOVE) || old_len != new_len))
952 		return ret;
953 
954 
955 	if (offset_in_page(addr))
956 		return ret;
957 
958 	old_len = PAGE_ALIGN(old_len);
959 	new_len = PAGE_ALIGN(new_len);
960 
961 	/*
962 	 * We allow a zero old-len as a special case
963 	 * for DOS-emu "duplicate shm area" thing. But
964 	 * a zero new-len is nonsensical.
965 	 */
966 	if (!new_len)
967 		return ret;
968 
969 	if (mmap_write_lock_killable(current->mm))
970 		return -EINTR;
971 	vma = vma_lookup(mm, addr);
972 	if (!vma) {
973 		ret = -EFAULT;
974 		goto out;
975 	}
976 
977 	if (is_vm_hugetlb_page(vma)) {
978 		struct hstate *h __maybe_unused = hstate_vma(vma);
979 
980 		old_len = ALIGN(old_len, huge_page_size(h));
981 		new_len = ALIGN(new_len, huge_page_size(h));
982 
983 		/* addrs must be huge page aligned */
984 		if (addr & ~huge_page_mask(h))
985 			goto out;
986 		if (new_addr & ~huge_page_mask(h))
987 			goto out;
988 
989 		/*
990 		 * Don't allow remap expansion, because the underlying hugetlb
991 		 * reservation is not yet capable of handling split reservations.
992 		 */
993 		if (new_len > old_len)
994 			goto out;
995 	}
996 
997 	if (flags & (MREMAP_FIXED | MREMAP_DONTUNMAP)) {
998 		ret = mremap_to(addr, old_len, new_addr, new_len,
999 				&locked, flags, &uf, &uf_unmap_early,
1000 				&uf_unmap);
1001 		goto out;
1002 	}
1003 
1004 	/*
1005 	 * Always allow a shrinking remap: that just unmaps
1006 	 * the unnecessary pages..
1007 	 * do_vmi_munmap does all the needed commit accounting, and
1008 	 * unlocks the mmap_lock if so directed.
1009 	 */
1010 	if (old_len >= new_len) {
1011 		VMA_ITERATOR(vmi, mm, addr + new_len);
1012 
1013 		if (old_len == new_len) {
1014 			ret = addr;
1015 			goto out;
1016 		}
1017 
1018 		ret = do_vmi_munmap(&vmi, mm, addr + new_len, old_len - new_len,
1019 				    &uf_unmap, true);
1020 		if (ret)
1021 			goto out;
1022 
1023 		ret = addr;
1024 		goto out_unlocked;
1025 	}
1026 
1027 	/*
1028 	 * Ok, we need to grow..
1029 	 */
1030 	vma = vma_to_resize(addr, old_len, new_len, flags);
1031 	if (IS_ERR(vma)) {
1032 		ret = PTR_ERR(vma);
1033 		goto out;
1034 	}
1035 
1036 	/* old_len exactly to the end of the area..
1037 	 */
1038 	if (old_len == vma->vm_end - addr) {
1039 		/* can we just expand the current mapping? */
1040 		if (vma_expandable(vma, new_len - old_len)) {
1041 			long pages = (new_len - old_len) >> PAGE_SHIFT;
1042 			unsigned long extension_start = addr + old_len;
1043 			unsigned long extension_end = addr + new_len;
1044 			pgoff_t extension_pgoff = vma->vm_pgoff +
1045 				((extension_start - vma->vm_start) >> PAGE_SHIFT);
1046 			VMA_ITERATOR(vmi, mm, extension_start);
1047 
1048 			if (vma->vm_flags & VM_ACCOUNT) {
1049 				if (security_vm_enough_memory_mm(mm, pages)) {
1050 					ret = -ENOMEM;
1051 					goto out;
1052 				}
1053 			}
1054 
1055 			/*
1056 			 * vma_merge() is called on the extension we are adding
1057 			 * to the already existing vma. It will merge this
1058 			 * extension with the existing vma (the expand operation
1059 			 * itself) and possibly also with the next vma, if that
1060 			 * becomes adjacent to the expanded vma and is otherwise
1061 			 * compatible.
1062 			 */
1063 			vma = vma_merge(&vmi, mm, vma, extension_start,
1064 				extension_end, vma->vm_flags, vma->anon_vma,
1065 				vma->vm_file, extension_pgoff, vma_policy(vma),
1066 				vma->vm_userfaultfd_ctx, anon_vma_name(vma));
1067 			if (!vma) {
1068 				vm_unacct_memory(pages);
1069 				ret = -ENOMEM;
1070 				goto out;
1071 			}
1072 
1073 			vm_stat_account(mm, vma->vm_flags, pages);
1074 			if (vma->vm_flags & VM_LOCKED) {
1075 				mm->locked_vm += pages;
1076 				locked = true;
1077 				new_addr = addr;
1078 			}
1079 			ret = addr;
1080 			goto out;
1081 		}
1082 	}
1083 
1084 	/*
1085 	 * We weren't able to just expand or shrink the area,
1086 	 * we need to create a new one and move it..
1087 	 */
1088 	ret = -ENOMEM;
1089 	if (flags & MREMAP_MAYMOVE) {
1090 		unsigned long map_flags = 0;
1091 		if (vma->vm_flags & VM_MAYSHARE)
1092 			map_flags |= MAP_SHARED;
1093 
1094 		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
1095 					vma->vm_pgoff +
1096 					((addr - vma->vm_start) >> PAGE_SHIFT),
1097 					map_flags);
1098 		if (IS_ERR_VALUE(new_addr)) {
1099 			ret = new_addr;
1100 			goto out;
1101 		}
1102 
1103 		ret = move_vma(vma, addr, old_len, new_len, new_addr,
1104 			       &locked, flags, &uf, &uf_unmap);
1105 	}
1106 out:
1107 	if (offset_in_page(ret))
1108 		locked = false;
1109 	mmap_write_unlock(current->mm);
1110 	if (locked && new_len > old_len)
1111 		mm_populate(new_addr + old_len, new_len - old_len);
1112 out_unlocked:
1113 	userfaultfd_unmap_complete(mm, &uf_unmap_early);
1114 	mremap_userfaultfd_complete(&uf, addr, ret, old_len);
1115 	userfaultfd_unmap_complete(mm, &uf_unmap);
1116 	return ret;
1117 }
1118