/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/sched/sysctl.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

/*
 * Walk the existing page tables and return the pmd covering @addr,
 * or NULL if none is present yet.
 */
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

/*
 * Return the pmd that will map @addr at the destination, allocating
 * intermediate page-table levels as needed; NULL on allocation failure.
 */
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
	else if (pte_file(pte))
		pte = pte_file_mksoft_dirty(pte);
#endif
	return pte;
}

static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct address_space *mapping = NULL;
	struct anon_vma *anon_vma = NULL;
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks) {
		if (vma->vm_file) {
			mapping = vma->vm_file->f_mapping;
			i_mmap_lock_write(mapping);
		}
		if (vma->anon_vma) {
			anon_vma = vma->anon_vma;
			anon_vma_lock_write(anon_vma);
		}
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
133 */ 134 old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); 135 new_pte = pte_offset_map(new_pmd, new_addr); 136 new_ptl = pte_lockptr(mm, new_pmd); 137 if (new_ptl != old_ptl) 138 spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); 139 arch_enter_lazy_mmu_mode(); 140 141 for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, 142 new_pte++, new_addr += PAGE_SIZE) { 143 if (pte_none(*old_pte)) 144 continue; 145 pte = ptep_get_and_clear(mm, old_addr, old_pte); 146 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); 147 pte = move_soft_dirty_pte(pte); 148 set_pte_at(mm, new_addr, new_pte, pte); 149 } 150 151 arch_leave_lazy_mmu_mode(); 152 if (new_ptl != old_ptl) 153 spin_unlock(new_ptl); 154 pte_unmap(new_pte - 1); 155 pte_unmap_unlock(old_pte - 1, old_ptl); 156 if (anon_vma) 157 anon_vma_unlock_write(anon_vma); 158 if (mapping) 159 i_mmap_unlock_write(mapping); 160 } 161 162 #define LATENCY_LIMIT (64 * PAGE_SIZE) 163 164 unsigned long move_page_tables(struct vm_area_struct *vma, 165 unsigned long old_addr, struct vm_area_struct *new_vma, 166 unsigned long new_addr, unsigned long len, 167 bool need_rmap_locks) 168 { 169 unsigned long extent, next, old_end; 170 pmd_t *old_pmd, *new_pmd; 171 bool need_flush = false; 172 unsigned long mmun_start; /* For mmu_notifiers */ 173 unsigned long mmun_end; /* For mmu_notifiers */ 174 175 old_end = old_addr + len; 176 flush_cache_range(vma, old_addr, old_end); 177 178 mmun_start = old_addr; 179 mmun_end = old_end; 180 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); 181 182 for (; old_addr < old_end; old_addr += extent, new_addr += extent) { 183 cond_resched(); 184 next = (old_addr + PMD_SIZE) & PMD_MASK; 185 /* even if next overflowed, extent below will be ok */ 186 extent = next - old_addr; 187 if (extent > old_end - old_addr) 188 extent = old_end - old_addr; 189 old_pmd = get_old_pmd(vma->vm_mm, old_addr); 190 if (!old_pmd) 191 continue; 192 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); 193 if (!new_pmd) 194 break; 195 if (pmd_trans_huge(*old_pmd)) { 196 int err = 0; 197 if (extent == HPAGE_PMD_SIZE) { 198 VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, 199 vma); 200 /* See comment in move_ptes() */ 201 if (need_rmap_locks) 202 anon_vma_lock_write(vma->anon_vma); 203 err = move_huge_pmd(vma, new_vma, old_addr, 204 new_addr, old_end, 205 old_pmd, new_pmd); 206 if (need_rmap_locks) 207 anon_vma_unlock_write(vma->anon_vma); 208 } 209 if (err > 0) { 210 need_flush = true; 211 continue; 212 } else if (!err) { 213 split_huge_page_pmd(vma, old_addr, old_pmd); 214 } 215 VM_BUG_ON(pmd_trans_huge(*old_pmd)); 216 } 217 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, 218 new_pmd, new_addr)) 219 break; 220 next = (new_addr + PMD_SIZE) & PMD_MASK; 221 if (extent > next - new_addr) 222 extent = next - new_addr; 223 if (extent > LATENCY_LIMIT) 224 extent = LATENCY_LIMIT; 225 move_ptes(vma, old_pmd, old_addr, old_addr + extent, 226 new_vma, new_pmd, new_addr, need_rmap_locks); 227 need_flush = true; 228 } 229 if (likely(need_flush)) 230 flush_tlb_range(vma, old_end-len, old_addr); 231 232 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); 233 234 return len + old_addr - old_end; /* how much done */ 235 } 236 237 static unsigned long move_vma(struct vm_area_struct *vma, 238 unsigned long old_addr, unsigned long old_len, 239 unsigned long new_len, unsigned long new_addr, bool *locked) 240 { 241 struct mm_struct *mm = vma->vm_mm; 242 struct vm_area_struct *new_vma; 243 unsigned long 
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap:
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = -ENOMEM;
	} else if (vma->vm_file && vma->vm_file->f_op->mremap)
		vma->vm_file->f_op->mremap(vma->vm_file, new_vma);

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
311 */ 312 hiwater_vm = mm->hiwater_vm; 313 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); 314 315 if (do_munmap(mm, old_addr, old_len) < 0) { 316 /* OOM: unable to split vma, just get accounts right */ 317 vm_unacct_memory(excess >> PAGE_SHIFT); 318 excess = 0; 319 } 320 mm->hiwater_vm = hiwater_vm; 321 322 /* Restore VM_ACCOUNT if one or two pieces of vma left */ 323 if (excess) { 324 vma->vm_flags |= VM_ACCOUNT; 325 if (split) 326 vma->vm_next->vm_flags |= VM_ACCOUNT; 327 } 328 329 if (vm_flags & VM_LOCKED) { 330 mm->locked_vm += new_len >> PAGE_SHIFT; 331 *locked = true; 332 } 333 334 return new_addr; 335 } 336 337 static struct vm_area_struct *vma_to_resize(unsigned long addr, 338 unsigned long old_len, unsigned long new_len, unsigned long *p) 339 { 340 struct mm_struct *mm = current->mm; 341 struct vm_area_struct *vma = find_vma(mm, addr); 342 343 if (!vma || vma->vm_start > addr) 344 goto Efault; 345 346 if (is_vm_hugetlb_page(vma)) 347 goto Einval; 348 349 /* We can't remap across vm area boundaries */ 350 if (old_len > vma->vm_end - addr) 351 goto Efault; 352 353 /* Need to be careful about a growing mapping */ 354 if (new_len > old_len) { 355 unsigned long pgoff; 356 357 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) 358 goto Efault; 359 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT; 360 pgoff += vma->vm_pgoff; 361 if (pgoff + (new_len >> PAGE_SHIFT) < pgoff) 362 goto Einval; 363 } 364 365 if (vma->vm_flags & VM_LOCKED) { 366 unsigned long locked, lock_limit; 367 locked = mm->locked_vm << PAGE_SHIFT; 368 lock_limit = rlimit(RLIMIT_MEMLOCK); 369 locked += new_len - old_len; 370 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) 371 goto Eagain; 372 } 373 374 if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) 375 goto Enomem; 376 377 if (vma->vm_flags & VM_ACCOUNT) { 378 unsigned long charged = (new_len - old_len) >> PAGE_SHIFT; 379 if (security_vm_enough_memory_mm(mm, charged)) 380 goto Efault; 381 *p = charged; 382 } 383 384 return vma; 385 386 Efault: /* very odd choice for most of the cases, but... */ 387 return ERR_PTR(-EFAULT); 388 Einval: 389 return ERR_PTR(-EINVAL); 390 Enomem: 391 return ERR_PTR(-ENOMEM); 392 Eagain: 393 return ERR_PTR(-EAGAIN); 394 } 395 396 static unsigned long mremap_to(unsigned long addr, unsigned long old_len, 397 unsigned long new_addr, unsigned long new_len, bool *locked) 398 { 399 struct mm_struct *mm = current->mm; 400 struct vm_area_struct *vma; 401 unsigned long ret = -EINVAL; 402 unsigned long charged = 0; 403 unsigned long map_flags; 404 405 if (new_addr & ~PAGE_MASK) 406 goto out; 407 408 if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) 409 goto out; 410 411 /* Check if the location we're moving into overlaps the 412 * old location at all, and fail if it does. 
413 */ 414 if ((new_addr <= addr) && (new_addr+new_len) > addr) 415 goto out; 416 417 if ((addr <= new_addr) && (addr+old_len) > new_addr) 418 goto out; 419 420 ret = do_munmap(mm, new_addr, new_len); 421 if (ret) 422 goto out; 423 424 if (old_len >= new_len) { 425 ret = do_munmap(mm, addr+new_len, old_len - new_len); 426 if (ret && old_len != new_len) 427 goto out; 428 old_len = new_len; 429 } 430 431 vma = vma_to_resize(addr, old_len, new_len, &charged); 432 if (IS_ERR(vma)) { 433 ret = PTR_ERR(vma); 434 goto out; 435 } 436 437 map_flags = MAP_FIXED; 438 if (vma->vm_flags & VM_MAYSHARE) 439 map_flags |= MAP_SHARED; 440 441 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff + 442 ((addr - vma->vm_start) >> PAGE_SHIFT), 443 map_flags); 444 if (ret & ~PAGE_MASK) 445 goto out1; 446 447 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked); 448 if (!(ret & ~PAGE_MASK)) 449 goto out; 450 out1: 451 vm_unacct_memory(charged); 452 453 out: 454 return ret; 455 } 456 457 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta) 458 { 459 unsigned long end = vma->vm_end + delta; 460 if (end < vma->vm_end) /* overflow */ 461 return 0; 462 if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */ 463 return 0; 464 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start, 465 0, MAP_FIXED) & ~PAGE_MASK) 466 return 0; 467 return 1; 468 } 469 470 /* 471 * Expand (or shrink) an existing mapping, potentially moving it at the 472 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) 473 * 474 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise 475 * This option implies MREMAP_MAYMOVE. 476 */ 477 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len, 478 unsigned long, new_len, unsigned long, flags, 479 unsigned long, new_addr) 480 { 481 struct mm_struct *mm = current->mm; 482 struct vm_area_struct *vma; 483 unsigned long ret = -EINVAL; 484 unsigned long charged = 0; 485 bool locked = false; 486 487 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) 488 return ret; 489 490 if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE)) 491 return ret; 492 493 if (addr & ~PAGE_MASK) 494 return ret; 495 496 old_len = PAGE_ALIGN(old_len); 497 new_len = PAGE_ALIGN(new_len); 498 499 /* 500 * We allow a zero old-len as a special case 501 * for DOS-emu "duplicate shm area" thing. But 502 * a zero new-len is nonsensical. 503 */ 504 if (!new_len) 505 return ret; 506 507 down_write(¤t->mm->mmap_sem); 508 509 if (flags & MREMAP_FIXED) { 510 ret = mremap_to(addr, old_len, new_addr, new_len, 511 &locked); 512 goto out; 513 } 514 515 /* 516 * Always allow a shrinking remap: that just unmaps 517 * the unnecessary pages.. 518 * do_munmap does all the needed commit accounting 519 */ 520 if (old_len >= new_len) { 521 ret = do_munmap(mm, addr+new_len, old_len - new_len); 522 if (ret && old_len != new_len) 523 goto out; 524 ret = addr; 525 goto out; 526 } 527 528 /* 529 * Ok, we need to grow.. 530 */ 531 vma = vma_to_resize(addr, old_len, new_len, &charged); 532 if (IS_ERR(vma)) { 533 ret = PTR_ERR(vma); 534 goto out; 535 } 536 537 /* old_len exactly to the end of the area.. 538 */ 539 if (old_len == vma->vm_end - addr) { 540 /* can we just expand the current mapping? 
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;
		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (new_addr & ~PAGE_MASK) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
	}
out:
	if (ret & ~PAGE_MASK)
		vm_unacct_memory(charged);
	up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	return ret;
}
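/*
 * Illustrative userspace sketch (not part of the kernel build, guarded out
 * below so it does not affect compilation of this file): a minimal program
 * showing how the sys_mremap() path above is typically exercised via the
 * glibc mremap() wrapper.  A growing request either takes the in-place
 * vma_expandable()/vma_adjust() path or, with MREMAP_MAYMOVE, falls back to
 * move_vma()/move_page_tables() at a new address.
 */
#if 0
#define _GNU_SOURCE		/* for mremap() and the MREMAP_* flags */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 4 * 4096;	/* assumes 4 KiB pages */

	/* Anonymous private mapping that we will ask the kernel to grow. */
	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");

	/*
	 * Without MREMAP_MAYMOVE this could only succeed by expanding the
	 * mapping in place; with it, the kernel may relocate the mapping,
	 * move the page tables, and return the new address.
	 */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	/* Contents are preserved whether or not the mapping moved. */
	printf("%s (moved: %s)\n", q, q == p ? "no" : "yes");
	munmap(q, new_len);
	return 0;
}
#endif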