// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
	case MADV_COLLAPSE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * Update the vm_flags on a region of a vma, splitting it or merging it as
 * necessary. Must be called with mmap_lock held for writing.
 * The caller should ensure anon_name stability by raising its refcount even
 * when anon_name belongs to a valid vma, because this function might free
 * that vma.
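 *
 * A typical caller pins the name around the call (this is exactly what
 * madvise_vma_behavior() below does):
 *
 *	anon_name = anon_vma_name(vma);
 *	anon_vma_name_get(anon_name);
 *	error = madvise_update_vma(vma, prev, start, end, new_flags, anon_name);
 *	anon_vma_name_put(anon_name);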
 */
static int madvise_update_vma(struct vm_area_struct *vma,
				struct vm_area_struct **prev, unsigned long start,
				unsigned long end, unsigned long new_flags,
				struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	pgoff_t pgoff;

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_name);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count))
			return -ENOMEM;
		error = __split_vma(mm, vma, start, 1);
		if (error)
			return error;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count))
			return -ENOMEM;
		error = __split_vma(mm, vma, end, 0);
		if (error)
			return error;
	}

success:
	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 */
	vma->vm_flags = new_flags;
	if (!vma->vm_file || vma_is_anon_shmem(vma)) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
		unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	unsigned long index;
	struct swap_iocb *splug = NULL;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;
		pte_t *ptep;

		ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl);
		pte = *ptep;
		pte_unmap_unlock(ptep, ptl);

		if (!is_swap_pte(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false, &splug);
		if (page)
			put_page(page);
	}
	swap_read_unplug(splug);

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry = swapin_walk_pmd_entry,
};

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
	struct page *page;
	struct swap_iocb *splug = NULL;

	rcu_read_lock();
	xas_for_each(&xas, page, end_index) {
		swp_entry_t swap;

		if (!xa_is_value(page))
			continue;
		swap = radix_to_swp_entry(page);
		/* There might be swapin error entries in shmem mapping. */
		if (non_swap_entry(swap))
			continue;
		xas_pause(&xas);
		rcu_read_unlock();

		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false, &splug);
		if (page)
			put_page(page);

		rcu_read_lock();
	}
	rcu_read_unlock();
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations. Do not wait for completion.
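 *
 * Illustrative only (userspace, not kernel code): a program that knows it is
 * about to walk a large mapping sequentially can ask for readahead before the
 * first fault with
 *
 *	madvise(addr, len, MADV_WILLNEED);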
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					   file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks. We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}

static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page = NULL;
	LIST_HEAD(page_list);

	if (fatal_signal_pending(current))
		return -EINTR;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&page_list);
		return 0;
	}

regular_page:
	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
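	/*
	 * Scan the remaining PTEs one page at a time: age any referenced
	 * entries, then either deactivate the backing pages (MADV_COLD) or
	 * isolate them for reclaim (MADV_PAGEOUT).
	 */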
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page || is_zone_device_page(page))
			continue;

		/*
		 * Creating a THP page is expensive, so split it only if we
		 * are sure it's worth the cost, i.e. only if we are the
		 * sole owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/*
		 * Do not interfere with other mappings of this page or with
		 * non-LRU pages.
		 */
		if (!PageLRU(page) || page_mapcount(page) != 1)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a page to accelerate reclaim.
		 * The VM couldn't reclaim the page unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * which will miss the recent reference history.
		 */
		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	if (pageout)
		reclaim_pages(&page_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end,
			&cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_do_pageout(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Page out pagecache only for non-anonymous mappings that correspond
	 * to files the calling process could (if it tried) open for writing;
	 * otherwise we'd be including shared non-exclusive mappings, which
	 * opens a side channel.
	 */
	return inode_owner_or_capable(&init_user_ns,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}

static long madvise_pageout(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!can_do_pageout(vma))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct folio *folio;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent a swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (!non_swap_entry(entry)) {
				nr_swap--;
				free_swap_and_cache(entry);
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			} else if (is_hwpoison_entry(entry) ||
				   is_swapin_error_entry(entry)) {
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			}
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page || is_zone_device_page(page))
			continue;
		folio = page_folio(page);

		/*
		 * If the pmd isn't transhuge but the folio is large and
		 * is owned by only this process, split it and
		 * deactivate all pages.
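		 * Splitting drops the PTE lock, so the folio is pinned and
		 * locked first; on a successful split the walk re-takes the
		 * lock and retries the same address (pte--, addr -= PAGE_SIZE
		 * below).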
		 */
		if (folio_test_large(folio)) {
			if (folio_mapcount(folio) != 1)
				goto out;
			folio_get(folio);
			if (!folio_trylock(folio)) {
				folio_put(folio);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_folio(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			folio_unlock(folio);
			folio_put(folio);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
			if (!folio_trylock(folio))
				continue;
			/*
			 * If the folio is shared with others, we mustn't clear
			 * the folio's dirty flag.
			 */
			if (folio_mapcount(folio) != 1) {
				folio_unlock(folio);
				continue;
			}

			if (folio_test_swapcache(folio) &&
			    !folio_free_swap(folio)) {
				folio_unlock(folio);
				continue;
			}

			folio_clear_dirty(folio);
			folio_unlock(folio);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so for
			 * portability, remap the pte as old and clean after
			 * clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(&folio->page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry = madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return 0;
}

/*
 * Application no longer needs these pages. If the pages are dirty,
 * it's OK to just throw them away. The app will be more careful about
 * data it wants to keep. Be sure to free swap resources too. The
 * zap_page_range_single call sets things up for shrink_active_list to actually
 * free these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
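 *
 * Illustrative only (userspace, not kernel code): an allocator that wants to
 * give a region back to the system while keeping the mapping could call
 *
 *	madvise(addr, len, MADV_DONTNEED);
 *
 * or use MADV_FREE instead to defer the work until memory pressure
 * (see madvise_free_single_vma() above).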
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do. This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them. There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range_single(vma, start, end - start, NULL);
	return 0;
}

static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long *end,
					    int behavior)
{
	if (!is_vm_hugetlb_page(vma)) {
		unsigned int forbidden = VM_PFNMAP;

		if (behavior != MADV_DONTNEED_LOCKED)
			forbidden |= VM_LOCKED;

		return !(vma->vm_flags & forbidden);
	}

	if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
		return false;
	if (start & ~huge_page_mask(hstate_vma(vma)))
		return false;

	/*
	 * Madvise callers expect the length to be rounded up to PAGE_SIZE
	 * boundaries, and may be unaware that this VMA uses huge pages.
	 * Avoid unexpected data loss by rounding down the number of
	 * huge pages freed.
	 */
	*end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));

	return true;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
		return -EINVAL;

	if (start == end)
		return 0;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = find_vma(mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end. If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		/*
		 * Potential end adjustment for hugetlb vma is OK as
		 * the check below keeps end within vma.
		 */
		if (!madvise_dontneed_free_valid_vma(vma, start, &end,
						     behavior))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end. If the old
			 * vma was split while the mmap_lock was
			 * released the effect of the concurrent
			 * operation may not cause madvise() to
			 * have an undefined result. There may be an
			 * adjacent next vma that we'll walk
			 * next. userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}

static long madvise_populate(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end,
			     int behavior)
{
	const bool write = behavior == MADV_POPULATE_WRITE;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long tmp_end;
	int locked = 1;
	long pages;

	*prev = vma;

	while (start < end) {
		/*
		 * We might have temporarily dropped the lock. For example,
		 * our VMA might have been split.
		 */
		if (!vma || start >= vma->vm_end) {
			vma = vma_lookup(mm, start);
			if (!vma)
				return -ENOMEM;
		}

		tmp_end = min_t(unsigned long, end, vma->vm_end);
		/* Populate (prefault) page tables readable/writable. */
		pages = faultin_vma_page_range(vma, start, tmp_end, write,
					       &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
			*prev = NULL;
			vma = NULL;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM:
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_rwsem. We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

/*
 * Apply an madvise behavior to a region of a vma. madvise_update_vma
 * will handle splitting a vm area into separate areas, each area with its own
 * behavior.
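 *
 * Behaviors that only act on the pages (e.g. MADV_WILLNEED, MADV_DONTNEED,
 * MADV_COLD) return directly from their handlers; the remaining behaviors
 * compute new vm_flags and fall through to madvise_update_vma(). Handlers
 * that drop mmap_lock report it by setting *prev to NULL.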
 */
static int madvise_vma_behavior(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end,
				unsigned long behavior)
{
	int error;
	struct anon_vma_name *anon_name;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		return madvise_populate(vma, prev, start, end, behavior);
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO)
			return -EINVAL;
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED)
			return -EINVAL;
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
			return -EINVAL;
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	case MADV_COLLAPSE:
		return madvise_collapse(vma, prev, start, end);
	}

	anon_name = anon_vma_name(vma);
	anon_vma_name_get(anon_name);
	error = madvise_update_vma(vma, prev, start, end, new_flags,
				   anon_name);
	anon_vma_name_put(anon_name);

out:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
			if (ret == -EOPNOTSUPP)
				ret = 0;
		}

		if (ret)
			return ret;
	}

	return 0;
}
#endif

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
	case MADV_COLLAPSE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

static bool process_madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_WILLNEED:
	case MADV_COLLAPSE:
		return true;
	default:
		return false;
	}
}

/*
 * Walk the vmas in range [start,end), and call the visit function on each one.
 * The visit function will get start and end parameters that cover the overlap
 * between the current vma and the original range. Any unmapped regions in the
 * original range will result in this function returning -ENOMEM while still
 * calling the visit function on all of the existing vmas in the range.
 * Must be called with the mmap_lock held for reading or writing.
 */
static
int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
		      unsigned long end, unsigned long arg,
		      int (*visit)(struct vm_area_struct *vma,
				   struct vm_area_struct **prev, unsigned long start,
				   unsigned long end, unsigned long arg))
{
	struct vm_area_struct *vma;
	struct vm_area_struct *prev;
	unsigned long tmp;
	int unmapped_error = 0;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - this differs from the handling in mlock, etc.
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		int error;

		/* Still start < end. */
		if (!vma)
			return -ENOMEM;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				break;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end).
		 */
		error = visit(vma, &prev, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;
		if (prev)
			vma = find_vma(mm, prev->vm_end);
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}

	return unmapped_error;
}

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_vma_anon_name(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end,
				 unsigned long anon_name)
{
	int error;

	/* Only anonymous mappings can be named */
	if (vma->vm_file && !vma_is_anon_shmem(vma))
		return -EBADF;

	error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
				   (struct anon_vma_name *)anon_name);

	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
				 madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area. The idea is to help the kernel
 * use appropriate read-ahead and caching techniques. The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters. This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
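 *
 * A minimal userspace sketch (illustrative only, error handling omitted):
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	...use buf...
 *	madvise(buf, len, MADV_DONTNEED);	reclaim now, keep the mapping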
 */
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	unsigned long end;
	int error;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return -EINVAL;

	if (!PAGE_ALIGNED(start))
		return -EINVAL;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}

	blk_start_plug(&plug);
	error = madvise_walk_vmas(mm, start, end, behavior,
			madvise_vma_behavior);
	blk_finish_plug(&plug);
	if (write)
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}

SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV], iovec;
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct task_struct *task;
	struct mm_struct *mm;
	size_t total_len;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto free_iov;
	}

	if (!process_madvise_behavior_valid(behavior)) {
		ret = -EINVAL;
		goto release_task;
	}

	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm)) {
		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		goto release_task;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported.
	 */
	if (!capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	total_len = iov_iter_count(&iter);

	while (iov_iter_count(&iter)) {
		iovec = iov_iter_iovec(&iter);
		ret = do_madvise(mm, (unsigned long)iovec.iov_base,
					iovec.iov_len, behavior);
		if (ret < 0)
			break;
		iov_iter_advance(&iter, iovec.iov_len);
	}

	ret = (total_len - iov_iter_count(&iter)) ? : ret;

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
free_iov:
	kfree(iov);
out:
	return ret;
}
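
/*
 * A userspace sketch of driving process_madvise(2) (illustrative only; glibc
 * may not provide a wrapper, so the raw syscall is shown and error handling
 * is omitted):
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	struct iovec vec = { .iov_base = addr, .iov_len = len };
 *
 *	syscall(SYS_process_madvise, pidfd, &vec, 1, MADV_COLD, 0);
 *
 * The caller needs CAP_SYS_NICE and PTRACE_MODE_READ access to the target,
 * as enforced above; only the non-destructive hints accepted by
 * process_madvise_behavior_valid() are allowed.
 */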