// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999 Linus Torvalds
 * Copyright (C) 2002 Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
	case MADV_COLLAPSE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * Update the vm_flags on a region of a vma, splitting it or merging it as
 * necessary. Must be called with mmap_lock held for writing;
 * Caller should ensure anon_name stability by raising its refcount even when
 * anon_name belongs to a valid vma because this function might free that vma.
 */
static int madvise_update_vma(struct vm_area_struct *vma,
			      struct vm_area_struct **prev, unsigned long start,
			      unsigned long end, unsigned long new_flags,
			      struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	pgoff_t pgoff;
	VMA_ITERATOR(vmi, mm, start);

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(&vmi, mm, *prev, start, end, new_flags,
			  vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_name);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(&vmi, vma, start, 1);
		if (error)
			return error;
	}

	if (end != vma->vm_end) {
		error = split_vma(&vmi, vma, end, 0);
		if (error)
			return error;
	}

success:
	/* vm_flags is protected by the mmap_lock held in write mode. */
	vma_start_write(vma);
	vm_flags_reset(vma, new_flags);
	if (!vma->vm_file || vma_is_anon_shmem(vma)) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
		unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	struct swap_iocb *splug = NULL;
	pte_t *ptep = NULL;
	spinlock_t *ptl;
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;

		if (!ptep++) {
			ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
			if (!ptep)
				break;
		}

		pte = ptep_get(ptep);
		if (!is_swap_pte(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		pte_unmap_unlock(ptep, ptl);
		ptep = NULL;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, addr, false, &splug);
		if (page)
			put_page(page);
	}

	if (ptep)
		pte_unmap_unlock(ptep, ptl);
	swap_read_unplug(splug);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry = swapin_walk_pmd_entry,
};

static void shmem_swapin_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end) - 1;
	struct page *page;
	struct swap_iocb *splug = NULL;

	rcu_read_lock();
	xas_for_each(&xas, page, end_index) {
		unsigned long addr;
		swp_entry_t entry;

		if (!xa_is_value(page))
			continue;
		entry = radix_to_swp_entry(page);
		/* There might be swapin error entries in shmem mapping. */
		if (non_swap_entry(entry))
			continue;

		addr = vma->vm_start +
			((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
		xas_pause(&xas);
		rcu_read_unlock();

		page = read_swap_cache_async(entry, mapping_gfp_mask(mapping),
					     vma, addr, false, &splug);
		if (page)
			put_page(page);

		rcu_read_lock();
	}
	rcu_read_unlock();
	swap_read_unplug(splug);
}
#endif	/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
277 */ 278 static long madvise_willneed(struct vm_area_struct *vma, 279 struct vm_area_struct **prev, 280 unsigned long start, unsigned long end) 281 { 282 struct mm_struct *mm = vma->vm_mm; 283 struct file *file = vma->vm_file; 284 loff_t offset; 285 286 *prev = vma; 287 #ifdef CONFIG_SWAP 288 if (!file) { 289 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); 290 lru_add_drain(); /* Push any new pages onto the LRU now */ 291 return 0; 292 } 293 294 if (shmem_mapping(file->f_mapping)) { 295 shmem_swapin_range(vma, start, end, file->f_mapping); 296 lru_add_drain(); /* Push any new pages onto the LRU now */ 297 return 0; 298 } 299 #else 300 if (!file) 301 return -EBADF; 302 #endif 303 304 if (IS_DAX(file_inode(file))) { 305 /* no bad return value, but ignore advice */ 306 return 0; 307 } 308 309 /* 310 * Filesystem's fadvise may need to take various locks. We need to 311 * explicitly grab a reference because the vma (and hence the 312 * vma's reference to the file) can go away as soon as we drop 313 * mmap_lock. 314 */ 315 *prev = NULL; /* tell sys_madvise we drop mmap_lock */ 316 get_file(file); 317 offset = (loff_t)(start - vma->vm_start) 318 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 319 mmap_read_unlock(mm); 320 vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); 321 fput(file); 322 mmap_read_lock(mm); 323 return 0; 324 } 325 326 static inline bool can_do_file_pageout(struct vm_area_struct *vma) 327 { 328 if (!vma->vm_file) 329 return false; 330 /* 331 * paging out pagecache only for non-anonymous mappings that correspond 332 * to the files the calling process could (if tried) open for writing; 333 * otherwise we'd be including shared non-exclusive mappings, which 334 * opens a side channel. 335 */ 336 return inode_owner_or_capable(&nop_mnt_idmap, 337 file_inode(vma->vm_file)) || 338 file_permission(vma->vm_file, MAY_WRITE) == 0; 339 } 340 341 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, 342 unsigned long addr, unsigned long end, 343 struct mm_walk *walk) 344 { 345 struct madvise_walk_private *private = walk->private; 346 struct mmu_gather *tlb = private->tlb; 347 bool pageout = private->pageout; 348 struct mm_struct *mm = tlb->mm; 349 struct vm_area_struct *vma = walk->vma; 350 pte_t *start_pte, *pte, ptent; 351 spinlock_t *ptl; 352 struct folio *folio = NULL; 353 LIST_HEAD(folio_list); 354 bool pageout_anon_only_filter; 355 356 if (fatal_signal_pending(current)) 357 return -EINTR; 358 359 pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) && 360 !can_do_file_pageout(vma); 361 362 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 363 if (pmd_trans_huge(*pmd)) { 364 pmd_t orig_pmd; 365 unsigned long next = pmd_addr_end(addr, end); 366 367 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 368 ptl = pmd_trans_huge_lock(pmd, vma); 369 if (!ptl) 370 return 0; 371 372 orig_pmd = *pmd; 373 if (is_huge_zero_pmd(orig_pmd)) 374 goto huge_unlock; 375 376 if (unlikely(!pmd_present(orig_pmd))) { 377 VM_BUG_ON(thp_migration_supported() && 378 !is_pmd_migration_entry(orig_pmd)); 379 goto huge_unlock; 380 } 381 382 folio = pfn_folio(pmd_pfn(orig_pmd)); 383 384 /* Do not interfere with other mappings of this folio */ 385 if (folio_mapcount(folio) != 1) 386 goto huge_unlock; 387 388 if (pageout_anon_only_filter && !folio_test_anon(folio)) 389 goto huge_unlock; 390 391 if (next - addr != HPAGE_PMD_SIZE) { 392 int err; 393 394 folio_get(folio); 395 spin_unlock(ptl); 396 folio_lock(folio); 397 err = split_folio(folio); 398 folio_unlock(folio); 399 folio_put(folio); 400 if (!err) 401 
				goto regular_folio;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (folio_test_active(folio))
			folio_set_workingset(folio);
		if (pageout) {
			if (folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&folio_list);
		return 0;
	}

regular_folio:
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = ptep_get(pte);

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * Splitting a THP page is expensive, so split it only if we
		 * are sure it's worth it, i.e. if we are the only owner.
		 */
		if (folio_test_large(folio)) {
			int err;

			if (folio_mapcount(folio) != 1)
				break;
			if (pageout_anon_only_filter && !folio_test_anon(folio))
				break;
			if (!folio_trylock(folio))
				break;
			folio_get(folio);
			arch_leave_lazy_mmu_mode();
			pte_unmap_unlock(start_pte, ptl);
			start_pte = NULL;
			err = split_folio(folio);
			folio_unlock(folio);
			folio_put(folio);
			if (err)
				break;
			start_pte = pte =
				pte_offset_map_lock(mm, pmd, addr, &ptl);
			if (!start_pte)
				break;
			arch_enter_lazy_mmu_mode();
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/*
		 * Do not interfere with other mappings of this folio, and
		 * skip non-LRU folios.
		 */
		if (!folio_test_lru(folio) || folio_mapcount(folio) != 1)
			continue;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			continue;

		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a folio to accelerate its reclaim.
		 * The VM can't reclaim the folio unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * which will miss the recent access history.
511 */ 512 folio_clear_referenced(folio); 513 folio_test_clear_young(folio); 514 if (folio_test_active(folio)) 515 folio_set_workingset(folio); 516 if (pageout) { 517 if (folio_isolate_lru(folio)) { 518 if (folio_test_unevictable(folio)) 519 folio_putback_lru(folio); 520 else 521 list_add(&folio->lru, &folio_list); 522 } 523 } else 524 folio_deactivate(folio); 525 } 526 527 if (start_pte) { 528 arch_leave_lazy_mmu_mode(); 529 pte_unmap_unlock(start_pte, ptl); 530 } 531 if (pageout) 532 reclaim_pages(&folio_list); 533 cond_resched(); 534 535 return 0; 536 } 537 538 static const struct mm_walk_ops cold_walk_ops = { 539 .pmd_entry = madvise_cold_or_pageout_pte_range, 540 }; 541 542 static void madvise_cold_page_range(struct mmu_gather *tlb, 543 struct vm_area_struct *vma, 544 unsigned long addr, unsigned long end) 545 { 546 struct madvise_walk_private walk_private = { 547 .pageout = false, 548 .tlb = tlb, 549 }; 550 551 tlb_start_vma(tlb, vma); 552 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); 553 tlb_end_vma(tlb, vma); 554 } 555 556 static inline bool can_madv_lru_vma(struct vm_area_struct *vma) 557 { 558 return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB)); 559 } 560 561 static long madvise_cold(struct vm_area_struct *vma, 562 struct vm_area_struct **prev, 563 unsigned long start_addr, unsigned long end_addr) 564 { 565 struct mm_struct *mm = vma->vm_mm; 566 struct mmu_gather tlb; 567 568 *prev = vma; 569 if (!can_madv_lru_vma(vma)) 570 return -EINVAL; 571 572 lru_add_drain(); 573 tlb_gather_mmu(&tlb, mm); 574 madvise_cold_page_range(&tlb, vma, start_addr, end_addr); 575 tlb_finish_mmu(&tlb); 576 577 return 0; 578 } 579 580 static void madvise_pageout_page_range(struct mmu_gather *tlb, 581 struct vm_area_struct *vma, 582 unsigned long addr, unsigned long end) 583 { 584 struct madvise_walk_private walk_private = { 585 .pageout = true, 586 .tlb = tlb, 587 }; 588 589 tlb_start_vma(tlb, vma); 590 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); 591 tlb_end_vma(tlb, vma); 592 } 593 594 static long madvise_pageout(struct vm_area_struct *vma, 595 struct vm_area_struct **prev, 596 unsigned long start_addr, unsigned long end_addr) 597 { 598 struct mm_struct *mm = vma->vm_mm; 599 struct mmu_gather tlb; 600 601 *prev = vma; 602 if (!can_madv_lru_vma(vma)) 603 return -EINVAL; 604 605 /* 606 * If the VMA belongs to a private file mapping, there can be private 607 * dirty pages which can be paged out if even this process is neither 608 * owner nor write capable of the file. We allow private file mappings 609 * further to pageout dirty anon pages. 
610 */ 611 if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) && 612 (vma->vm_flags & VM_MAYSHARE))) 613 return 0; 614 615 lru_add_drain(); 616 tlb_gather_mmu(&tlb, mm); 617 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr); 618 tlb_finish_mmu(&tlb); 619 620 return 0; 621 } 622 623 static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, 624 unsigned long end, struct mm_walk *walk) 625 626 { 627 struct mmu_gather *tlb = walk->private; 628 struct mm_struct *mm = tlb->mm; 629 struct vm_area_struct *vma = walk->vma; 630 spinlock_t *ptl; 631 pte_t *start_pte, *pte, ptent; 632 struct folio *folio; 633 int nr_swap = 0; 634 unsigned long next; 635 636 next = pmd_addr_end(addr, end); 637 if (pmd_trans_huge(*pmd)) 638 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) 639 return 0; 640 641 tlb_change_page_size(tlb, PAGE_SIZE); 642 start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 643 if (!start_pte) 644 return 0; 645 flush_tlb_batched_pending(mm); 646 arch_enter_lazy_mmu_mode(); 647 for (; addr != end; pte++, addr += PAGE_SIZE) { 648 ptent = ptep_get(pte); 649 650 if (pte_none(ptent)) 651 continue; 652 /* 653 * If the pte has swp_entry, just clear page table to 654 * prevent swap-in which is more expensive rather than 655 * (page allocation + zeroing). 656 */ 657 if (!pte_present(ptent)) { 658 swp_entry_t entry; 659 660 entry = pte_to_swp_entry(ptent); 661 if (!non_swap_entry(entry)) { 662 nr_swap--; 663 free_swap_and_cache(entry); 664 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); 665 } else if (is_hwpoison_entry(entry) || 666 is_poisoned_swp_entry(entry)) { 667 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); 668 } 669 continue; 670 } 671 672 folio = vm_normal_folio(vma, addr, ptent); 673 if (!folio || folio_is_zone_device(folio)) 674 continue; 675 676 /* 677 * If pmd isn't transhuge but the folio is large and 678 * is owned by only this process, split it and 679 * deactivate all pages. 680 */ 681 if (folio_test_large(folio)) { 682 int err; 683 684 if (folio_mapcount(folio) != 1) 685 break; 686 if (!folio_trylock(folio)) 687 break; 688 folio_get(folio); 689 arch_leave_lazy_mmu_mode(); 690 pte_unmap_unlock(start_pte, ptl); 691 start_pte = NULL; 692 err = split_folio(folio); 693 folio_unlock(folio); 694 folio_put(folio); 695 if (err) 696 break; 697 start_pte = pte = 698 pte_offset_map_lock(mm, pmd, addr, &ptl); 699 if (!start_pte) 700 break; 701 arch_enter_lazy_mmu_mode(); 702 pte--; 703 addr -= PAGE_SIZE; 704 continue; 705 } 706 707 if (folio_test_swapcache(folio) || folio_test_dirty(folio)) { 708 if (!folio_trylock(folio)) 709 continue; 710 /* 711 * If folio is shared with others, we mustn't clear 712 * the folio's dirty flag. 713 */ 714 if (folio_mapcount(folio) != 1) { 715 folio_unlock(folio); 716 continue; 717 } 718 719 if (folio_test_swapcache(folio) && 720 !folio_free_swap(folio)) { 721 folio_unlock(folio); 722 continue; 723 } 724 725 folio_clear_dirty(folio); 726 folio_unlock(folio); 727 } 728 729 if (pte_young(ptent) || pte_dirty(ptent)) { 730 /* 731 * Some of architecture(ex, PPC) don't update TLB 732 * with set_pte_at and tlb_remove_tlb_entry so for 733 * the portability, remap the pte with old|clean 734 * after pte clearing. 
735 */ 736 ptent = ptep_get_and_clear_full(mm, addr, pte, 737 tlb->fullmm); 738 739 ptent = pte_mkold(ptent); 740 ptent = pte_mkclean(ptent); 741 set_pte_at(mm, addr, pte, ptent); 742 tlb_remove_tlb_entry(tlb, pte, addr); 743 } 744 folio_mark_lazyfree(folio); 745 } 746 747 if (nr_swap) { 748 if (current->mm == mm) 749 sync_mm_rss(mm); 750 add_mm_counter(mm, MM_SWAPENTS, nr_swap); 751 } 752 if (start_pte) { 753 arch_leave_lazy_mmu_mode(); 754 pte_unmap_unlock(start_pte, ptl); 755 } 756 cond_resched(); 757 758 return 0; 759 } 760 761 static const struct mm_walk_ops madvise_free_walk_ops = { 762 .pmd_entry = madvise_free_pte_range, 763 }; 764 765 static int madvise_free_single_vma(struct vm_area_struct *vma, 766 unsigned long start_addr, unsigned long end_addr) 767 { 768 struct mm_struct *mm = vma->vm_mm; 769 struct mmu_notifier_range range; 770 struct mmu_gather tlb; 771 772 /* MADV_FREE works for only anon vma at the moment */ 773 if (!vma_is_anonymous(vma)) 774 return -EINVAL; 775 776 range.start = max(vma->vm_start, start_addr); 777 if (range.start >= vma->vm_end) 778 return -EINVAL; 779 range.end = min(vma->vm_end, end_addr); 780 if (range.end <= vma->vm_start) 781 return -EINVAL; 782 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 783 range.start, range.end); 784 785 lru_add_drain(); 786 tlb_gather_mmu(&tlb, mm); 787 update_hiwater_rss(mm); 788 789 mmu_notifier_invalidate_range_start(&range); 790 tlb_start_vma(&tlb, vma); 791 walk_page_range(vma->vm_mm, range.start, range.end, 792 &madvise_free_walk_ops, &tlb); 793 tlb_end_vma(&tlb, vma); 794 mmu_notifier_invalidate_range_end(&range); 795 tlb_finish_mmu(&tlb); 796 797 return 0; 798 } 799 800 /* 801 * Application no longer needs these pages. If the pages are dirty, 802 * it's OK to just throw them away. The app will be more careful about 803 * data it wants to keep. Be sure to free swap resources too. The 804 * zap_page_range_single call sets things up for shrink_active_list to actually 805 * free these pages later if no one else has touched them in the meantime, 806 * although we could add these pages to a global reuse list for 807 * shrink_active_list to pick up before reclaiming other pages. 808 * 809 * NB: This interface discards data rather than pushes it out to swap, 810 * as some implementations do. This has performance implications for 811 * applications like large transactional databases which want to discard 812 * pages in anonymous maps after committing to backing store the data 813 * that was kept in them. There is no reason to write this data out to 814 * the swap area if the application is discarding it. 815 * 816 * An interface that causes the system to free clean pages and flush 817 * dirty pages is already available as msync(MS_INVALIDATE). 
818 */ 819 static long madvise_dontneed_single_vma(struct vm_area_struct *vma, 820 unsigned long start, unsigned long end) 821 { 822 zap_page_range_single(vma, start, end - start, NULL); 823 return 0; 824 } 825 826 static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma, 827 unsigned long start, 828 unsigned long *end, 829 int behavior) 830 { 831 if (!is_vm_hugetlb_page(vma)) { 832 unsigned int forbidden = VM_PFNMAP; 833 834 if (behavior != MADV_DONTNEED_LOCKED) 835 forbidden |= VM_LOCKED; 836 837 return !(vma->vm_flags & forbidden); 838 } 839 840 if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED) 841 return false; 842 if (start & ~huge_page_mask(hstate_vma(vma))) 843 return false; 844 845 /* 846 * Madvise callers expect the length to be rounded up to PAGE_SIZE 847 * boundaries, and may be unaware that this VMA uses huge pages. 848 * Avoid unexpected data loss by rounding down the number of 849 * huge pages freed. 850 */ 851 *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma))); 852 853 return true; 854 } 855 856 static long madvise_dontneed_free(struct vm_area_struct *vma, 857 struct vm_area_struct **prev, 858 unsigned long start, unsigned long end, 859 int behavior) 860 { 861 struct mm_struct *mm = vma->vm_mm; 862 863 *prev = vma; 864 if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior)) 865 return -EINVAL; 866 867 if (start == end) 868 return 0; 869 870 if (!userfaultfd_remove(vma, start, end)) { 871 *prev = NULL; /* mmap_lock has been dropped, prev is stale */ 872 873 mmap_read_lock(mm); 874 vma = vma_lookup(mm, start); 875 if (!vma) 876 return -ENOMEM; 877 /* 878 * Potential end adjustment for hugetlb vma is OK as 879 * the check below keeps end within vma. 880 */ 881 if (!madvise_dontneed_free_valid_vma(vma, start, &end, 882 behavior)) 883 return -EINVAL; 884 if (end > vma->vm_end) { 885 /* 886 * Don't fail if end > vma->vm_end. If the old 887 * vma was split while the mmap_lock was 888 * released the effect of the concurrent 889 * operation may not cause madvise() to 890 * have an undefined result. There may be an 891 * adjacent next vma that we'll walk 892 * next. userfaultfd_remove() will generate an 893 * UFFD_EVENT_REMOVE repetition on the 894 * end-vma->vm_end range, but the manager can 895 * handle a repetition fine. 896 */ 897 end = vma->vm_end; 898 } 899 VM_WARN_ON(start >= end); 900 } 901 902 if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED) 903 return madvise_dontneed_single_vma(vma, start, end); 904 else if (behavior == MADV_FREE) 905 return madvise_free_single_vma(vma, start, end); 906 else 907 return -EINVAL; 908 } 909 910 static long madvise_populate(struct vm_area_struct *vma, 911 struct vm_area_struct **prev, 912 unsigned long start, unsigned long end, 913 int behavior) 914 { 915 const bool write = behavior == MADV_POPULATE_WRITE; 916 struct mm_struct *mm = vma->vm_mm; 917 unsigned long tmp_end; 918 int locked = 1; 919 long pages; 920 921 *prev = vma; 922 923 while (start < end) { 924 /* 925 * We might have temporarily dropped the lock. For example, 926 * our VMA might have been split. 927 */ 928 if (!vma || start >= vma->vm_end) { 929 vma = vma_lookup(mm, start); 930 if (!vma) 931 return -ENOMEM; 932 } 933 934 tmp_end = min_t(unsigned long, end, vma->vm_end); 935 /* Populate (prefault) page tables readable/writable. 
		pages = faultin_vma_page_range(vma, start, tmp_end, write,
					       &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
			*prev = NULL;
			vma = NULL;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM:
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_rwsem.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}

/*
 * Apply an madvise behavior to a region of a vma.  madvise_update_vma
 * will handle splitting a vm area into separate areas, each area with its own
 * behavior.
1020 */ 1021 static int madvise_vma_behavior(struct vm_area_struct *vma, 1022 struct vm_area_struct **prev, 1023 unsigned long start, unsigned long end, 1024 unsigned long behavior) 1025 { 1026 int error; 1027 struct anon_vma_name *anon_name; 1028 unsigned long new_flags = vma->vm_flags; 1029 1030 switch (behavior) { 1031 case MADV_REMOVE: 1032 return madvise_remove(vma, prev, start, end); 1033 case MADV_WILLNEED: 1034 return madvise_willneed(vma, prev, start, end); 1035 case MADV_COLD: 1036 return madvise_cold(vma, prev, start, end); 1037 case MADV_PAGEOUT: 1038 return madvise_pageout(vma, prev, start, end); 1039 case MADV_FREE: 1040 case MADV_DONTNEED: 1041 case MADV_DONTNEED_LOCKED: 1042 return madvise_dontneed_free(vma, prev, start, end, behavior); 1043 case MADV_POPULATE_READ: 1044 case MADV_POPULATE_WRITE: 1045 return madvise_populate(vma, prev, start, end, behavior); 1046 case MADV_NORMAL: 1047 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; 1048 break; 1049 case MADV_SEQUENTIAL: 1050 new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; 1051 break; 1052 case MADV_RANDOM: 1053 new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; 1054 break; 1055 case MADV_DONTFORK: 1056 new_flags |= VM_DONTCOPY; 1057 break; 1058 case MADV_DOFORK: 1059 if (vma->vm_flags & VM_IO) 1060 return -EINVAL; 1061 new_flags &= ~VM_DONTCOPY; 1062 break; 1063 case MADV_WIPEONFORK: 1064 /* MADV_WIPEONFORK is only supported on anonymous memory. */ 1065 if (vma->vm_file || vma->vm_flags & VM_SHARED) 1066 return -EINVAL; 1067 new_flags |= VM_WIPEONFORK; 1068 break; 1069 case MADV_KEEPONFORK: 1070 new_flags &= ~VM_WIPEONFORK; 1071 break; 1072 case MADV_DONTDUMP: 1073 new_flags |= VM_DONTDUMP; 1074 break; 1075 case MADV_DODUMP: 1076 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) 1077 return -EINVAL; 1078 new_flags &= ~VM_DONTDUMP; 1079 break; 1080 case MADV_MERGEABLE: 1081 case MADV_UNMERGEABLE: 1082 error = ksm_madvise(vma, start, end, behavior, &new_flags); 1083 if (error) 1084 goto out; 1085 break; 1086 case MADV_HUGEPAGE: 1087 case MADV_NOHUGEPAGE: 1088 error = hugepage_madvise(vma, &new_flags, behavior); 1089 if (error) 1090 goto out; 1091 break; 1092 case MADV_COLLAPSE: 1093 return madvise_collapse(vma, prev, start, end); 1094 } 1095 1096 anon_name = anon_vma_name(vma); 1097 anon_vma_name_get(anon_name); 1098 error = madvise_update_vma(vma, prev, start, end, new_flags, 1099 anon_name); 1100 anon_vma_name_put(anon_name); 1101 1102 out: 1103 /* 1104 * madvise() returns EAGAIN if kernel resources, such as 1105 * slab, are temporarily unavailable. 1106 */ 1107 if (error == -ENOMEM) 1108 error = -EAGAIN; 1109 return error; 1110 } 1111 1112 #ifdef CONFIG_MEMORY_FAILURE 1113 /* 1114 * Error injection support for memory error handling. 1115 */ 1116 static int madvise_inject_error(int behavior, 1117 unsigned long start, unsigned long end) 1118 { 1119 unsigned long size; 1120 1121 if (!capable(CAP_SYS_ADMIN)) 1122 return -EPERM; 1123 1124 1125 for (; start < end; start += size) { 1126 unsigned long pfn; 1127 struct page *page; 1128 int ret; 1129 1130 ret = get_user_pages_fast(start, 1, 0, &page); 1131 if (ret != 1) 1132 return ret; 1133 pfn = page_to_pfn(page); 1134 1135 /* 1136 * When soft offlining hugepages, after migrating the page 1137 * we dissolve it, therefore in the second loop "page" will 1138 * no longer be a compound page. 
1139 */ 1140 size = page_size(compound_head(page)); 1141 1142 if (behavior == MADV_SOFT_OFFLINE) { 1143 pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n", 1144 pfn, start); 1145 ret = soft_offline_page(pfn, MF_COUNT_INCREASED); 1146 } else { 1147 pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n", 1148 pfn, start); 1149 ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED); 1150 if (ret == -EOPNOTSUPP) 1151 ret = 0; 1152 } 1153 1154 if (ret) 1155 return ret; 1156 } 1157 1158 return 0; 1159 } 1160 #endif 1161 1162 static bool 1163 madvise_behavior_valid(int behavior) 1164 { 1165 switch (behavior) { 1166 case MADV_DOFORK: 1167 case MADV_DONTFORK: 1168 case MADV_NORMAL: 1169 case MADV_SEQUENTIAL: 1170 case MADV_RANDOM: 1171 case MADV_REMOVE: 1172 case MADV_WILLNEED: 1173 case MADV_DONTNEED: 1174 case MADV_DONTNEED_LOCKED: 1175 case MADV_FREE: 1176 case MADV_COLD: 1177 case MADV_PAGEOUT: 1178 case MADV_POPULATE_READ: 1179 case MADV_POPULATE_WRITE: 1180 #ifdef CONFIG_KSM 1181 case MADV_MERGEABLE: 1182 case MADV_UNMERGEABLE: 1183 #endif 1184 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1185 case MADV_HUGEPAGE: 1186 case MADV_NOHUGEPAGE: 1187 case MADV_COLLAPSE: 1188 #endif 1189 case MADV_DONTDUMP: 1190 case MADV_DODUMP: 1191 case MADV_WIPEONFORK: 1192 case MADV_KEEPONFORK: 1193 #ifdef CONFIG_MEMORY_FAILURE 1194 case MADV_SOFT_OFFLINE: 1195 case MADV_HWPOISON: 1196 #endif 1197 return true; 1198 1199 default: 1200 return false; 1201 } 1202 } 1203 1204 static bool process_madvise_behavior_valid(int behavior) 1205 { 1206 switch (behavior) { 1207 case MADV_COLD: 1208 case MADV_PAGEOUT: 1209 case MADV_WILLNEED: 1210 case MADV_COLLAPSE: 1211 return true; 1212 default: 1213 return false; 1214 } 1215 } 1216 1217 /* 1218 * Walk the vmas in range [start,end), and call the visit function on each one. 1219 * The visit function will get start and end parameters that cover the overlap 1220 * between the current vma and the original range. Any unmapped regions in the 1221 * original range will result in this function returning -ENOMEM while still 1222 * calling the visit function on all of the existing vmas in the range. 1223 * Must be called with the mmap_lock held for reading or writing. 1224 */ 1225 static 1226 int madvise_walk_vmas(struct mm_struct *mm, unsigned long start, 1227 unsigned long end, unsigned long arg, 1228 int (*visit)(struct vm_area_struct *vma, 1229 struct vm_area_struct **prev, unsigned long start, 1230 unsigned long end, unsigned long arg)) 1231 { 1232 struct vm_area_struct *vma; 1233 struct vm_area_struct *prev; 1234 unsigned long tmp; 1235 int unmapped_error = 0; 1236 1237 /* 1238 * If the interval [start,end) covers some unmapped address 1239 * ranges, just ignore them, but return -ENOMEM at the end. 1240 * - different from the way of handling in mlock etc. 1241 */ 1242 vma = find_vma_prev(mm, start, &prev); 1243 if (vma && start > vma->vm_start) 1244 prev = vma; 1245 1246 for (;;) { 1247 int error; 1248 1249 /* Still start < end. */ 1250 if (!vma) 1251 return -ENOMEM; 1252 1253 /* Here start < (end|vma->vm_end). */ 1254 if (start < vma->vm_start) { 1255 unmapped_error = -ENOMEM; 1256 start = vma->vm_start; 1257 if (start >= end) 1258 break; 1259 } 1260 1261 /* Here vma->vm_start <= start < (end|vma->vm_end) */ 1262 tmp = vma->vm_end; 1263 if (end < tmp) 1264 tmp = end; 1265 1266 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). 
		error = visit(vma, &prev, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;
		if (prev)
			vma = find_vma(mm, prev->vm_end);
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}

	return unmapped_error;
}

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_vma_anon_name(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end,
				 unsigned long anon_name)
{
	int error;

	/* Only anonymous mappings can be named */
	if (vma->vm_file && !vma_is_anon_shmem(vma))
		return -EBADF;

	error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
				   (struct anon_vma_name *)anon_name);

	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
				 madvise_vma_anon_name);
}
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from the child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
 *  MADV_HWPOISON - trigger the memory error handler as if the given memory
 *		range were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
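 *
 * Illustrative userspace sketch (not kernel code): advising the kernel about a
 * large region that will be streamed through once.  "buf" and "sz" are
 * hypothetical names for an existing mmap()ed region and its length.
 *
 *	madvise(buf, sz, MADV_SEQUENTIAL);	// aggressive readahead, early free
 *	madvise(buf, sz, MADV_WILLNEED);	// start asynchronous read-in now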
1403 */ 1404 int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior) 1405 { 1406 unsigned long end; 1407 int error; 1408 int write; 1409 size_t len; 1410 struct blk_plug plug; 1411 1412 if (!madvise_behavior_valid(behavior)) 1413 return -EINVAL; 1414 1415 if (!PAGE_ALIGNED(start)) 1416 return -EINVAL; 1417 len = PAGE_ALIGN(len_in); 1418 1419 /* Check to see whether len was rounded up from small -ve to zero */ 1420 if (len_in && !len) 1421 return -EINVAL; 1422 1423 end = start + len; 1424 if (end < start) 1425 return -EINVAL; 1426 1427 if (end == start) 1428 return 0; 1429 1430 #ifdef CONFIG_MEMORY_FAILURE 1431 if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) 1432 return madvise_inject_error(behavior, start, start + len_in); 1433 #endif 1434 1435 write = madvise_need_mmap_write(behavior); 1436 if (write) { 1437 if (mmap_write_lock_killable(mm)) 1438 return -EINTR; 1439 } else { 1440 mmap_read_lock(mm); 1441 } 1442 1443 start = untagged_addr_remote(mm, start); 1444 end = start + len; 1445 1446 blk_start_plug(&plug); 1447 error = madvise_walk_vmas(mm, start, end, behavior, 1448 madvise_vma_behavior); 1449 blk_finish_plug(&plug); 1450 if (write) 1451 mmap_write_unlock(mm); 1452 else 1453 mmap_read_unlock(mm); 1454 1455 return error; 1456 } 1457 1458 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) 1459 { 1460 return do_madvise(current->mm, start, len_in, behavior); 1461 } 1462 1463 SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, 1464 size_t, vlen, int, behavior, unsigned int, flags) 1465 { 1466 ssize_t ret; 1467 struct iovec iovstack[UIO_FASTIOV]; 1468 struct iovec *iov = iovstack; 1469 struct iov_iter iter; 1470 struct task_struct *task; 1471 struct mm_struct *mm; 1472 size_t total_len; 1473 unsigned int f_flags; 1474 1475 if (flags != 0) { 1476 ret = -EINVAL; 1477 goto out; 1478 } 1479 1480 ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); 1481 if (ret < 0) 1482 goto out; 1483 1484 task = pidfd_get_task(pidfd, &f_flags); 1485 if (IS_ERR(task)) { 1486 ret = PTR_ERR(task); 1487 goto free_iov; 1488 } 1489 1490 if (!process_madvise_behavior_valid(behavior)) { 1491 ret = -EINVAL; 1492 goto release_task; 1493 } 1494 1495 /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */ 1496 mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); 1497 if (IS_ERR_OR_NULL(mm)) { 1498 ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; 1499 goto release_task; 1500 } 1501 1502 /* 1503 * Require CAP_SYS_NICE for influencing process performance. Note that 1504 * only non-destructive hints are currently supported. 1505 */ 1506 if (!capable(CAP_SYS_NICE)) { 1507 ret = -EPERM; 1508 goto release_mm; 1509 } 1510 1511 total_len = iov_iter_count(&iter); 1512 1513 while (iov_iter_count(&iter)) { 1514 ret = do_madvise(mm, (unsigned long)iter_iov_addr(&iter), 1515 iter_iov_len(&iter), behavior); 1516 if (ret < 0) 1517 break; 1518 iov_iter_advance(&iter, iter_iov_len(&iter)); 1519 } 1520 1521 ret = (total_len - iov_iter_count(&iter)) ? : ret; 1522 1523 release_mm: 1524 mmput(mm); 1525 release_task: 1526 put_task_struct(task); 1527 free_iov: 1528 kfree(iov); 1529 out: 1530 return ret; 1531 } 1532