1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * linux/mm/madvise.c 4 * 5 * Copyright (C) 1999 Linus Torvalds 6 * Copyright (C) 2002 Christoph Hellwig 7 */ 8 9 #include <linux/mman.h> 10 #include <linux/pagemap.h> 11 #include <linux/syscalls.h> 12 #include <linux/mempolicy.h> 13 #include <linux/page-isolation.h> 14 #include <linux/page_idle.h> 15 #include <linux/userfaultfd_k.h> 16 #include <linux/hugetlb.h> 17 #include <linux/falloc.h> 18 #include <linux/fadvise.h> 19 #include <linux/sched.h> 20 #include <linux/sched/mm.h> 21 #include <linux/mm_inline.h> 22 #include <linux/string.h> 23 #include <linux/uio.h> 24 #include <linux/ksm.h> 25 #include <linux/fs.h> 26 #include <linux/file.h> 27 #include <linux/blkdev.h> 28 #include <linux/backing-dev.h> 29 #include <linux/pagewalk.h> 30 #include <linux/swap.h> 31 #include <linux/swapops.h> 32 #include <linux/shmem_fs.h> 33 #include <linux/mmu_notifier.h> 34 35 #include <asm/tlb.h> 36 37 #include "internal.h" 38 #include "swap.h" 39 40 struct madvise_walk_private { 41 struct mmu_gather *tlb; 42 bool pageout; 43 }; 44 45 /* 46 * Any behaviour which results in changes to the vma->vm_flags needs to 47 * take mmap_lock for writing. Others, which simply traverse vmas, need 48 * to only take it for reading. 49 */ 50 static int madvise_need_mmap_write(int behavior) 51 { 52 switch (behavior) { 53 case MADV_REMOVE: 54 case MADV_WILLNEED: 55 case MADV_DONTNEED: 56 case MADV_DONTNEED_LOCKED: 57 case MADV_COLD: 58 case MADV_PAGEOUT: 59 case MADV_FREE: 60 case MADV_POPULATE_READ: 61 case MADV_POPULATE_WRITE: 62 case MADV_COLLAPSE: 63 return 0; 64 default: 65 /* be safe, default to 1. list exceptions explicitly */ 66 return 1; 67 } 68 } 69 70 #ifdef CONFIG_ANON_VMA_NAME 71 struct anon_vma_name *anon_vma_name_alloc(const char *name) 72 { 73 struct anon_vma_name *anon_name; 74 size_t count; 75 76 /* Add 1 for NUL terminator at the end of the anon_name->name */ 77 count = strlen(name) + 1; 78 anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL); 79 if (anon_name) { 80 kref_init(&anon_name->kref); 81 memcpy(anon_name->name, name, count); 82 } 83 84 return anon_name; 85 } 86 87 void anon_vma_name_free(struct kref *kref) 88 { 89 struct anon_vma_name *anon_name = 90 container_of(kref, struct anon_vma_name, kref); 91 kfree(anon_name); 92 } 93 94 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) 95 { 96 mmap_assert_locked(vma->vm_mm); 97 98 return vma->anon_name; 99 } 100 101 /* mmap_lock should be write-locked */ 102 static int replace_anon_vma_name(struct vm_area_struct *vma, 103 struct anon_vma_name *anon_name) 104 { 105 struct anon_vma_name *orig_name = anon_vma_name(vma); 106 107 if (!anon_name) { 108 vma->anon_name = NULL; 109 anon_vma_name_put(orig_name); 110 return 0; 111 } 112 113 if (anon_vma_name_eq(orig_name, anon_name)) 114 return 0; 115 116 vma->anon_name = anon_vma_name_reuse(anon_name); 117 anon_vma_name_put(orig_name); 118 119 return 0; 120 } 121 #else /* CONFIG_ANON_VMA_NAME */ 122 static int replace_anon_vma_name(struct vm_area_struct *vma, 123 struct anon_vma_name *anon_name) 124 { 125 if (anon_name) 126 return -EINVAL; 127 128 return 0; 129 } 130 #endif /* CONFIG_ANON_VMA_NAME */ 131 /* 132 * Update the vm_flags on region of a vma, splitting it or merging it as 133 * necessary. Must be called with mmap_lock held for writing; 134 * Caller should ensure anon_name stability by raising its refcount even when 135 * anon_name belongs to a valid vma because this function might free that vma. 
136 */ 137 static int madvise_update_vma(struct vm_area_struct *vma, 138 struct vm_area_struct **prev, unsigned long start, 139 unsigned long end, unsigned long new_flags, 140 struct anon_vma_name *anon_name) 141 { 142 struct mm_struct *mm = vma->vm_mm; 143 int error; 144 pgoff_t pgoff; 145 VMA_ITERATOR(vmi, mm, 0); 146 147 if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) { 148 *prev = vma; 149 return 0; 150 } 151 152 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); 153 *prev = vma_merge(&vmi, mm, *prev, start, end, new_flags, 154 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), 155 vma->vm_userfaultfd_ctx, anon_name); 156 if (*prev) { 157 vma = *prev; 158 goto success; 159 } 160 161 *prev = vma; 162 163 if (start != vma->vm_start) { 164 if (unlikely(mm->map_count >= sysctl_max_map_count)) 165 return -ENOMEM; 166 error = __split_vma(&vmi, vma, start, 1); 167 if (error) 168 return error; 169 } 170 171 if (end != vma->vm_end) { 172 if (unlikely(mm->map_count >= sysctl_max_map_count)) 173 return -ENOMEM; 174 error = __split_vma(&vmi, vma, end, 0); 175 if (error) 176 return error; 177 } 178 179 success: 180 /* 181 * vm_flags is protected by the mmap_lock held in write mode. 182 */ 183 vma->vm_flags = new_flags; 184 if (!vma->vm_file || vma_is_anon_shmem(vma)) { 185 error = replace_anon_vma_name(vma, anon_name); 186 if (error) 187 return error; 188 } 189 190 return 0; 191 } 192 193 #ifdef CONFIG_SWAP 194 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, 195 unsigned long end, struct mm_walk *walk) 196 { 197 struct vm_area_struct *vma = walk->private; 198 unsigned long index; 199 struct swap_iocb *splug = NULL; 200 201 if (pmd_none_or_trans_huge_or_clear_bad(pmd)) 202 return 0; 203 204 for (index = start; index != end; index += PAGE_SIZE) { 205 pte_t pte; 206 swp_entry_t entry; 207 struct page *page; 208 spinlock_t *ptl; 209 pte_t *ptep; 210 211 ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl); 212 pte = *ptep; 213 pte_unmap_unlock(ptep, ptl); 214 215 if (!is_swap_pte(pte)) 216 continue; 217 entry = pte_to_swp_entry(pte); 218 if (unlikely(non_swap_entry(entry))) 219 continue; 220 221 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, 222 vma, index, false, &splug); 223 if (page) 224 put_page(page); 225 } 226 swap_read_unplug(splug); 227 cond_resched(); 228 229 return 0; 230 } 231 232 static const struct mm_walk_ops swapin_walk_ops = { 233 .pmd_entry = swapin_walk_pmd_entry, 234 }; 235 236 static void force_shm_swapin_readahead(struct vm_area_struct *vma, 237 unsigned long start, unsigned long end, 238 struct address_space *mapping) 239 { 240 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); 241 pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1); 242 struct page *page; 243 struct swap_iocb *splug = NULL; 244 245 rcu_read_lock(); 246 xas_for_each(&xas, page, end_index) { 247 swp_entry_t swap; 248 249 if (!xa_is_value(page)) 250 continue; 251 swap = radix_to_swp_entry(page); 252 /* There might be swapin error entries in shmem mapping. */ 253 if (non_swap_entry(swap)) 254 continue; 255 xas_pause(&xas); 256 rcu_read_unlock(); 257 258 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, 259 NULL, 0, false, &splug); 260 if (page) 261 put_page(page); 262 263 rcu_read_lock(); 264 } 265 rcu_read_unlock(); 266 swap_read_unplug(splug); 267 268 lru_add_drain(); /* Push any new pages onto the LRU now */ 269 } 270 #endif /* CONFIG_SWAP */ 271 272 /* 273 * Schedule all required I/O operations. 
Do not wait for completion. 274 */ 275 static long madvise_willneed(struct vm_area_struct *vma, 276 struct vm_area_struct **prev, 277 unsigned long start, unsigned long end) 278 { 279 struct mm_struct *mm = vma->vm_mm; 280 struct file *file = vma->vm_file; 281 loff_t offset; 282 283 *prev = vma; 284 #ifdef CONFIG_SWAP 285 if (!file) { 286 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); 287 lru_add_drain(); /* Push any new pages onto the LRU now */ 288 return 0; 289 } 290 291 if (shmem_mapping(file->f_mapping)) { 292 force_shm_swapin_readahead(vma, start, end, 293 file->f_mapping); 294 return 0; 295 } 296 #else 297 if (!file) 298 return -EBADF; 299 #endif 300 301 if (IS_DAX(file_inode(file))) { 302 /* no bad return value, but ignore advice */ 303 return 0; 304 } 305 306 /* 307 * Filesystem's fadvise may need to take various locks. We need to 308 * explicitly grab a reference because the vma (and hence the 309 * vma's reference to the file) can go away as soon as we drop 310 * mmap_lock. 311 */ 312 *prev = NULL; /* tell sys_madvise we drop mmap_lock */ 313 get_file(file); 314 offset = (loff_t)(start - vma->vm_start) 315 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 316 mmap_read_unlock(mm); 317 vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED); 318 fput(file); 319 mmap_read_lock(mm); 320 return 0; 321 } 322 323 static inline bool can_do_file_pageout(struct vm_area_struct *vma) 324 { 325 if (!vma->vm_file) 326 return false; 327 /* 328 * paging out pagecache only for non-anonymous mappings that correspond 329 * to the files the calling process could (if tried) open for writing; 330 * otherwise we'd be including shared non-exclusive mappings, which 331 * opens a side channel. 332 */ 333 return inode_owner_or_capable(&init_user_ns, 334 file_inode(vma->vm_file)) || 335 file_permission(vma->vm_file, MAY_WRITE) == 0; 336 } 337 338 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, 339 unsigned long addr, unsigned long end, 340 struct mm_walk *walk) 341 { 342 struct madvise_walk_private *private = walk->private; 343 struct mmu_gather *tlb = private->tlb; 344 bool pageout = private->pageout; 345 struct mm_struct *mm = tlb->mm; 346 struct vm_area_struct *vma = walk->vma; 347 pte_t *orig_pte, *pte, ptent; 348 spinlock_t *ptl; 349 struct folio *folio = NULL; 350 LIST_HEAD(folio_list); 351 bool pageout_anon_only_filter; 352 353 if (fatal_signal_pending(current)) 354 return -EINTR; 355 356 pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) && 357 !can_do_file_pageout(vma); 358 359 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 360 if (pmd_trans_huge(*pmd)) { 361 pmd_t orig_pmd; 362 unsigned long next = pmd_addr_end(addr, end); 363 364 tlb_change_page_size(tlb, HPAGE_PMD_SIZE); 365 ptl = pmd_trans_huge_lock(pmd, vma); 366 if (!ptl) 367 return 0; 368 369 orig_pmd = *pmd; 370 if (is_huge_zero_pmd(orig_pmd)) 371 goto huge_unlock; 372 373 if (unlikely(!pmd_present(orig_pmd))) { 374 VM_BUG_ON(thp_migration_supported() && 375 !is_pmd_migration_entry(orig_pmd)); 376 goto huge_unlock; 377 } 378 379 folio = pfn_folio(pmd_pfn(orig_pmd)); 380 381 /* Do not interfere with other mappings of this folio */ 382 if (folio_mapcount(folio) != 1) 383 goto huge_unlock; 384 385 if (pageout_anon_only_filter && !folio_test_anon(folio)) 386 goto huge_unlock; 387 388 if (next - addr != HPAGE_PMD_SIZE) { 389 int err; 390 391 folio_get(folio); 392 spin_unlock(ptl); 393 folio_lock(folio); 394 err = split_folio(folio); 395 folio_unlock(folio); 396 folio_put(folio); 397 if (!err) 398 goto regular_folio; 399 
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		folio_clear_referenced(folio);
		folio_test_clear_young(folio);
		if (pageout) {
			if (!folio_isolate_lru(folio)) {
				if (folio_test_unevictable(folio))
					folio_putback_lru(folio);
				else
					list_add(&folio->lru, &folio_list);
			}
		} else
			folio_deactivate(folio);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&folio_list);
		return 0;
	}

regular_folio:
	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * Creating a THP page is expensive, so split a large folio
		 * only if we are sure it is worth it, i.e. if we are the
		 * only owner.
		 */
		if (folio_test_large(folio)) {
			if (folio_mapcount(folio) != 1)
				break;
			if (pageout_anon_only_filter && !folio_test_anon(folio))
				break;
			folio_get(folio);
			if (!folio_trylock(folio)) {
				folio_put(folio);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_folio(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			folio_unlock(folio);
			folio_put(folio);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/*
		 * Do not interfere with other mappings of this folio, and
		 * skip non-LRU folios.
		 */
		if (!folio_test_lru(folio) || folio_mapcount(folio) != 1)
			continue;

		if (pageout_anon_only_filter && !folio_test_anon(folio))
			continue;

		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a folio to accelerate its reclaim.
		 * The VM cannot reclaim the folio unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * which will miss the recent reference history.
503 */ 504 folio_clear_referenced(folio); 505 folio_test_clear_young(folio); 506 if (pageout) { 507 if (!folio_isolate_lru(folio)) { 508 if (folio_test_unevictable(folio)) 509 folio_putback_lru(folio); 510 else 511 list_add(&folio->lru, &folio_list); 512 } 513 } else 514 folio_deactivate(folio); 515 } 516 517 arch_leave_lazy_mmu_mode(); 518 pte_unmap_unlock(orig_pte, ptl); 519 if (pageout) 520 reclaim_pages(&folio_list); 521 cond_resched(); 522 523 return 0; 524 } 525 526 static const struct mm_walk_ops cold_walk_ops = { 527 .pmd_entry = madvise_cold_or_pageout_pte_range, 528 }; 529 530 static void madvise_cold_page_range(struct mmu_gather *tlb, 531 struct vm_area_struct *vma, 532 unsigned long addr, unsigned long end) 533 { 534 struct madvise_walk_private walk_private = { 535 .pageout = false, 536 .tlb = tlb, 537 }; 538 539 tlb_start_vma(tlb, vma); 540 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); 541 tlb_end_vma(tlb, vma); 542 } 543 544 static inline bool can_madv_lru_vma(struct vm_area_struct *vma) 545 { 546 return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB)); 547 } 548 549 static long madvise_cold(struct vm_area_struct *vma, 550 struct vm_area_struct **prev, 551 unsigned long start_addr, unsigned long end_addr) 552 { 553 struct mm_struct *mm = vma->vm_mm; 554 struct mmu_gather tlb; 555 556 *prev = vma; 557 if (!can_madv_lru_vma(vma)) 558 return -EINVAL; 559 560 lru_add_drain(); 561 tlb_gather_mmu(&tlb, mm); 562 madvise_cold_page_range(&tlb, vma, start_addr, end_addr); 563 tlb_finish_mmu(&tlb); 564 565 return 0; 566 } 567 568 static void madvise_pageout_page_range(struct mmu_gather *tlb, 569 struct vm_area_struct *vma, 570 unsigned long addr, unsigned long end) 571 { 572 struct madvise_walk_private walk_private = { 573 .pageout = true, 574 .tlb = tlb, 575 }; 576 577 tlb_start_vma(tlb, vma); 578 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); 579 tlb_end_vma(tlb, vma); 580 } 581 582 static long madvise_pageout(struct vm_area_struct *vma, 583 struct vm_area_struct **prev, 584 unsigned long start_addr, unsigned long end_addr) 585 { 586 struct mm_struct *mm = vma->vm_mm; 587 struct mmu_gather tlb; 588 589 *prev = vma; 590 if (!can_madv_lru_vma(vma)) 591 return -EINVAL; 592 593 /* 594 * If the VMA belongs to a private file mapping, there can be private 595 * dirty pages which can be paged out if even this process is neither 596 * owner nor write capable of the file. We allow private file mappings 597 * further to pageout dirty anon pages. 
	 */
	if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
					(vma->vm_flags & VM_MAYSHARE)))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct folio *folio;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent a swap-in, which is more expensive than
		 * page allocation plus zeroing.
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (!non_swap_entry(entry)) {
				nr_swap--;
				free_swap_and_cache(entry);
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			} else if (is_hwpoison_entry(entry) ||
				   is_swapin_error_entry(entry)) {
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			}
			continue;
		}

		folio = vm_normal_folio(vma, addr, ptent);
		if (!folio || folio_is_zone_device(folio))
			continue;

		/*
		 * If the pmd isn't transhuge but the folio is large and
		 * is owned only by this process, split it and
		 * deactivate all pages.
		 */
		if (folio_test_large(folio)) {
			if (folio_mapcount(folio) != 1)
				goto out;
			folio_get(folio);
			if (!folio_trylock(folio)) {
				folio_put(folio);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_folio(folio)) {
				folio_unlock(folio);
				folio_put(folio);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			folio_unlock(folio);
			folio_put(folio);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
			if (!folio_trylock(folio))
				continue;
			/*
			 * If the folio is shared with others, we mustn't
			 * clear the folio's dirty flag.
			 */
			if (folio_mapcount(folio) != 1) {
				folio_unlock(folio);
				continue;
			}

			if (folio_test_swapcache(folio) &&
			    !folio_free_swap(folio)) {
				folio_unlock(folio);
				continue;
			}

			folio_clear_dirty(folio);
			folio_unlock(folio);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) do not update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so for
			 * portability, re-install the pte as old and clean
			 * after clearing it.
721 */ 722 ptent = ptep_get_and_clear_full(mm, addr, pte, 723 tlb->fullmm); 724 725 ptent = pte_mkold(ptent); 726 ptent = pte_mkclean(ptent); 727 set_pte_at(mm, addr, pte, ptent); 728 tlb_remove_tlb_entry(tlb, pte, addr); 729 } 730 folio_mark_lazyfree(folio); 731 } 732 out: 733 if (nr_swap) { 734 if (current->mm == mm) 735 sync_mm_rss(mm); 736 737 add_mm_counter(mm, MM_SWAPENTS, nr_swap); 738 } 739 arch_leave_lazy_mmu_mode(); 740 pte_unmap_unlock(orig_pte, ptl); 741 cond_resched(); 742 next: 743 return 0; 744 } 745 746 static const struct mm_walk_ops madvise_free_walk_ops = { 747 .pmd_entry = madvise_free_pte_range, 748 }; 749 750 static int madvise_free_single_vma(struct vm_area_struct *vma, 751 unsigned long start_addr, unsigned long end_addr) 752 { 753 struct mm_struct *mm = vma->vm_mm; 754 struct mmu_notifier_range range; 755 struct mmu_gather tlb; 756 757 /* MADV_FREE works for only anon vma at the moment */ 758 if (!vma_is_anonymous(vma)) 759 return -EINVAL; 760 761 range.start = max(vma->vm_start, start_addr); 762 if (range.start >= vma->vm_end) 763 return -EINVAL; 764 range.end = min(vma->vm_end, end_addr); 765 if (range.end <= vma->vm_start) 766 return -EINVAL; 767 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, 768 range.start, range.end); 769 770 lru_add_drain(); 771 tlb_gather_mmu(&tlb, mm); 772 update_hiwater_rss(mm); 773 774 mmu_notifier_invalidate_range_start(&range); 775 tlb_start_vma(&tlb, vma); 776 walk_page_range(vma->vm_mm, range.start, range.end, 777 &madvise_free_walk_ops, &tlb); 778 tlb_end_vma(&tlb, vma); 779 mmu_notifier_invalidate_range_end(&range); 780 tlb_finish_mmu(&tlb); 781 782 return 0; 783 } 784 785 /* 786 * Application no longer needs these pages. If the pages are dirty, 787 * it's OK to just throw them away. The app will be more careful about 788 * data it wants to keep. Be sure to free swap resources too. The 789 * zap_page_range_single call sets things up for shrink_active_list to actually 790 * free these pages later if no one else has touched them in the meantime, 791 * although we could add these pages to a global reuse list for 792 * shrink_active_list to pick up before reclaiming other pages. 793 * 794 * NB: This interface discards data rather than pushes it out to swap, 795 * as some implementations do. This has performance implications for 796 * applications like large transactional databases which want to discard 797 * pages in anonymous maps after committing to backing store the data 798 * that was kept in them. There is no reason to write this data out to 799 * the swap area if the application is discarding it. 800 * 801 * An interface that causes the system to free clean pages and flush 802 * dirty pages is already available as msync(MS_INVALIDATE). 
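 *
 * For illustration only (a hypothetical userspace sketch, not part of this
 * file): on a private anonymous mapping, MADV_DONTNEED discards the current
 * contents and later accesses see zero-filled pages:
 *
 *	#include <sys/mman.h>
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 4 * 4096;
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		assert(p != MAP_FAILED);
 *		p[0] = 'x';				// dirty the first page
 *		assert(madvise(p, len, MADV_DONTNEED) == 0);
 *		assert(p[0] == 0);			// data was discarded
 *		return 0;
 *	}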
803 */ 804 static long madvise_dontneed_single_vma(struct vm_area_struct *vma, 805 unsigned long start, unsigned long end) 806 { 807 zap_page_range_single(vma, start, end - start, NULL); 808 return 0; 809 } 810 811 static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma, 812 unsigned long start, 813 unsigned long *end, 814 int behavior) 815 { 816 if (!is_vm_hugetlb_page(vma)) { 817 unsigned int forbidden = VM_PFNMAP; 818 819 if (behavior != MADV_DONTNEED_LOCKED) 820 forbidden |= VM_LOCKED; 821 822 return !(vma->vm_flags & forbidden); 823 } 824 825 if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED) 826 return false; 827 if (start & ~huge_page_mask(hstate_vma(vma))) 828 return false; 829 830 /* 831 * Madvise callers expect the length to be rounded up to PAGE_SIZE 832 * boundaries, and may be unaware that this VMA uses huge pages. 833 * Avoid unexpected data loss by rounding down the number of 834 * huge pages freed. 835 */ 836 *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma))); 837 838 return true; 839 } 840 841 static long madvise_dontneed_free(struct vm_area_struct *vma, 842 struct vm_area_struct **prev, 843 unsigned long start, unsigned long end, 844 int behavior) 845 { 846 struct mm_struct *mm = vma->vm_mm; 847 848 *prev = vma; 849 if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior)) 850 return -EINVAL; 851 852 if (start == end) 853 return 0; 854 855 if (!userfaultfd_remove(vma, start, end)) { 856 *prev = NULL; /* mmap_lock has been dropped, prev is stale */ 857 858 mmap_read_lock(mm); 859 vma = find_vma(mm, start); 860 if (!vma) 861 return -ENOMEM; 862 if (start < vma->vm_start) { 863 /* 864 * This "vma" under revalidation is the one 865 * with the lowest vma->vm_start where start 866 * is also < vma->vm_end. If start < 867 * vma->vm_start it means an hole materialized 868 * in the user address space within the 869 * virtual range passed to MADV_DONTNEED 870 * or MADV_FREE. 871 */ 872 return -ENOMEM; 873 } 874 /* 875 * Potential end adjustment for hugetlb vma is OK as 876 * the check below keeps end within vma. 877 */ 878 if (!madvise_dontneed_free_valid_vma(vma, start, &end, 879 behavior)) 880 return -EINVAL; 881 if (end > vma->vm_end) { 882 /* 883 * Don't fail if end > vma->vm_end. If the old 884 * vma was split while the mmap_lock was 885 * released the effect of the concurrent 886 * operation may not cause madvise() to 887 * have an undefined result. There may be an 888 * adjacent next vma that we'll walk 889 * next. userfaultfd_remove() will generate an 890 * UFFD_EVENT_REMOVE repetition on the 891 * end-vma->vm_end range, but the manager can 892 * handle a repetition fine. 893 */ 894 end = vma->vm_end; 895 } 896 VM_WARN_ON(start >= end); 897 } 898 899 if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED) 900 return madvise_dontneed_single_vma(vma, start, end); 901 else if (behavior == MADV_FREE) 902 return madvise_free_single_vma(vma, start, end); 903 else 904 return -EINVAL; 905 } 906 907 static long madvise_populate(struct vm_area_struct *vma, 908 struct vm_area_struct **prev, 909 unsigned long start, unsigned long end, 910 int behavior) 911 { 912 const bool write = behavior == MADV_POPULATE_WRITE; 913 struct mm_struct *mm = vma->vm_mm; 914 unsigned long tmp_end; 915 int locked = 1; 916 long pages; 917 918 *prev = vma; 919 920 while (start < end) { 921 /* 922 * We might have temporarily dropped the lock. For example, 923 * our VMA might have been split. 
924 */ 925 if (!vma || start >= vma->vm_end) { 926 vma = vma_lookup(mm, start); 927 if (!vma) 928 return -ENOMEM; 929 } 930 931 tmp_end = min_t(unsigned long, end, vma->vm_end); 932 /* Populate (prefault) page tables readable/writable. */ 933 pages = faultin_vma_page_range(vma, start, tmp_end, write, 934 &locked); 935 if (!locked) { 936 mmap_read_lock(mm); 937 locked = 1; 938 *prev = NULL; 939 vma = NULL; 940 } 941 if (pages < 0) { 942 switch (pages) { 943 case -EINTR: 944 return -EINTR; 945 case -EINVAL: /* Incompatible mappings / permissions. */ 946 return -EINVAL; 947 case -EHWPOISON: 948 return -EHWPOISON; 949 case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */ 950 return -EFAULT; 951 default: 952 pr_warn_once("%s: unhandled return value: %ld\n", 953 __func__, pages); 954 fallthrough; 955 case -ENOMEM: 956 return -ENOMEM; 957 } 958 } 959 start += pages * PAGE_SIZE; 960 } 961 return 0; 962 } 963 964 /* 965 * Application wants to free up the pages and associated backing store. 966 * This is effectively punching a hole into the middle of a file. 967 */ 968 static long madvise_remove(struct vm_area_struct *vma, 969 struct vm_area_struct **prev, 970 unsigned long start, unsigned long end) 971 { 972 loff_t offset; 973 int error; 974 struct file *f; 975 struct mm_struct *mm = vma->vm_mm; 976 977 *prev = NULL; /* tell sys_madvise we drop mmap_lock */ 978 979 if (vma->vm_flags & VM_LOCKED) 980 return -EINVAL; 981 982 f = vma->vm_file; 983 984 if (!f || !f->f_mapping || !f->f_mapping->host) { 985 return -EINVAL; 986 } 987 988 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) 989 return -EACCES; 990 991 offset = (loff_t)(start - vma->vm_start) 992 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 993 994 /* 995 * Filesystem's fallocate may need to take i_rwsem. We need to 996 * explicitly grab a reference because the vma (and hence the 997 * vma's reference to the file) can go away as soon as we drop 998 * mmap_lock. 999 */ 1000 get_file(f); 1001 if (userfaultfd_remove(vma, start, end)) { 1002 /* mmap_lock was not released by userfaultfd_remove() */ 1003 mmap_read_unlock(mm); 1004 } 1005 error = vfs_fallocate(f, 1006 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 1007 offset, end - start); 1008 fput(f); 1009 mmap_read_lock(mm); 1010 return error; 1011 } 1012 1013 /* 1014 * Apply an madvise behavior to a region of a vma. madvise_update_vma 1015 * will handle splitting a vm area into separate areas, each area with its own 1016 * behavior. 
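 *
 * For illustration only (a hypothetical userspace sketch, not part of this
 * file): applying a flag-changing behavior to only part of a mapping splits
 * the VMA, which can be observed in /proc/self/maps afterwards:
 *
 *	char *p = mmap(NULL, 3 * 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	madvise(p + 4096, 4096, MADV_DONTDUMP);		// middle page only
 *
 * The single original VMA becomes three VMAs, because the middle one now
 * carries VM_DONTDUMP and can no longer merge with its neighbours.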
1017 */ 1018 static int madvise_vma_behavior(struct vm_area_struct *vma, 1019 struct vm_area_struct **prev, 1020 unsigned long start, unsigned long end, 1021 unsigned long behavior) 1022 { 1023 int error; 1024 struct anon_vma_name *anon_name; 1025 unsigned long new_flags = vma->vm_flags; 1026 1027 switch (behavior) { 1028 case MADV_REMOVE: 1029 return madvise_remove(vma, prev, start, end); 1030 case MADV_WILLNEED: 1031 return madvise_willneed(vma, prev, start, end); 1032 case MADV_COLD: 1033 return madvise_cold(vma, prev, start, end); 1034 case MADV_PAGEOUT: 1035 return madvise_pageout(vma, prev, start, end); 1036 case MADV_FREE: 1037 case MADV_DONTNEED: 1038 case MADV_DONTNEED_LOCKED: 1039 return madvise_dontneed_free(vma, prev, start, end, behavior); 1040 case MADV_POPULATE_READ: 1041 case MADV_POPULATE_WRITE: 1042 return madvise_populate(vma, prev, start, end, behavior); 1043 case MADV_NORMAL: 1044 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; 1045 break; 1046 case MADV_SEQUENTIAL: 1047 new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ; 1048 break; 1049 case MADV_RANDOM: 1050 new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ; 1051 break; 1052 case MADV_DONTFORK: 1053 new_flags |= VM_DONTCOPY; 1054 break; 1055 case MADV_DOFORK: 1056 if (vma->vm_flags & VM_IO) 1057 return -EINVAL; 1058 new_flags &= ~VM_DONTCOPY; 1059 break; 1060 case MADV_WIPEONFORK: 1061 /* MADV_WIPEONFORK is only supported on anonymous memory. */ 1062 if (vma->vm_file || vma->vm_flags & VM_SHARED) 1063 return -EINVAL; 1064 new_flags |= VM_WIPEONFORK; 1065 break; 1066 case MADV_KEEPONFORK: 1067 new_flags &= ~VM_WIPEONFORK; 1068 break; 1069 case MADV_DONTDUMP: 1070 new_flags |= VM_DONTDUMP; 1071 break; 1072 case MADV_DODUMP: 1073 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) 1074 return -EINVAL; 1075 new_flags &= ~VM_DONTDUMP; 1076 break; 1077 case MADV_MERGEABLE: 1078 case MADV_UNMERGEABLE: 1079 error = ksm_madvise(vma, start, end, behavior, &new_flags); 1080 if (error) 1081 goto out; 1082 break; 1083 case MADV_HUGEPAGE: 1084 case MADV_NOHUGEPAGE: 1085 error = hugepage_madvise(vma, &new_flags, behavior); 1086 if (error) 1087 goto out; 1088 break; 1089 case MADV_COLLAPSE: 1090 return madvise_collapse(vma, prev, start, end); 1091 } 1092 1093 anon_name = anon_vma_name(vma); 1094 anon_vma_name_get(anon_name); 1095 error = madvise_update_vma(vma, prev, start, end, new_flags, 1096 anon_name); 1097 anon_vma_name_put(anon_name); 1098 1099 out: 1100 /* 1101 * madvise() returns EAGAIN if kernel resources, such as 1102 * slab, are temporarily unavailable. 1103 */ 1104 if (error == -ENOMEM) 1105 error = -EAGAIN; 1106 return error; 1107 } 1108 1109 #ifdef CONFIG_MEMORY_FAILURE 1110 /* 1111 * Error injection support for memory error handling. 1112 */ 1113 static int madvise_inject_error(int behavior, 1114 unsigned long start, unsigned long end) 1115 { 1116 unsigned long size; 1117 1118 if (!capable(CAP_SYS_ADMIN)) 1119 return -EPERM; 1120 1121 1122 for (; start < end; start += size) { 1123 unsigned long pfn; 1124 struct page *page; 1125 int ret; 1126 1127 ret = get_user_pages_fast(start, 1, 0, &page); 1128 if (ret != 1) 1129 return ret; 1130 pfn = page_to_pfn(page); 1131 1132 /* 1133 * When soft offlining hugepages, after migrating the page 1134 * we dissolve it, therefore in the second loop "page" will 1135 * no longer be a compound page. 
1136 */ 1137 size = page_size(compound_head(page)); 1138 1139 if (behavior == MADV_SOFT_OFFLINE) { 1140 pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n", 1141 pfn, start); 1142 ret = soft_offline_page(pfn, MF_COUNT_INCREASED); 1143 } else { 1144 pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n", 1145 pfn, start); 1146 ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED); 1147 if (ret == -EOPNOTSUPP) 1148 ret = 0; 1149 } 1150 1151 if (ret) 1152 return ret; 1153 } 1154 1155 return 0; 1156 } 1157 #endif 1158 1159 static bool 1160 madvise_behavior_valid(int behavior) 1161 { 1162 switch (behavior) { 1163 case MADV_DOFORK: 1164 case MADV_DONTFORK: 1165 case MADV_NORMAL: 1166 case MADV_SEQUENTIAL: 1167 case MADV_RANDOM: 1168 case MADV_REMOVE: 1169 case MADV_WILLNEED: 1170 case MADV_DONTNEED: 1171 case MADV_DONTNEED_LOCKED: 1172 case MADV_FREE: 1173 case MADV_COLD: 1174 case MADV_PAGEOUT: 1175 case MADV_POPULATE_READ: 1176 case MADV_POPULATE_WRITE: 1177 #ifdef CONFIG_KSM 1178 case MADV_MERGEABLE: 1179 case MADV_UNMERGEABLE: 1180 #endif 1181 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1182 case MADV_HUGEPAGE: 1183 case MADV_NOHUGEPAGE: 1184 case MADV_COLLAPSE: 1185 #endif 1186 case MADV_DONTDUMP: 1187 case MADV_DODUMP: 1188 case MADV_WIPEONFORK: 1189 case MADV_KEEPONFORK: 1190 #ifdef CONFIG_MEMORY_FAILURE 1191 case MADV_SOFT_OFFLINE: 1192 case MADV_HWPOISON: 1193 #endif 1194 return true; 1195 1196 default: 1197 return false; 1198 } 1199 } 1200 1201 static bool process_madvise_behavior_valid(int behavior) 1202 { 1203 switch (behavior) { 1204 case MADV_COLD: 1205 case MADV_PAGEOUT: 1206 case MADV_WILLNEED: 1207 case MADV_COLLAPSE: 1208 return true; 1209 default: 1210 return false; 1211 } 1212 } 1213 1214 /* 1215 * Walk the vmas in range [start,end), and call the visit function on each one. 1216 * The visit function will get start and end parameters that cover the overlap 1217 * between the current vma and the original range. Any unmapped regions in the 1218 * original range will result in this function returning -ENOMEM while still 1219 * calling the visit function on all of the existing vmas in the range. 1220 * Must be called with the mmap_lock held for reading or writing. 1221 */ 1222 static 1223 int madvise_walk_vmas(struct mm_struct *mm, unsigned long start, 1224 unsigned long end, unsigned long arg, 1225 int (*visit)(struct vm_area_struct *vma, 1226 struct vm_area_struct **prev, unsigned long start, 1227 unsigned long end, unsigned long arg)) 1228 { 1229 struct vm_area_struct *vma; 1230 struct vm_area_struct *prev; 1231 unsigned long tmp; 1232 int unmapped_error = 0; 1233 1234 /* 1235 * If the interval [start,end) covers some unmapped address 1236 * ranges, just ignore them, but return -ENOMEM at the end. 1237 * - different from the way of handling in mlock etc. 1238 */ 1239 vma = find_vma_prev(mm, start, &prev); 1240 if (vma && start > vma->vm_start) 1241 prev = vma; 1242 1243 for (;;) { 1244 int error; 1245 1246 /* Still start < end. */ 1247 if (!vma) 1248 return -ENOMEM; 1249 1250 /* Here start < (end|vma->vm_end). */ 1251 if (start < vma->vm_start) { 1252 unmapped_error = -ENOMEM; 1253 start = vma->vm_start; 1254 if (start >= end) 1255 break; 1256 } 1257 1258 /* Here vma->vm_start <= start < (end|vma->vm_end) */ 1259 tmp = vma->vm_end; 1260 if (end < tmp) 1261 tmp = end; 1262 1263 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). 
*/ 1264 error = visit(vma, &prev, start, tmp, arg); 1265 if (error) 1266 return error; 1267 start = tmp; 1268 if (prev && start < prev->vm_end) 1269 start = prev->vm_end; 1270 if (start >= end) 1271 break; 1272 if (prev) 1273 vma = find_vma(mm, prev->vm_end); 1274 else /* madvise_remove dropped mmap_lock */ 1275 vma = find_vma(mm, start); 1276 } 1277 1278 return unmapped_error; 1279 } 1280 1281 #ifdef CONFIG_ANON_VMA_NAME 1282 static int madvise_vma_anon_name(struct vm_area_struct *vma, 1283 struct vm_area_struct **prev, 1284 unsigned long start, unsigned long end, 1285 unsigned long anon_name) 1286 { 1287 int error; 1288 1289 /* Only anonymous mappings can be named */ 1290 if (vma->vm_file && !vma_is_anon_shmem(vma)) 1291 return -EBADF; 1292 1293 error = madvise_update_vma(vma, prev, start, end, vma->vm_flags, 1294 (struct anon_vma_name *)anon_name); 1295 1296 /* 1297 * madvise() returns EAGAIN if kernel resources, such as 1298 * slab, are temporarily unavailable. 1299 */ 1300 if (error == -ENOMEM) 1301 error = -EAGAIN; 1302 return error; 1303 } 1304 1305 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, 1306 unsigned long len_in, struct anon_vma_name *anon_name) 1307 { 1308 unsigned long end; 1309 unsigned long len; 1310 1311 if (start & ~PAGE_MASK) 1312 return -EINVAL; 1313 len = (len_in + ~PAGE_MASK) & PAGE_MASK; 1314 1315 /* Check to see whether len was rounded up from small -ve to zero */ 1316 if (len_in && !len) 1317 return -EINVAL; 1318 1319 end = start + len; 1320 if (end < start) 1321 return -EINVAL; 1322 1323 if (end == start) 1324 return 0; 1325 1326 return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name, 1327 madvise_vma_anon_name); 1328 } 1329 #endif /* CONFIG_ANON_VMA_NAME */ 1330 /* 1331 * The madvise(2) system call. 1332 * 1333 * Applications can use madvise() to advise the kernel how it should 1334 * handle paging I/O in this VM area. The idea is to help the kernel 1335 * use appropriate read-ahead and caching techniques. The information 1336 * provided is advisory only, and can be safely disregarded by the 1337 * kernel without affecting the correct operation of the application. 1338 * 1339 * behavior values: 1340 * MADV_NORMAL - the default behavior is to read clusters. This 1341 * results in some read-ahead and read-behind. 1342 * MADV_RANDOM - the system should read the minimum amount of data 1343 * on any access, since it is unlikely that the appli- 1344 * cation will need more than what it asks for. 1345 * MADV_SEQUENTIAL - pages in the given range will probably be accessed 1346 * once, so they can be aggressively read ahead, and 1347 * can be freed soon after they are accessed. 1348 * MADV_WILLNEED - the application is notifying the system to read 1349 * some pages ahead. 1350 * MADV_DONTNEED - the application is finished with the given range, 1351 * so the kernel can free resources associated with it. 1352 * MADV_FREE - the application marks pages in the given range as lazy free, 1353 * where actual purges are postponed until memory pressure happens. 1354 * MADV_REMOVE - the application wants to free up the given range of 1355 * pages and associated backing store. 1356 * MADV_DONTFORK - omit this area from child's address space when forking: 1357 * typically, to avoid COWing pages pinned by get_user_pages(). 1358 * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. 1359 * MADV_WIPEONFORK - present the child process with zero-filled memory in this 1360 * range after a fork. 
 * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 * MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 * MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 * MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 * MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 * MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 * MADV_COLLAPSE - synchronously coalesce pages into new THP.
 * MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 * MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 * MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 * MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 * MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required.
 * MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the address space of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
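 *
 * For illustration only (a hypothetical userspace sketch, not part of this
 * file): a common pattern is to declare the access pattern of a file mapping
 * up front and ask for readahead to start early:
 *
 *	int fd = open("data.bin", O_RDONLY);		// hypothetical file
 *	struct stat st;
 *
 *	fstat(fd, &st);
 *	void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, st.st_size, MADV_SEQUENTIAL);	// tune readahead
 *	madvise(p, st.st_size, MADV_WILLNEED);		// schedule readahead now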
1400 */ 1401 int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior) 1402 { 1403 unsigned long end; 1404 int error; 1405 int write; 1406 size_t len; 1407 struct blk_plug plug; 1408 1409 start = untagged_addr(start); 1410 1411 if (!madvise_behavior_valid(behavior)) 1412 return -EINVAL; 1413 1414 if (!PAGE_ALIGNED(start)) 1415 return -EINVAL; 1416 len = PAGE_ALIGN(len_in); 1417 1418 /* Check to see whether len was rounded up from small -ve to zero */ 1419 if (len_in && !len) 1420 return -EINVAL; 1421 1422 end = start + len; 1423 if (end < start) 1424 return -EINVAL; 1425 1426 if (end == start) 1427 return 0; 1428 1429 #ifdef CONFIG_MEMORY_FAILURE 1430 if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE) 1431 return madvise_inject_error(behavior, start, start + len_in); 1432 #endif 1433 1434 write = madvise_need_mmap_write(behavior); 1435 if (write) { 1436 if (mmap_write_lock_killable(mm)) 1437 return -EINTR; 1438 } else { 1439 mmap_read_lock(mm); 1440 } 1441 1442 blk_start_plug(&plug); 1443 error = madvise_walk_vmas(mm, start, end, behavior, 1444 madvise_vma_behavior); 1445 blk_finish_plug(&plug); 1446 if (write) 1447 mmap_write_unlock(mm); 1448 else 1449 mmap_read_unlock(mm); 1450 1451 return error; 1452 } 1453 1454 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior) 1455 { 1456 return do_madvise(current->mm, start, len_in, behavior); 1457 } 1458 1459 SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec, 1460 size_t, vlen, int, behavior, unsigned int, flags) 1461 { 1462 ssize_t ret; 1463 struct iovec iovstack[UIO_FASTIOV], iovec; 1464 struct iovec *iov = iovstack; 1465 struct iov_iter iter; 1466 struct task_struct *task; 1467 struct mm_struct *mm; 1468 size_t total_len; 1469 unsigned int f_flags; 1470 1471 if (flags != 0) { 1472 ret = -EINVAL; 1473 goto out; 1474 } 1475 1476 ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter); 1477 if (ret < 0) 1478 goto out; 1479 1480 task = pidfd_get_task(pidfd, &f_flags); 1481 if (IS_ERR(task)) { 1482 ret = PTR_ERR(task); 1483 goto free_iov; 1484 } 1485 1486 if (!process_madvise_behavior_valid(behavior)) { 1487 ret = -EINVAL; 1488 goto release_task; 1489 } 1490 1491 /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */ 1492 mm = mm_access(task, PTRACE_MODE_READ_FSCREDS); 1493 if (IS_ERR_OR_NULL(mm)) { 1494 ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH; 1495 goto release_task; 1496 } 1497 1498 /* 1499 * Require CAP_SYS_NICE for influencing process performance. Note that 1500 * only non-destructive hints are currently supported. 1501 */ 1502 if (!capable(CAP_SYS_NICE)) { 1503 ret = -EPERM; 1504 goto release_mm; 1505 } 1506 1507 total_len = iov_iter_count(&iter); 1508 1509 while (iov_iter_count(&iter)) { 1510 iovec = iov_iter_iovec(&iter); 1511 ret = do_madvise(mm, (unsigned long)iovec.iov_base, 1512 iovec.iov_len, behavior); 1513 if (ret < 0) 1514 break; 1515 iov_iter_advance(&iter, iovec.iov_len); 1516 } 1517 1518 ret = (total_len - iov_iter_count(&iter)) ? : ret; 1519 1520 release_mm: 1521 mmput(mm); 1522 release_task: 1523 put_task_struct(task); 1524 free_iov: 1525 kfree(iov); 1526 out: 1527 return ret; 1528 } 1529
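
/*
 * For illustration only (a hypothetical userspace sketch, not part of this
 * file): process_madvise(2) lets a caller that passes the PTRACE_MODE_READ
 * check above and has CAP_SYS_NICE apply the non-destructive hints accepted
 * by process_madvise_behavior_valid() to another process's memory. Assuming
 * a libc without dedicated wrappers, it can be invoked through syscall(2):
 *
 *	#include <sys/syscall.h>
 *	#include <sys/uio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	// target_pid, target_addr and target_len are hypothetical values
 *	// describing a range in the other process's address space.
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	struct iovec iov = {
 *		.iov_base = (void *)target_addr,
 *		.iov_len  = target_len,
 *	};
 *	ssize_t done = syscall(SYS_process_madvise, pidfd, &iov, 1,
 *			       MADV_PAGEOUT, 0);
 *
 * On success, done is the number of bytes that were advised; it may be less
 * than target_len if advice on part of the range failed part-way through.
 */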