// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/page_idle.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/fadvise.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/mm_inline.h>
#include <linux/string.h>
#include <linux/uio.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"
#include "swap.h"

struct madvise_walk_private {
	struct mmu_gather *tlb;
	bool pageout;
};

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_lock for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_FREE:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
	case MADV_COLLAPSE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

#ifdef CONFIG_ANON_VMA_NAME
struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	struct anon_vma_name *anon_name;
	size_t count;

	/* Add 1 for NUL terminator at the end of the anon_name->name */
	count = strlen(name) + 1;
	anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
	if (anon_name) {
		kref_init(&anon_name->kref);
		memcpy(anon_name->name, name, count);
	}

	return anon_name;
}

void anon_vma_name_free(struct kref *kref)
{
	struct anon_vma_name *anon_name =
			container_of(kref, struct anon_vma_name, kref);
	kfree(anon_name);
}

struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	mmap_assert_locked(vma->vm_mm);

	if (vma->vm_file)
		return NULL;

	return vma->anon_name;
}

/* mmap_lock should be write-locked */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	struct anon_vma_name *orig_name = anon_vma_name(vma);

	if (!anon_name) {
		vma->anon_name = NULL;
		anon_vma_name_put(orig_name);
		return 0;
	}

	if (anon_vma_name_eq(orig_name, anon_name))
		return 0;

	vma->anon_name = anon_vma_name_reuse(anon_name);
	anon_vma_name_put(orig_name);

	return 0;
}
#else /* CONFIG_ANON_VMA_NAME */
static int replace_anon_vma_name(struct vm_area_struct *vma,
				 struct anon_vma_name *anon_name)
{
	if (anon_name)
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * Update the vm_flags on region of a vma, splitting it or merging it as
 * necessary.  Must be called with mmap_lock held for writing;
 * Caller should ensure anon_name stability by raising its refcount even when
 * anon_name belongs to a valid vma because this function might free that vma.
 */
static int madvise_update_vma(struct vm_area_struct *vma,
			      struct vm_area_struct **prev, unsigned long start,
			      unsigned long end, unsigned long new_flags,
			      struct anon_vma_name *anon_name)
{
	struct mm_struct *mm = vma->vm_mm;
	int error;
	pgoff_t pgoff;

	if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
		*prev = vma;
		return 0;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx, anon_name);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count))
			return -ENOMEM;
		error = __split_vma(mm, vma, start, 1);
		if (error)
			return error;
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count))
			return -ENOMEM;
		error = __split_vma(mm, vma, end, 0);
		if (error)
			return error;
	}

success:
	/*
	 * vm_flags is protected by the mmap_lock held in write mode.
	 */
	vma->vm_flags = new_flags;
	if (!vma->vm_file) {
		error = replace_anon_vma_name(vma, anon_name);
		if (error)
			return error;
	}

	return 0;
}

#ifdef CONFIG_SWAP
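/*
 * MADV_WILLNEED on anonymous memory: walk the ptes under one pmd and
 * start asynchronous swap-in for every swapped-out page found.
 */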
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
				 unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	unsigned long index;
	struct swap_iocb *splug = NULL;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;
		pte_t *ptep;

		ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl);
		pte = *ptep;
		pte_unmap_unlock(ptep, ptl);

		if (!is_swap_pte(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index, false, &splug);
		if (page)
			put_page(page);
	}
	swap_read_unplug(splug);

	return 0;
}

static const struct mm_walk_ops swapin_walk_ops = {
	.pmd_entry		= swapin_walk_pmd_entry,
};

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
	pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
	struct page *page;
	struct swap_iocb *splug = NULL;

	rcu_read_lock();
	xas_for_each(&xas, page, end_index) {
		swp_entry_t swap;

		if (!xa_is_value(page))
			continue;
		swap = radix_to_swp_entry(page);
		/* There might be swapin error entries in shmem mapping. */
		if (non_swap_entry(swap))
			continue;
		xas_pause(&xas);
		rcu_read_unlock();

		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0, false, &splug);
		if (page)
			put_page(page);

		rcu_read_lock();
	}
	rcu_read_unlock();
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	loff_t offset;

	*prev = vma;
#ifdef CONFIG_SWAP
	if (!file) {
		walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
		lru_add_drain(); /* Push any new pages onto the LRU now */
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		force_shm_swapin_readahead(vma, start, end,
					   file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	/*
	 * Filesystem's fadvise may need to take various locks.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */
	get_file(file);
	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	mmap_read_unlock(mm);
	vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
	fput(file);
	mmap_read_lock(mm);
	return 0;
}
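
/*
 * Core walk for MADV_COLD and MADV_PAGEOUT: age young pte/pmd entries
 * and deactivate exclusively-mapped pages; with ->pageout set,
 * additionally isolate those pages and reclaim them right away.
 */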
static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
				unsigned long addr, unsigned long end,
				struct mm_walk *walk)
{
	struct madvise_walk_private *private = walk->private;
	struct mmu_gather *tlb = private->tlb;
	bool pageout = private->pageout;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	pte_t *orig_pte, *pte, ptent;
	spinlock_t *ptl;
	struct page *page = NULL;
	LIST_HEAD(page_list);

	if (fatal_signal_pending(current))
		return -EINTR;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge(*pmd)) {
		pmd_t orig_pmd;
		unsigned long next = pmd_addr_end(addr, end);

		tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
		ptl = pmd_trans_huge_lock(pmd, vma);
		if (!ptl)
			return 0;

		orig_pmd = *pmd;
		if (is_huge_zero_pmd(orig_pmd))
			goto huge_unlock;

		if (unlikely(!pmd_present(orig_pmd))) {
			VM_BUG_ON(thp_migration_supported() &&
					!is_pmd_migration_entry(orig_pmd));
			goto huge_unlock;
		}

		page = pmd_page(orig_pmd);

		/* Do not interfere with other mappings of this page */
		if (page_mapcount(page) != 1)
			goto huge_unlock;

		if (next - addr != HPAGE_PMD_SIZE) {
			int err;

			get_page(page);
			spin_unlock(ptl);
			lock_page(page);
			err = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (!err)
				goto regular_page;
			return 0;
		}

		if (pmd_young(orig_pmd)) {
			pmdp_invalidate(vma, addr, pmd);
			orig_pmd = pmd_mkold(orig_pmd);

			set_pmd_at(mm, addr, pmd, orig_pmd);
			tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		}

		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
huge_unlock:
		spin_unlock(ptl);
		if (pageout)
			reclaim_pages(&page_list);
		return 0;
	}

regular_page:
	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page || is_zone_device_page(page))
			continue;

		/*
		 * Creating a THP page is expensive, so split it only if we
		 * are sure it's worth it.  Split it if we are the only owner.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				break;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				break;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				break;
			}
			unlock_page(page);
			put_page(page);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		/*
		 * Do not interfere with other mappings of this page and
		 * non-LRU page.
		 */
		if (!PageLRU(page) || page_mapcount(page) != 1)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (pte_young(ptent)) {
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			ptent = pte_mkold(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}

		/*
		 * We are deactivating a page to accelerate its reclaim.
		 * The VM cannot reclaim the page unless we clear PG_young.
		 * As a side effect, this confuses idle-page tracking,
		 * which will miss the page's recent reference history.
		 */
		ClearPageReferenced(page);
		test_and_clear_page_young(page);
		if (pageout) {
			if (!isolate_lru_page(page)) {
				if (PageUnevictable(page))
					putback_lru_page(page);
				else
					list_add(&page->lru, &page_list);
			}
		} else
			deactivate_page(page);
	}

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	if (pageout)
		reclaim_pages(&page_list);
	cond_resched();

	return 0;
}

static const struct mm_walk_ops cold_walk_ops = {
	.pmd_entry = madvise_cold_or_pageout_pte_range,
};

static void madvise_cold_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = false,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
}

static long madvise_cold(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}

static void madvise_pageout_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct madvise_walk_private walk_private = {
		.pageout = true,
		.tlb = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
	tlb_end_vma(tlb, vma);
}

static inline bool can_do_pageout(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Page out pagecache only for non-anonymous mappings that correspond
	 * to the files the calling process could (if it tried) open for
	 * writing; otherwise we'd be including shared non-exclusive mappings,
	 * which opens a side channel.
	 */
	return inode_owner_or_capable(&init_user_ns,
				      file_inode(vma->vm_file)) ||
	       file_permission(vma->vm_file, MAY_WRITE) == 0;
}
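
/*
 * Illustrative userspace sketch (not part of the kernel build): mark a
 * buffer that won't be touched for a while as cold so it is reclaimed
 * first under memory pressure, or reclaim it immediately with
 * MADV_PAGEOUT.  "buf" and "len" are hypothetical.
 *
 *	madvise(buf, len, MADV_COLD);
 *	madvise(buf, len, MADV_PAGEOUT);
 */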
static long madvise_pageout(struct vm_area_struct *vma,
			struct vm_area_struct **prev,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	*prev = vma;
	if (!can_madv_lru_vma(vma))
		return -EINVAL;

	if (!can_do_pageout(vma))
		return 0;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
	tlb_finish_mmu(&tlb);

	return 0;
}
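
/*
 * MADV_FREE walk: drop swap entries, clear the dirty and young state of
 * exclusively-mapped pages and mark them lazily freeable, so reclaim
 * can discard them instead of swapping them out.
 */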
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_change_page_size(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	flush_tlb_batched_pending(mm);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table
		 * entry to prevent swap-in, which is more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (!non_swap_entry(entry)) {
				nr_swap--;
				free_swap_and_cache(entry);
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			} else if (is_hwpoison_entry(entry) ||
				   is_swapin_error_entry(entry)) {
				pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			}
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page || is_zone_device_page(page))
			continue;

		/*
		 * If the pmd isn't transhuge but the page is THP and
		 * is owned by only this process, split it and
		 * deactivate all pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot
			 * clear its PG_dirty bit.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * on set_pte_at() and tlb_remove_tlb_entry(), so for
			 * portability, re-install the pte as old and clean
			 * after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
		mark_page_lazyfree(page);
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static const struct mm_walk_ops madvise_free_walk_ops = {
	.pmd_entry		= madvise_free_pte_range,
};

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	range.start = max(vma->vm_start, start_addr);
	if (range.start >= vma->vm_end)
		return -EINVAL;
	range.end = min(vma->vm_end, end_addr);
	if (range.end <= vma->vm_start)
		return -EINVAL;
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
				range.start, range.end);

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(&range);
	tlb_start_vma(&tlb, vma);
	walk_page_range(vma->vm_mm, range.start, range.end,
			&madvise_free_walk_ops, &tlb);
	tlb_end_vma(&tlb, vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return 0;
}
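
/*
 * Illustrative userspace sketch (not part of the kernel build): an
 * allocator returning memory to the system can mark it lazily freeable
 * with MADV_FREE (discarded only under memory pressure) or discard it
 * immediately with MADV_DONTNEED.  "block" and "len" are hypothetical.
 *
 *	madvise(block, len, MADV_FREE);
 *	madvise(block, len, MADV_DONTNEED);
 */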
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
					unsigned long start, unsigned long end)
{
	zap_page_range(vma, start, end - start);
	return 0;
}

static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned long *end,
					    int behavior)
{
	if (!is_vm_hugetlb_page(vma)) {
		unsigned int forbidden = VM_PFNMAP;

		if (behavior != MADV_DONTNEED_LOCKED)
			forbidden |= VM_LOCKED;

		return !(vma->vm_flags & forbidden);
	}

	if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
		return false;
	if (start & ~huge_page_mask(hstate_vma(vma)))
		return false;

	*end = ALIGN(*end, huge_page_size(hstate_vma(vma)));
	return true;
}

static long madvise_dontneed_free(struct vm_area_struct *vma,
				  struct vm_area_struct **prev,
				  unsigned long start, unsigned long end,
				  int behavior)
{
	struct mm_struct *mm = vma->vm_mm;

	*prev = vma;
	if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
		return -EINVAL;

	if (!userfaultfd_remove(vma, start, end)) {
		*prev = NULL; /* mmap_lock has been dropped, prev is stale */

		mmap_read_lock(mm);
		vma = find_vma(mm, start);
		if (!vma)
			return -ENOMEM;
		if (start < vma->vm_start) {
			/*
			 * This "vma" under revalidation is the one
			 * with the lowest vma->vm_start where start
			 * is also < vma->vm_end.  If start <
			 * vma->vm_start it means a hole materialized
			 * in the user address space within the
			 * virtual range passed to MADV_DONTNEED
			 * or MADV_FREE.
			 */
			return -ENOMEM;
		}
		/*
		 * Potential end adjustment for hugetlb vma is OK as
		 * the check below keeps end within vma.
		 */
		if (!madvise_dontneed_free_valid_vma(vma, start, &end,
						     behavior))
			return -EINVAL;
		if (end > vma->vm_end) {
			/*
			 * Don't fail if end > vma->vm_end.  If the old
			 * vma was split while the mmap_lock was
			 * released, the concurrent operation does not
			 * make the madvise() result undefined: there
			 * may be an adjacent next vma that we'll walk
			 * next.  userfaultfd_remove() will generate an
			 * UFFD_EVENT_REMOVE repetition on the
			 * end-vma->vm_end range, but the manager can
			 * handle a repetition fine.
			 */
			end = vma->vm_end;
		}
		VM_WARN_ON(start >= end);
	}

	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
		return madvise_dontneed_single_vma(vma, start, end);
	else if (behavior == MADV_FREE)
		return madvise_free_single_vma(vma, start, end);
	else
		return -EINVAL;
}
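
/*
 * MADV_POPULATE_READ/MADV_POPULATE_WRITE: fault in (prefault) the whole
 * range, revalidating the vma after every drop of the mmap_lock.
 */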
static long madvise_populate(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end,
			     int behavior)
{
	const bool write = behavior == MADV_POPULATE_WRITE;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long tmp_end;
	int locked = 1;
	long pages;

	*prev = vma;

	while (start < end) {
		/*
		 * We might have temporarily dropped the lock. For example,
		 * our VMA might have been split.
		 */
		if (!vma || start >= vma->vm_end) {
			vma = vma_lookup(mm, start);
			if (!vma)
				return -ENOMEM;
		}

		tmp_end = min_t(unsigned long, end, vma->vm_end);
		/* Populate (prefault) page tables readable/writable. */
		pages = faultin_vma_page_range(vma, start, tmp_end, write,
					       &locked);
		if (!locked) {
			mmap_read_lock(mm);
			locked = 1;
			*prev = NULL;
			vma = NULL;
		}
		if (pages < 0) {
			switch (pages) {
			case -EINTR:
				return -EINTR;
			case -EINVAL: /* Incompatible mappings / permissions. */
				return -EINVAL;
			case -EHWPOISON:
				return -EHWPOISON;
			case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
				return -EFAULT;
			default:
				pr_warn_once("%s: unhandled return value: %ld\n",
					     __func__, pages);
				fallthrough;
			case -ENOMEM:
				return -ENOMEM;
			}
		}
		start += pages * PAGE_SIZE;
	}
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;
	struct mm_struct *mm = vma->vm_mm;

	*prev = NULL;	/* tell sys_madvise we drop mmap_lock */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_rwsem.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_lock.
	 */
	get_file(f);
	if (userfaultfd_remove(vma, start, end)) {
		/* mmap_lock was not released by userfaultfd_remove() */
		mmap_read_unlock(mm);
	}
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	mmap_read_lock(mm);
	return error;
}
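
/*
 * Illustrative userspace sketch (not part of the kernel build): on a
 * writable shared file mapping, MADV_REMOVE behaves like punching a
 * hole with fallocate(2) using FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE.
 * "map", "off" and "chunk" are hypothetical.
 *
 *	madvise(map + off, chunk, MADV_REMOVE);
 */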
/*
 * Apply an madvise behavior to a region of a vma.  madvise_update_vma
 * will handle splitting a vm area into separate areas, each area with its own
 * behavior.
 */
static int madvise_vma_behavior(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end,
				unsigned long behavior)
{
	int error;
	struct anon_vma_name *anon_name;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_COLD:
		return madvise_cold(vma, prev, start, end);
	case MADV_PAGEOUT:
		return madvise_pageout(vma, prev, start, end);
	case MADV_FREE:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
		return madvise_dontneed_free(vma, prev, start, end, behavior);
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
		return madvise_populate(vma, prev, start, end, behavior);
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO)
			return -EINVAL;
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_WIPEONFORK:
		/* MADV_WIPEONFORK is only supported on anonymous memory. */
		if (vma->vm_file || vma->vm_flags & VM_SHARED)
			return -EINVAL;
		new_flags |= VM_WIPEONFORK;
		break;
	case MADV_KEEPONFORK:
		new_flags &= ~VM_WIPEONFORK;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
			return -EINVAL;
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	case MADV_COLLAPSE:
		return madvise_collapse(vma, prev, start, end);
	}

	anon_name = anon_vma_name(vma);
	anon_vma_name_get(anon_name);
	error = madvise_update_vma(vma, prev, start, end, new_flags,
				   anon_name);
	anon_vma_name_put(anon_name);

out:
	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_inject_error(int behavior,
		unsigned long start, unsigned long end)
{
	unsigned long size;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (; start < end; start += size) {
		unsigned long pfn;
		struct page *page;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &page);
		if (ret != 1)
			return ret;
		pfn = page_to_pfn(page);

		/*
		 * When soft offlining hugepages, after migrating the page
		 * we dissolve it, therefore in the second loop "page" will
		 * no longer be a compound page.
		 */
		size = page_size(compound_head(page));

		if (behavior == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
		} else {
			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				pfn, start);
			ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
			if (ret == -EOPNOTSUPP)
				ret = 0;
		}

		if (ret)
			return ret;
	}

	return 0;
}
#endif
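
/*
 * Only advice values compiled into this kernel are accepted: the KSM,
 * THP and error-injection hints depend on CONFIG_KSM,
 * CONFIG_TRANSPARENT_HUGEPAGE and CONFIG_MEMORY_FAILURE respectively.
 */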
static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_DONTNEED_LOCKED:
	case MADV_FREE:
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_POPULATE_READ:
	case MADV_POPULATE_WRITE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
	case MADV_COLLAPSE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
	case MADV_WIPEONFORK:
	case MADV_KEEPONFORK:
#ifdef CONFIG_MEMORY_FAILURE
	case MADV_SOFT_OFFLINE:
	case MADV_HWPOISON:
#endif
		return true;

	default:
		return false;
	}
}

static bool process_madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_COLD:
	case MADV_PAGEOUT:
	case MADV_WILLNEED:
	case MADV_COLLAPSE:
		return true;
	default:
		return false;
	}
}

/*
 * Walk the vmas in range [start,end), and call the visit function on each one.
 * The visit function will get start and end parameters that cover the overlap
 * between the current vma and the original range.  Any unmapped regions in the
 * original range will result in this function returning -ENOMEM while still
 * calling the visit function on all of the existing vmas in the range.
 * Must be called with the mmap_lock held for reading or writing.
 */
static
int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
		      unsigned long end, unsigned long arg,
		      int (*visit)(struct vm_area_struct *vma,
				   struct vm_area_struct **prev, unsigned long start,
				   unsigned long end, unsigned long arg))
{
	struct vm_area_struct *vma;
	struct vm_area_struct *prev;
	unsigned long tmp;
	int unmapped_error = 0;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		int error;

		/* Still start < end. */
		if (!vma)
			return -ENOMEM;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				break;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = visit(vma, &prev, start, tmp, arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		if (start >= end)
			break;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_lock */
			vma = find_vma(mm, start);
	}

	return unmapped_error;
}

#ifdef CONFIG_ANON_VMA_NAME
static int madvise_vma_anon_name(struct vm_area_struct *vma,
				 struct vm_area_struct **prev,
				 unsigned long start, unsigned long end,
				 unsigned long anon_name)
{
	int error;

	/* Only anonymous mappings can be named */
	if (vma->vm_file)
		return -EBADF;

	error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
				   (struct anon_vma_name *)anon_name);

	/*
	 * madvise() returns EAGAIN if kernel resources, such as
	 * slab, are temporarily unavailable.
	 */
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
			  unsigned long len_in, struct anon_vma_name *anon_name)
{
	unsigned long end;
	unsigned long len;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
				 madvise_vma_anon_name);
}
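
/*
 * Illustrative userspace sketch (not part of the kernel build):
 * madvise_set_anon_name() is reached via prctl(2); the name then shows
 * up in /proc/<pid>/maps as "[anon:myheap]".  "p" and "len" are
 * hypothetical.
 *
 *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *	      (unsigned long)p, len, (unsigned long)"myheap");
 */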
#endif /* CONFIG_ANON_VMA_NAME */
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
 *		range after a fork.
 *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK.
 *  MADV_HWPOISON - trigger memory error handler as if the given memory range
 *		were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *  MADV_COLD - the application is not expected to use this memory soon,
 *		deactivate pages in this range so that they can be reclaimed
 *		easily if memory pressure happens.
 *  MADV_PAGEOUT - the application is not expected to use this memory soon,
 *		page out the pages in this range immediately.
 *  MADV_POPULATE_READ - populate (prefault) page tables readable by
 *		triggering read faults if required.
 *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
 *		triggering write faults if required.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages,
 *		or the specified address range includes file, Huge TLB,
 *		MAP_SHARED or VM_PFNMAP range.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
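/*
 * Illustrative userspace sketch (not part of the kernel build): advise
 * sequential access on a mapping and check the result.  "map" and
 * "len" are hypothetical.
 *
 *	if (madvise(map, len, MADV_SEQUENTIAL) == -1)
 *		perror("madvise");
 */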
int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
{
	unsigned long end;
	int error;
	int write;
	size_t len;
	struct blk_plug plug;

	start = untagged_addr(start);

	if (!madvise_behavior_valid(behavior))
		return -EINVAL;

	if (!PAGE_ALIGNED(start))
		return -EINVAL;
	len = PAGE_ALIGN(len_in);

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_inject_error(behavior, start, start + len_in);
#endif

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
	} else {
		mmap_read_lock(mm);
	}

	blk_start_plug(&plug);
	error = madvise_walk_vmas(mm, start, end, behavior,
			madvise_vma_behavior);
	blk_finish_plug(&plug);
	if (write)
		mmap_write_unlock(mm);
	else
		mmap_read_unlock(mm);

	return error;
}

SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	return do_madvise(current->mm, start, len_in, behavior);
}

SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
		size_t, vlen, int, behavior, unsigned int, flags)
{
	ssize_t ret;
	struct iovec iovstack[UIO_FASTIOV], iovec;
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	struct task_struct *task;
	struct mm_struct *mm;
	size_t total_len;
	unsigned int f_flags;

	if (flags != 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		goto out;

	task = pidfd_get_task(pidfd, &f_flags);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto free_iov;
	}

	if (!process_madvise_behavior_valid(behavior)) {
		ret = -EINVAL;
		goto release_task;
	}

	/* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
	if (IS_ERR_OR_NULL(mm)) {
		ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		goto release_task;
	}

	/*
	 * Require CAP_SYS_NICE for influencing process performance. Note that
	 * only non-destructive hints are currently supported.
	 */
	if (!capable(CAP_SYS_NICE)) {
		ret = -EPERM;
		goto release_mm;
	}

	total_len = iov_iter_count(&iter);

	while (iov_iter_count(&iter)) {
		iovec = iov_iter_iovec(&iter);
		ret = do_madvise(mm, (unsigned long)iovec.iov_base,
					iovec.iov_len, behavior);
		if (ret < 0)
			break;
		iov_iter_advance(&iter, iovec.iov_len);
	}

	ret = (total_len - iov_iter_count(&iter)) ? : ret;

release_mm:
	mmput(mm);
release_task:
	put_task_struct(task);
free_iov:
	kfree(iov);
out:
	return ret;
}
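
/*
 * Illustrative userspace sketch (not part of the kernel build): a
 * memory-manager daemon hinting that ranges of another process are
 * cold.  "pid", "iov" and "iovcnt" are hypothetical; where the C
 * library provides no process_madvise() wrapper, syscall(2) can be
 * used directly.
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	ssize_t n = syscall(SYS_process_madvise, pidfd, iov, iovcnt,
 *			    MADV_COLD, 0);
 */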