/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/userfaultfd_k.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>

#include "internal.h"

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}
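/*
 * Illustrative sketch (not part of the original file; addresses and sizes
 * are made up): a madvise() call covering only part of a VMA splits it.
 * Given one VMA spanning [0x10000, 0x20000), a call such as
 *
 *	madvise((void *)0x14000, 0x4000, MADV_DONTFORK);
 *
 * first tries vma_merge(); if that fails, madvise_behavior() below splits
 * the VMA at 0x14000 and again at 0x18000, so that VM_DONTCOPY ends up set
 * on the middle VMA only.
 */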
/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma),
			  vma->vm_userfaultfd_ctx);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, start, 1);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (unlikely(mm->map_count >= sysctl_max_map_count)) {
			error = -ENOMEM;
			goto out;
		}
		error = __split_vma(mm, vma, end, 0);
		if (error) {
			/*
			 * madvise() returns EAGAIN if kernel resources, such as
			 * slab, are temporarily unavailable.
			 */
			if (error == -ENOMEM)
				error = -EAGAIN;
			goto out;
		}
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;
out:
	return error;
}

#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
								vma, index);
		if (page)
			put_page(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_entry(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				put_page(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
								NULL, 0);
		if (page)
			put_page(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */
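/*
 * Worked example for the index arithmetic above (illustrative only,
 * assuming PAGE_SHIFT == 12): for a shmem VMA with vm_start ==
 * 0x7f0000000000 and vm_pgoff == 16, an address start == 0x7f0000003000
 * maps to index ((0x3000) >> 12) + 16 == 19, i.e. the 20th page of the
 * backing object, which is then read back from swap if it was swapped out.
 */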
/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file) {
		*prev = vma;
		force_swapin_readahead(vma, start, end);
		return 0;
	}

	if (shmem_mapping(file->f_mapping)) {
		*prev = vma;
		force_shm_swapin_readahead(vma, start, end,
						file->f_mapping);
		return 0;
	}
#else
	if (!file)
		return -EBADF;
#endif

	if (IS_DAX(file_inode(file))) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
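/*
 * Illustrative userspace sketch (not part of the original file): a reader
 * that knows it will soon scan a file-backed mapping can trigger the
 * readahead scheduled above ahead of time:
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	char *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	madvise(p, len, MADV_WILLNEED);	(returns without waiting for I/O)
 */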
static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)

{
	struct mmu_gather *tlb = walk->private;
	struct mm_struct *mm = tlb->mm;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte, *pte, ptent;
	struct page *page;
	int nr_swap = 0;
	unsigned long next;

	next = pmd_addr_end(addr, end);
	if (pmd_trans_huge(*pmd))
		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
			goto next;

	if (pmd_trans_unstable(pmd))
		return 0;

	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (pte_none(ptent))
			continue;
		/*
		 * If the pte holds a swap entry, just clear the page table:
		 * swapping the page back in would be more expensive than
		 * (page allocation + zeroing).
		 */
		if (!pte_present(ptent)) {
			swp_entry_t entry;

			entry = pte_to_swp_entry(ptent);
			if (non_swap_entry(entry))
				continue;
			nr_swap--;
			free_swap_and_cache(entry);
			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/*
		 * If the pmd isn't transhuge but the page is a THP that is
		 * owned by only this process, split it and deactivate all
		 * its pages.
		 */
		if (PageTransCompound(page)) {
			if (page_mapcount(page) != 1)
				goto out;
			get_page(page);
			if (!trylock_page(page)) {
				put_page(page);
				goto out;
			}
			pte_unmap_unlock(orig_pte, ptl);
			if (split_huge_page(page)) {
				unlock_page(page);
				put_page(page);
				pte_offset_map_lock(mm, pmd, addr, &ptl);
				goto out;
			}
			unlock_page(page);
			put_page(page);
			pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
			pte--;
			addr -= PAGE_SIZE;
			continue;
		}

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		if (PageSwapCache(page) || PageDirty(page)) {
			if (!trylock_page(page))
				continue;
			/*
			 * If the page is shared with others, we cannot clear
			 * its PG_dirty.
			 */
			if (page_mapcount(page) != 1) {
				unlock_page(page);
				continue;
			}

			if (PageSwapCache(page) && !try_to_free_swap(page)) {
				unlock_page(page);
				continue;
			}

			ClearPageDirty(page);
			unlock_page(page);
		}

		if (pte_young(ptent) || pte_dirty(ptent)) {
			/*
			 * Some architectures (e.g. PPC) don't update the TLB
			 * with set_pte_at() and tlb_remove_tlb_entry(), so
			 * for portability, re-install the pte as old and
			 * clean after clearing it.
			 */
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);

			ptent = pte_mkold(ptent);
			ptent = pte_mkclean(ptent);
			set_pte_at(mm, addr, pte, ptent);
			if (PageActive(page))
				deactivate_page(page);
			tlb_remove_tlb_entry(tlb, pte, addr);
		}
	}
out:
	if (nr_swap) {
		if (current->mm == mm)
			sync_mm_rss(mm);

		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
next:
	return 0;
}

static void madvise_free_page_range(struct mmu_gather *tlb,
			     struct vm_area_struct *vma,
			     unsigned long addr, unsigned long end)
{
	struct mm_walk free_walk = {
		.pmd_entry = madvise_free_pte_range,
		.mm = vma->vm_mm,
		.private = tlb,
	};

	tlb_start_vma(tlb, vma);
	walk_page_range(addr, end, &free_walk);
	tlb_end_vma(tlb, vma);
}

static int madvise_free_single_vma(struct vm_area_struct *vma,
			unsigned long start_addr, unsigned long end_addr)
{
	unsigned long start, end;
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather tlb;

	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	/* MADV_FREE works for only anon vma at the moment */
	if (!vma_is_anonymous(vma))
		return -EINVAL;

	start = max(vma->vm_start, start_addr);
	if (start >= vma->vm_end)
		return -EINVAL;
	end = min(vma->vm_end, end_addr);
	if (end <= vma->vm_start)
		return -EINVAL;

	lru_add_drain();
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);

	mmu_notifier_invalidate_range_start(mm, start, end);
	madvise_free_page_range(&tlb, vma, start, end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	tlb_finish_mmu(&tlb, start, end);

	return 0;
}

static long madvise_free(struct vm_area_struct *vma,
			 struct vm_area_struct **prev,
			 unsigned long start, unsigned long end)
{
	*prev = vma;
	return madvise_free_single_vma(vma, start, end);
}
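/*
 * Illustrative userspace sketch (not part of the original file): an
 * allocator can hand free chunks back lazily with MADV_FREE. The mapping
 * stays intact and the pages are only reclaimed under memory pressure; a
 * later write makes a page "in use" again:
 *
 *	madvise(chunk, chunk_size, MADV_FREE);
 *	...
 *	((char *)chunk)[0] = 1;	(cancels the lazy free for that page)
 */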
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (!can_madv_dontneed_vma(vma))
		return -EINVAL;

	userfaultfd_remove(vma, prev, start, end);
	zap_page_range(vma, start, end - start);
	return 0;
}

/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 */
static long madvise_remove(struct vm_area_struct *vma,
				struct vm_area_struct **prev,
				unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & VM_LOCKED)
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host) {
		return -EINVAL;
	}

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	userfaultfd_remove(vma, prev, start, end);
	up_read(&current->mm->mmap_sem);
	error = vfs_fallocate(f,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	struct page *p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE <<
				compound_order(compound_head(p))) {
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;

		if (PageHWPoison(p)) {
			put_page(p);
			continue;
		}
		if (bhv == MADV_SOFT_OFFLINE) {
			pr_info("Soft offlining page %#lx at %#lx\n",
				page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		pr_info("Injecting memory failure for page %#lx at %#lx\n",
			page_to_pfn(p), start);
		ret = memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
		if (ret)
			return ret;
	}
	return 0;
}
#endif
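/*
 * Illustrative sketch (not part of the original file; fd and offsets are
 * made up): on a shared writable file mapping, MADV_REMOVE behaves like
 * punching a hole in the file itself, so
 *
 *	madvise(p + 4096, 8192, MADV_REMOVE);
 *
 * on a mapping of fd at file offset 0 is roughly equivalent to
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192);
 *
 * and subsequent reads of the range see zeroes.
 */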
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_FREE:
		/*
		 * XXX: In this implementation, MADV_FREE works like
		 * MADV_DONTNEED on a swapless system or when swap is full.
		 */
		if (get_nr_swap_pages() > 0)
			return madvise_free(vma, prev, start, end);
		/* passthrough */
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static bool
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
	case MADV_FREE:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return true;

	default:
		return false;
	}
}
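/*
 * Illustrative userspace sketch (not part of the original file; sizes are
 * made up): a typical sequence of advice on a large anonymous mapping,
 * exercising the entry points dispatched above.
 *
 *	void *p = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(p, sz, MADV_HUGEPAGE);	(ask for THP backing)
 *	...use the memory...
 *	madvise(p, sz, MADV_DONTNEED);	(drop contents, keep the mapping)
 */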
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the application
 *		will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_FREE - the application marks pages in the given range as lazy free,
 *		where actual purges are postponed until memory pressure happens.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from the child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_HWPOISON - trigger the memory error handler as if the given memory
 *		range were corrupted by unrecoverable hardware memory failure.
 *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future.  Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start + len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	if (start & ~PAGE_MASK)
		return error;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return error;

	end = start + len;
	if (end < start)
		return error;

	error = 0;
	if (end == start)
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write) {
		if (down_write_killable(&current->mm->mmap_sem))
			return -EINTR;
	} else {
		down_read(&current->mm->mmap_sem);
	}

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end). */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out:
	blk_finish_plug(&plug);
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}