/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex (while writing or truncating, not reading or faulting)
 * mm->mmap_sem
 * page->flags PG_locked (lock_page)
 * mapping->i_mmap_rwsem
 * anon_vma->rwsem
 * mm->page_table_lock or pte_lock
 * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 * swap_lock (in swap_duplicate, swap_info_get)
 * mmlist_lock (in mmput, drain_mmlist and others)
 * mapping->private_lock (in __set_page_dirty_buffers)
 * inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 * bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 * sb_lock (within inode_lock in fs/fs-writeback.c)
 * mapping->tree_lock (widely used, in set_page_dirty,
 *                     in arch-dependent flush_dcache_mmap_lock,
 *                     within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc(GFP_KERNEL);
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}

		anon_vma_lock_write(anon_vma);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			anon_vma_chain_link(vma, avc, anon_vma);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock_write(anon_vma);

		if (unlikely(allocated))
			put_anon_vma(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);
	}
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root))
			continue;

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = ACCESS_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address = __vma_address(page, vma);

	/* page should be within @vma mapping range */
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);

	return address;
}

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write. So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(page))) {
		/* when pud is not present, pte will be NULL */
		pte = huge_pte_offset(mm, address);
		if (!pte)
			return NULL;

		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
		goto check;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
check:
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	int referenced = 0;
	struct page_referenced_arg *pra = arg;

	if (unlikely(PageTransHuge(page))) {
		pmd_t *pmd;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address_pmd().
		 */
		pmd = page_check_address_pmd(page, mm, address,
					     PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
		if (!pmd)
			return SWAP_AGAIN;

		if (vma->vm_flags & VM_LOCKED) {
			spin_unlock(ptl);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		/* go ahead even if the pmd is pmd_trans_splitting() */
		if (pmdp_clear_flush_young_notify(vma, address, pmd))
			referenced++;
		spin_unlock(ptl);
	} else {
		pte_t *pte;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address().
		 */
		pte = page_check_address(page, mm, address, &ptl, 0);
		if (!pte)
			return SWAP_AGAIN;

		if (vma->vm_flags & VM_LOCKED) {
			pte_unmap_unlock(pte, ptl);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			/*
			 * Don't treat a reference through a sequentially read
			 * mapping as such. If the page has been used in
			 * another mapping, we will catch it; if this other
			 * mapping is already gone, the unmap path will have
			 * set PG_referenced or activated the page.
			 */
			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
				referenced++;
		}
		pte_unmap_unlock(pte, ptl);
	}

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	pra->mapcount--;
	if (!pra->mapcount)
		return SWAP_SUCCESS; /* To break the loop */

	return SWAP_AGAIN;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect the encountered vma->vm_flags that actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int ret;
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = page_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	ret = rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;
	int *cleaned = arg;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);

	if (ret) {
		mmu_notifier_invalidate_page(mm, address);
		(*cleaned)++;
	}
out:
	return SWAP_AGAIN;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 * @address: the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);
	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	do_page_add_anon_rmap(page, vma, address, 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first) {
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock (a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (PageTransHuge(page))
			__inc_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
				hpage_nr_pages(page));
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address, exclusive);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	if (PageTransHuge(page))
		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
			hpage_nr_pages(page));
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
	}
	mem_cgroup_end_page_stat(memcg, &locked, &flags);
}

static void page_remove_file_rmap(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page)))
		goto out;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock (a spinlock) is held, which implies preemption disabled.
	 */
	__dec_zone_page_state(page, NR_FILE_MAPPED);
	mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
	mem_cgroup_end_page_stat(memcg, &locked, &flags);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	if (!PageAnon(page)) {
		page_remove_file_rmap(page);
		return;
	}

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now.
	 */
	if (unlikely(PageHuge(page)))
		return;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock (a spinlock) is held, which implies preemption disabled.
	 */
	if (PageTransHuge(page))
		__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);

	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
			      -hpage_nr_pages(page));

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;
	enum ttu_flags flags = (enum ttu_flags)arg;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (flags & TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (!PageHuge(page)) {
			if (PageAnon(page))
				dec_mm_counter(mm, MM_ANONPAGES);
			else
				dec_mm_counter(mm, MM_FILEPAGES);
		}
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (pte_unused(pteval)) {
		/*
		 * The guest indicated that the page content is of no
		 * interest anymore. Simply discard the pte, vmscan
		 * will take care of the rest.
		 */
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		pte_t swp_pte;

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!(flags & TTU_MIGRATION));
			entry = make_migration_entry(page, pte_write(pteval));
		}
		swp_pte = swp_entry_to_pte(entry);
		if (pte_soft_dirty(pteval))
			swp_pte = pte_swp_mksoft_dirty(swp_pte);
		set_pte_at(mm, address, pte, swp_pte);
		BUG_ON(pte_file(*pte));
	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
		   (flags & TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
		mmu_notifier_invalidate_page(mm, address);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking, otherwise the VM_LOCKED check gives an
	 * unstable result and races. Plus, we can't wait here because
	 * we now hold anon_vma->rwsem or mapping->i_mmap_rwsem.
	 * If the trylock fails, the page remains on the evictable lru and
	 * later vmscan may retry moving the page to the unevictable lru if
	 * the page is actually mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.
 * If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return ret;

	mmun_start = address;
	mmun_end   = end;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			if (page == check_page) {
				/* we know we have check_page locked */
				mlock_vma_page(page);
				ret = SWAP_MLOCK;
			} else if (trylock_page(page)) {
				/*
				 * If we can lock the page, perform mlock.
				 * Otherwise leave the page alone, it will be
				 * eventually encountered again later.
				 */
				mlock_vma_page(page);
				unlock_page(page);
			}
			continue;	/* don't unmap */
		}

		/*
		 * No need for _notify because we're within an
		 * mmu_notifier_invalidate_range_ {start|end} scope.
		 */
		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address)) {
			pte_t ptfile = pgoff_to_pte(page->index);
			if (pte_soft_dirty(pteval))
				ptfile = pte_file_mksoft_dirty(ptfile);
			set_pte_at(mm, address, pte, ptfile);
		}

		/* Move the dirty bit to the physical page now the pte is gone.
		 */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

static int try_to_unmap_nonlinear(struct page *page,
		struct address_space *mapping, void *arg)
{
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	list_for_each_entry(vma,
		&mapping->i_mmap_nonlinear, shared.nonlinear) {

		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
		return SWAP_FAIL;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		return ret;

	cond_resched();

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma,
			&mapping->i_mmap_nonlinear, shared.nonlinear) {

			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
						vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					return ret;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched();
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
		vma->vm_private_data = NULL;

	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_not_mapped,
		.file_nonlinear = try_to_unmap_nonlinear,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);

	/*
	 * During exec, a temporary VMA is setup and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	ret = rmap_walk(page, &rwc);

	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	int ret;
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		/*
		 * We don't bother to try to find the munlocked page in
		 * nonlinears. It's costly. Instead, later, page reclaim logic
		 * may call try_to_unmap() and recover PG_mlocked lazily.
		 */
		.file_nonlinear = NULL,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);

	ret = rmap_walk(page, &rwc);
	return ret;
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem.
	 * Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = rmap_walk_anon_lock(page, rwc);
	if (!anon_vma)
		return ret;

	pgoff = page_to_pgoff(page);
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			break;
		if (rwc->done && rwc->done(page))
			break;
	}
	anon_vma_unlock_read(anon_vma);
	return ret;
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return ret;

	pgoff = page_to_pgoff(page);
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

	if (!rwc->file_nonlinear)
		goto done;

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto done;

	ret = rwc->file_nonlinear(page, mapping, rwc->arg);
done:
	i_mmap_unlock_read(mapping);
	return ret;
}

int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc);
	else
		return rmap_walk_file(page, rwc);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif	/* CONFIG_HUGETLB_PAGE */