/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_mutex
 *         anon_vma->rwsem
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *               bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex	(memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc(GFP_KERNEL);
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}

		anon_vma_lock_write(anon_vma);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			anon_vma_chain_link(vma, avc, anon_vma);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock_write(anon_vma);

		if (unlikely(allocated))
			put_anon_vma(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
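
/*
 * Illustrative sketch only (an editorial aside, not part of this file):
 * fault handlers are expected to call anon_vma_prepare() before installing
 * the first anonymous pte into a vma, roughly like
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *
 * so that __page_set_anon_rmap() below always finds vma->anon_vma set.
 */
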
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single lock acquisition (down_write) for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);
	}
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root))
			continue;

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the rwsem.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = ACCESS_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the rwsem ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we've got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (unlikely(is_vm_hugetlb_page(vma)))
		pgoff = page->index << huge_page_order(page_hstate(page));

	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address = __vma_address(page, vma);

	/* page should be within @vma mapping range */
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);

	return address;
}

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write. So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = ACCESS_ONCE(*pmd);
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
			    unsigned long address, spinlock_t **ptlp, int sync)
{
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(page))) {
		/* when pud is not present, pte will be NULL */
		pte = huge_pte_offset(mm, address);
		if (!pte)
			return NULL;

		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
		goto check;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
check:
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	int referenced = 0;
	struct page_referenced_arg *pra = arg;

	if (unlikely(PageTransHuge(page))) {
		pmd_t *pmd;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address_pmd().
		 */
		pmd = page_check_address_pmd(page, mm, address,
					     PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
		if (!pmd)
			return SWAP_AGAIN;

		if (vma->vm_flags & VM_LOCKED) {
			spin_unlock(ptl);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		/* go ahead even if the pmd is pmd_trans_splitting() */
		if (pmdp_clear_flush_young_notify(vma, address, pmd))
			referenced++;
		spin_unlock(ptl);
	} else {
		pte_t *pte;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address().
		 */
		pte = page_check_address(page, mm, address, &ptl, 0);
		if (!pte)
			return SWAP_AGAIN;

		if (vma->vm_flags & VM_LOCKED) {
			pte_unmap_unlock(pte, ptl);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			/*
			 * Don't treat a reference through a sequentially read
			 * mapping as such.  If the page has been used in
			 * another mapping, we will catch it; if this other
			 * mapping is already gone, the unmap path will have
			 * set PG_referenced or activated the page.
			 */
			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
				referenced++;
		}
		pte_unmap_unlock(pte, ptl);
	}

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	pra->mapcount--;
	if (!pra->mapcount)
		return SWAP_SUCCESS; /* To break the loop */

	return SWAP_AGAIN;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int ret;
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = page_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip counting
	 * references from vmas belonging to other cgroups.
	 */
	if (memcg)
		rwc.invalid_vma = invalid_page_referenced_vma;

	ret = rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
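
/*
 * Illustrative sketch of the typical caller (an assumption for the reader,
 * not part of this file): vmscan's page_check_references() feeds the count
 * and vm_flags returned above into its reclaim decision, roughly
 *
 *	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
 *					  &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		return PAGEREF_RECLAIM;
 *
 * The actual policy lives in mm/vmscan.c.
 */
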
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;
	int *cleaned = arg;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);

	if (ret) {
		mmu_notifier_invalidate_page(mm, address);
		(*cleaned)++;
	}
out:
	return SWAP_AGAIN;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 * @address: the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	do_page_add_anon_rmap(page, vma, address, 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first) {
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock (a spinlock) is held, which implies preemption is disabled.
		 */
		if (PageTransHuge(page))
			__inc_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
				hpage_nr_pages(page));
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address, exclusive);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	if (PageTransHuge(page))
		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
			hpage_nr_pages(page));
	__page_set_anon_rmap(page, vma, address, 1);

	VM_BUG_ON_PAGE(PageLRU(page), page);
	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_stat because this
		 * counter is not modified from interrupt context, and the pte
		 * lock (a spinlock) is held, which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	bool locked;
	unsigned long flags;

	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
	}
	mem_cgroup_end_update_page_stat(page, &locked, &flags);
}
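
/*
 * Editorial note on the _mapcount convention used above and below (a summary
 * for the reader, not new behaviour): page->_mapcount starts at -1 for an
 * unmapped page, so
 *
 *	atomic_inc_and_test(&page->_mapcount)	   -> true on the first map
 *	atomic_add_negative(-1, &page->_mapcount)  -> true on the last unmap
 *
 * which is why page_add_new_anon_rmap() can simply set the count to 0.
 */
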
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	bool anon = PageAnon(page);
	bool locked;
	unsigned long flags;

	/*
	 * The anon case has no mem_cgroup page_stat to update; but it may
	 * call mem_cgroup_uncharge_page() below, where the lock ordering
	 * can deadlock if we hold the lock against page_stat move: so
	 * avoid it on anon.
	 */
	if (!anon)
		mem_cgroup_begin_update_page_stat(page, &locked, &flags);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	/*
	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
	 * and not charged by memcg for now.
	 *
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock (a spinlock) is held, which implies preemption is disabled.
	 */
	if (unlikely(PageHuge(page)))
		goto out;
	if (anon) {
		mem_cgroup_uncharge_page(page);
		if (PageTransHuge(page))
			__dec_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
				      -hpage_nr_pages(page));
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
	}
	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
	return;
out:
	if (!anon)
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;
	enum ttu_flags flags = (enum ttu_flags)arg;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (flags & TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (!PageHuge(page)) {
			if (PageAnon(page))
				dec_mm_counter(mm, MM_ANONPAGES);
			else
				dec_mm_counter(mm, MM_FILEPAGES);
		}
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (pte_unused(pteval)) {
		/*
		 * The guest indicated that the page content is of no
		 * interest anymore. Simply discard the pte, vmscan
		 * will take care of the rest.
		 */
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		pte_t swp_pte;

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!(flags & TTU_MIGRATION));
			entry = make_migration_entry(page, pte_write(pteval));
		}
		swp_pte = swp_entry_to_pte(entry);
		if (pte_soft_dirty(pteval))
			swp_pte = pte_swp_mksoft_dirty(swp_pte);
		set_pte_at(mm, address, pte, swp_pte);
		BUG_ON(pte_file(*pte));
	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
		   (flags & TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
		mmu_notifier_invalidate_page(mm, address);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking; otherwise the VM_LOCKED check gives an
	 * unstable, racy result.  Also, we can't wait here because we now
	 * hold anon_vma->rwsem or mapping->i_mmap_mutex.  If the trylock
	 * fails, the page remains on the evictable LRU, and vmscan may later
	 * retry moving it to the unevictable LRU if it is actually mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.
 * If we encounter the "check_page" that vmscan is trying to unmap, return
 * SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return ret;

	mmun_start = address;
	mmun_end   = end;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem);	/* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			if (page == check_page) {
				/* we know we have check_page locked */
				mlock_vma_page(page);
				ret = SWAP_MLOCK;
			} else if (trylock_page(page)) {
				/*
				 * If we can lock the page, perform mlock.
				 * Otherwise leave the page alone, it will be
				 * eventually encountered again later.
				 */
				mlock_vma_page(page);
				unlock_page(page);
			}
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address)) {
			pte_t ptfile = pgoff_to_pte(page->index);
			if (pte_soft_dirty(pteval))
				ptfile = pte_file_mksoft_dirty(ptfile);
			set_pte_at(mm, address, pte, ptfile);
		}

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

static int try_to_unmap_nonlinear(struct page *page,
		struct address_space *mapping, void *arg)
{
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	list_for_each_entry(vma,
		&mapping->i_mmap_nonlinear, shared.nonlinear) {

		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved ? */
		return SWAP_FAIL;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		return ret;

	cond_resched();

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma,
			&mapping->i_mmap_nonlinear, shared.nonlinear) {

			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
						vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					return ret;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched();
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
		vma->vm_private_data = NULL;

	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_not_mapped,
		.file_nonlinear = try_to_unmap_nonlinear,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);

	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	ret = rmap_walk(page, &rwc);

	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	int ret;
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		/*
		 * We don't bother to try to find the munlocked page in
		 * nonlinears. It's costly. Instead, later, page reclaim logic
		 * may call try_to_unmap() and recover PG_mlocked lazily.
		 */
		.file_nonlinear = NULL,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);

	ret = rmap_walk(page, &rwc);
	return ret;
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}
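
/*
 * A quick reference for the rmap_walk_control callbacks used below (an
 * editorial summary; the authoritative definition is struct
 * rmap_walk_control in include/linux/rmap.h):
 *
 *	rmap_one	called for each vma that might map the page
 *	done		early-exit test, e.g. page_not_mapped()
 *	anon_lock	override for taking the anon_vma lock
 *	invalid_vma	skip test, e.g. invalid_migration_vma()
 *	file_nonlinear	fallback walk over nonlinear file vmas
 *	arg		opaque cookie passed through to the callbacks
 */
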
static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma from disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = rmap_walk_anon_lock(page, rwc);
	if (!anon_vma)
		return ret;

	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			break;
		if (rwc->done && rwc->done(page))
			break;
	}
	anon_vma_unlock_read(anon_vma);
	return ret;
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << compound_order(page);
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_mutex.
	 */
	VM_BUG_ON(!PageLocked(page));

	if (!mapping)
		return ret;
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

	if (!rwc->file_nonlinear)
		goto done;

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto done;

	ret = rwc->file_nonlinear(page, mapping, rwc->arg);

done:
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc);
	else
		return rmap_walk_file(page, rwc);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */