/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     mapping->tree_lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   mapping->tree_lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

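/*
 * Link an anon_vma_chain into both directions of the reverse mapping:
 * onto the vma's own anon_vma_chain list and into the anon_vma's
 * interval tree, so that each side can later be found from the other.
 */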
static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * If dst->anon_vma is NULL this function tries to find and reuse existing
 * anon_vma which has no vmas and only one child anon_vma. This prevents
 * degradation of anon_vma hierarchy to endless linear chain in case of
 * constantly forking task. On the other hand, an anon_vma with more than one
 * child isn't reused even if there was no alive vma, thus rmap walker has a
 * good chance of avoiding scanning the whole hierarchy when it searches where
 * page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if its degree is lower than two,
		 * that means it has no vma and only one anon_vma child.
		 *
		 * Do not choose parent anon_vma, otherwise first child
		 * will always reuse it. Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

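/*
 * Detach a vma from every anon_vma it is chained to, dropping the degree
 * counts. anon_vmas whose interval tree became empty are kept on the
 * chain and only put once the root lock has been released.
 */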
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT;
}

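/*
 * Set up the slab caches for anon_vma and anon_vma_chain objects. The
 * anon_vma cache uses SLAB_DESTROY_BY_RCU together with a constructor,
 * which is what lets page_get_anon_vma() dereference a possibly-freed
 * anon_vma under rcu_read_lock() (see the comment above that function).
 */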
void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int cpu;

	if (!tlb_ubc->flush_required)
		return;

	cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	}

	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
	cpumask_clear(&tlb_ubc->cpumask);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
	put_cpu();
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

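/*
 * Record that the current task has unmapped ptes in @mm whose TLB entries
 * still need flushing: accumulate the mm's CPUs into the task's batch
 * cpumask and note whether any of the ptes could have been writable.
 */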
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
	tlb_ubc->flush_required = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer batch the flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

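/*
 * Walk the page tables of @mm and return the pmd entry mapping @address,
 * or NULL if nothing is present at some level or the pmd is a transparent
 * huge pmd (in which case there is no pte page to descend into).
 */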
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write.  So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			       unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return SWAP_SUCCESS; /* To break the loop */

	return SWAP_AGAIN;
}

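/*
 * Skip VMAs whose mm does not belong to the memcg we are counting
 * references for; used as the invalid_vma callback when page_referenced()
 * runs on behalf of a cgroup.
 */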
static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int ret;
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	ret = rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	int *cleaned = arg;

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;
		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_huge_clear_flush(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		if (ret) {
			mmu_notifier_invalidate_page(vma->vm_mm, address);
			(*cleaned)++;
		}
	}

	return SWAP_AGAIN;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

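/*
 * page_mkclean - write-protect and clean all ptes mapping a file page
 * @page: the page to clean; must be locked by the caller
 *
 * Returns the number of ptes (or pmds) that were write-protected and
 * marked clean, so that the next write to the page takes a fault.
 */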
int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? hpage_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock(a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
			__inc_node_page_state(page, NR_ANON_THPS);
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		__inc_node_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
out:
	unlock_page_memcg(page);
}

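/*
 * page_remove_file_rmap - take down a pte or pmd mapping of a file page;
 * the counterpart of page_add_file_rmap(), called via page_remove_rmap().
 */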
static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
	lock_page_memcg(page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		goto out;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
	unlock_page_memcg(page);
}

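/*
 * Take down the pmd-level mapping of an anonymous compound page. If the
 * THP was also PTE-mapped (PageDoubleMap), only the subpages whose
 * _mapcount now drops below zero are subtracted from NR_ANON_MAPPED.
 */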
static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_node_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr) {
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
		deferred_split_huge_page(page);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);

	if (compound)
		return page_remove_anon_compound_rmap(page);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__dec_node_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

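/*
 * Private state threaded through try_to_unmap_one(): the ttu_flags for
 * this walk plus a count of lazily freed (MADV_FREE) pages.
 */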
struct rmap_private {
	enum ttu_flags flags;
	int lazyfreed;
};

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	int ret = SWAP_AGAIN;
	struct rmap_private *rp = arg;
	enum ttu_flags flags = rp->flags;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return SWAP_AGAIN;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_MIGRATION, page);
	}

	while (page_vma_mapped_walk(&pvmw)) {
		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
				ret = SWAP_MLOCK;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = SWAP_FAIL;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			if (PageHuge(page)) {
				int nr = 1 << compound_order(page);
				hugetlb_count_sub(nr, mm);
			} else {
				dec_mm_counter(mm, mm_counter(page));
			}

			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			set_pte_at(mm, address, pvmw.pte, pteval);
		} else if (pte_unused(pteval)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 */
			dec_mm_counter(mm, mm_counter(page));
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & TTU_MIGRATION)) {
			swp_entry_t entry;
			pte_t swp_pte;
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			VM_BUG_ON_PAGE(!PageSwapCache(page), page);

			if (!PageDirty(page) && (flags & TTU_LZFREE)) {
				/* It's a freeable page by MADV_FREE */
				dec_mm_counter(mm, MM_ANONPAGES);
				rp->lazyfreed++;
				goto discard;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = SWAP_FAIL;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else
			dec_mm_counter(mm, mm_counter_file(page));
discard:
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
		mmu_notifier_invalidate_page(mm, address);
	}
	return ret;
}

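/*
 * During exec the new stack is built in a temporary VMA that is moved into
 * place later; such VMAs are flagged with VM_STACK_INCOMPLETE_SETUP so that
 * rmap walkers (see try_to_unmap() below) can skip them.
 */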
bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;
	struct rmap_private rp = {
		.flags = flags,
		.lazyfreed = 0,
	};

	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = &rp,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is setup and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		ret = rmap_walk_locked(page, &rwc);
	else
		ret = rmap_walk(page, &rwc);

	if (ret != SWAP_MLOCK && !page_mapcount(page)) {
		ret = SWAP_SUCCESS;
		if (rp.lazyfreed && !PageDirty(page))
			ret = SWAP_LZFREE;
	}
	return ret;
}

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
};

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	int ret;
	struct rmap_private rp = {
		.flags = TTU_MUNLOCK,
		.lazyfreed = 0,
	};

	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = &rp,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,

	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);

	ret = rmap_walk(page, &rwc);
	return ret;
}

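/*
 * Final teardown once the last reference to an anon_vma is dropped: free
 * it, and release the pin it held on its root (freeing the root too if
 * that was its last reference).
 */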
void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

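/*
 * Take the anon_vma read lock for an rmap walk, using the walk control's
 * anon_lock callback (e.g. page_lock_anon_vma_read) when one is supplied.
 */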
static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	if (locked) {
		anon_vma = page_anon_vma(page);
		/* anon_vma disappear under us? */
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
	if (!anon_vma)
		return ret;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			break;
		if (rwc->done && rwc->done(page))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
	return ret;
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return ret;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
	return ret;
}

int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc, false);
	else
		return rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		return rmap_walk_anon(page, rwc, true);
	else
		return rmap_walk_file(page, rwc, true);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */