/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     mapping->tree_lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   mapping->tree_lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
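
/*
 * Usage sketch (illustration only, modelled on the anonymous-fault path in
 * mm/memory.c, not new API): callers go through the anon_vma_prepare()
 * inline wrapper before installing a new anonymous page, roughly
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address, false);
 *
 * so vma->anon_vma is guaranteed to exist by the time the rmap is added.
 */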

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * If dst->anon_vma is NULL this function tries to find and reuse an existing
 * anon_vma which has no vmas and only one child anon_vma. This prevents
 * degradation of the anon_vma hierarchy to an endless linear chain in the
 * case of a constantly forking task. On the other hand, an anon_vma with more
 * than one child isn't reused even if there was no alive vma, thus the rmap
 * walker has a good chance of avoiding scanning the whole hierarchy when it
 * searches where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if its degree is lower than two,
		 * that means it has no vma and only one anon_vma child.
		 *
		 * Do not choose parent anon_vma, otherwise first child
		 * will always reuse it. Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
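
/*
 * For orientation (a rough sketch, not code in this file): dup_mmap() in
 * kernel/fork.c is the caller of anon_vma_fork(). For each parent vma it
 * does, roughly,
 *
 *	tmp = ...copy of the parent vma mpnt...;
 *	if (anon_vma_fork(tmp, mpnt))
 *		goto fail_nomem_anon_vma_fork;
 *
 * so the child vma ends up linked to its own anon_vma for new COWed pages
 * and, via anon_vma_clone() above, to all of the parent's anon_vmas.
 */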

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}
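
/*
 * Usage sketch for the pair above (illustration only; in this file they are
 * reached via the rmap_walk_control->anon_lock hook):
 *
 *	anon_vma = page_lock_anon_vma_read(page);
 *	if (anon_vma) {
 *		... walk anon_vma->rb_root under the read-held rwsem ...
 *		page_unlock_anon_vma_read(anon_vma);
 *	}
 *
 * The anon_vma is only known to be relevant while page_mapped() stays true,
 * so walkers must still verify that the page is mapped in any vma they find.
 */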

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
	int cpu;

	if (!tlb_ubc->flush_required)
		return;

	cpu = get_cpu();

	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	}

	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
	cpumask_clear(&tlb_ubc->cpumask);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
	put_cpu();
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
	tlb_ubc->flush_required = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer batch the flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
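
/*
 * A sketch of how the batching above is meant to be driven (illustration
 * only; the actual driver is the reclaim code, not anything in this file):
 *
 *	try_to_unmap(page, ttu_flags | TTU_BATCH_FLUSH);
 *	...
 *	try_to_unmap_flush_dirty();	(before starting writeback on the page)
 *	...
 *	try_to_unmap_flush();		(before the unmapped pages are freed)
 *
 * where ttu_flags stands for whatever mode the caller is already using.
 */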

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write.  So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return SWAP_SUCCESS; /* To break the loop */

	return SWAP_AGAIN;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int ret;
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	ret = rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	int *cleaned = arg;

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;
		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_huge_clear_flush(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		if (ret) {
			mmu_notifier_invalidate_page(vma->vm_mm, address);
			(*cleaned)++;
		}
	}

	return SWAP_AGAIN;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? hpage_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock(a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
			__inc_node_page_state(page, NR_ANON_THPS);
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		__inc_node_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
out:
	unlock_page_memcg(page);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
	lock_page_memcg(page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		goto out;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
	unlock_page_memcg(page);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_node_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr) {
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
		deferred_split_huge_page(page);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);

	if (compound)
		return page_remove_anon_compound_rmap(page);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__dec_node_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

struct rmap_private {
	enum ttu_flags flags;
	int lazyfreed;
};

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	int ret = SWAP_AGAIN;
	struct rmap_private *rp = arg;
	enum ttu_flags flags = rp->flags;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return SWAP_AGAIN;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_MIGRATION, page);
	}

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
				ret = SWAP_MLOCK;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = SWAP_FAIL;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			if (PageHuge(page)) {
				int nr = 1 << compound_order(page);
				hugetlb_count_sub(nr, mm);
			} else {
				dec_mm_counter(mm, mm_counter(page));
			}

			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			set_pte_at(mm, address, pvmw.pte, pteval);
		} else if (pte_unused(pteval)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 */
			dec_mm_counter(mm, mm_counter(page));
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & TTU_MIGRATION)) {
			swp_entry_t entry;
			pte_t swp_pte;
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			VM_BUG_ON_PAGE(!PageSwapCache(page), page);

			if (!PageDirty(page) && (flags & TTU_LZFREE)) {
				/* It's a freeable page by MADV_FREE */
				dec_mm_counter(mm, MM_ANONPAGES);
				rp->lazyfreed++;
				goto discard;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = SWAP_FAIL;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
		} else
			dec_mm_counter(mm, mm_counter_file(page));
discard:
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
		mmu_notifier_invalidate_page(mm, address);
	}
	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;
	struct rmap_private rp = {
		.flags = flags,
		.lazyfreed = 0,
	};

	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = &rp,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		ret = rmap_walk_locked(page, &rwc);
	else
		ret = rmap_walk(page, &rwc);

	if (ret != SWAP_MLOCK && !page_mapcount(page)) {
		ret = SWAP_SUCCESS;
		if (rp.lazyfreed && !PageDirty(page))
			ret = SWAP_LZFREE;
	}
	return ret;
}
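
/*
 * Sketch of the expected calling convention (illustration only, following
 * the return values documented above; the pageout path is the main user):
 *
 *	switch (try_to_unmap(page, flags)) {
 *	case SWAP_SUCCESS:
 *	case SWAP_LZFREE:
 *		...fully unmapped: the page can go on to be reclaimed...
 *	case SWAP_FAIL:
 *	case SWAP_AGAIN:
 *	case SWAP_MLOCK:
 *		...still mapped (or mlocked): keep or reactivate the page...
 *	}
 */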

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
};

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	int ret;
	struct rmap_private rp = {
		.flags = TTU_MUNLOCK,
		.lazyfreed = 0,
	};

	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = &rp,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);

	ret = rmap_walk(page, &rwc);
	return ret;
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	if (locked) {
		anon_vma = page_anon_vma(page);
		/* anon_vma disappear under us? */
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
	if (!anon_vma)
		return ret;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			break;
		if (rwc->done && rwc->done(page))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
	return ret;
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return ret;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
	return ret;
}

int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc, false);
	else
		return rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		return rmap_walk_anon(page, rwc, true);
	else
		return rmap_walk_file(page, rwc, true);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */