/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   mm->mmap_lock
 *     page->flags PG_locked (lock_page)   * (see hugetlbfs below)
 *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *           anon_vma->rwsem
 *             mm->page_table_lock or pte_lock
 *               pgdat->lru_lock (in mark_page_accessed, isolate_lru_page)
 *               swap_lock (in swap_duplicate, swap_info_get)
 *                 mmlist_lock (in mmput, drain_mmlist and others)
 *                 mapping->private_lock (in __set_page_dirty_buffers)
 *                   mem_cgroup_{begin,end}_page_stat (memcg->move_lock)
 *                     i_pages lock (widely used)
 *                 inode->i_lock (in set_page_dirty's __mark_inode_dirty)
 *                 bdi.wb->list_lock (in set_page_dirty's __mark_inode_dirty)
 *                   sb_lock (within inode_lock in fs/fs-writeback.c)
 *                   i_pages lock (widely used, in set_page_dirty,
 *                             in arch-dependent flush_dcache_mmap_lock,
 *                             within bdi.wb->list_lock in __sync_single_inode)
 *
 * anon_vma->rwsem,mapping->i_mutex      (memory_failure, collect_procs_anon)
 *   ->tasklist_lock
 *     pte map lock
 *
 * * hugetlbfs PageHuge() pages take locks in this order:
 *         mapping->i_mmap_rwsem
 *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
 *             page->flags PG_locked (lock_page)
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

out_enomem_free_avc:
	anon_vma_chain_free(avc);
out_enomem:
	return -ENOMEM;
}
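
/*
 * Illustrative sketch (not a caller in this file) of how the helper above
 * is expected to be used: a fault path such as do_anonymous_page() goes
 * through the inline anon_vma_prepare() wrapper, which only falls back to
 * __anon_vma_prepare() when the vma has no anon_vma yet, before installing
 * the new anonymous page.  Error handling is simplified here:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
 *	...
 *	page_add_new_anon_rmap(page, vma, vmf->address, false);
 */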

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single mutex_lock for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by __vma_split(), __split_vma(), copy_vma() and
 * anon_vma_fork(). The first three want an exact copy of src, while the last
 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child
 * anon_vma. This prevents degradation of the anon_vma hierarchy to an
 * endless linear chain in case of a constantly forking task. On the other
 * hand, an anon_vma with more than one child isn't reused even if there was
 * no alive vma, so the rmap walker has a good chance of avoiding scanning
 * the whole hierarchy when it searches where the page is mapped.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse an existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first child
		 * will always reuse it. The root anon_vma is never reused:
		 * it has a self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma != src->anon_vma && anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

out_error_free_anon_vma:
	put_anon_vma(anon_vma);
out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA. This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed. But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
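
/*
 * Illustrative usage sketch (error handling elided): page migration pins
 * the anon_vma this way before it may drop and retake the page lock, and
 * drops the pin once its rmap walks are done:
 *
 *	struct anon_vma *anon_vma = NULL;
 *
 *	if (PageAnon(page) && !PageKsm(page))
 *		anon_vma = page_get_anon_vma(page);
 *	...
 *	if (anon_vma)
 *		put_anon_vma(anon_vma);
 */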

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the mutex.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the mutex ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure compiler does not re-order the setting of tlb_flush_batched
	 * before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer the batched flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (data_race(mm->tlb_flush_batched)) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
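
/*
 * Illustrative sketch of how a reclaim-style caller is expected to use the
 * batching above (simplified): pass TTU_BATCH_FLUSH to try_to_unmap() for
 * each page in the batch, flush potentially writable entries before
 * starting IO on a page, and do a final flush before the freed pages can
 * be reused:
 *
 *	try_to_unmap(page, flags | TTU_BATCH_FLUSH);
 *	...
 *	try_to_unmap_flush_dirty();	// before pageout()/IO on the page
 *	...
 *	try_to_unmap_flush();		// before freeing the batch
 */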

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}
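
/*
 * mm_find_pmd - locate the pmd entry mapping @address in @mm
 *
 * Returns a pointer to the pmd only when every level down to it is
 * present and the pmd maps a page table rather than a transparent huge
 * page, so that callers can go on to map and walk the pte level.
 * Returns NULL otherwise.
 */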
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(),
	 * set_pmd_at() without holding the anon_vma lock for write. So when
	 * looking for a genuine pmde (in which to find the pte), test present
	 * and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect the vm_flags of the vmas that actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups.
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
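
/*
 * Illustrative sketch of how reclaim is expected to consume the result
 * (heavily simplified from the page_check_references() logic in vmscan):
 * the count of referencing ptes and the collected vm_flags together decide
 * whether the page gets activated rather than reclaimed, e.g.
 *
 *	referenced = page_referenced(page, 1, sc->target_mem_cgroup,
 *				     &vm_flags);
 *	if (referenced && (vm_flags & VM_EXEC))
 *		return PAGEREF_ACTIVATE;
 */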

static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	struct mmu_notifier_range range;
	int *cleaned = arg;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation. Note
	 * that the page cannot be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				min(vma->vm_end, address + page_size(page)));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;

		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
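
/*
 * Illustrative note: the main consumer is writeback's
 * clear_page_dirty_for_io(), which (roughly) does
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 *
 * so that dirty state carried only in ptes is transferred to the struct
 * page before the page is written out, and later stores fault and
 * re-dirty the page through the now write-protected mappings.
 */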

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page or Hugepage to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
		       page);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (unlikely(PageKsm(page)))
		lock_page_memcg(page);
	else
		VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? thp_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock (a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
			__inc_lruvec_page_state(page, NR_ANON_THPS);
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	}

	if (unlikely(PageKsm(page))) {
		unlock_page_memcg(page);
		return;
	}

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}
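
/*
 * Worked example of the mapcount accounting above: _mapcount and
 * compound_mapcount are stored biased by -1, so an unmapped page reads -1,
 * the first atomic_inc_and_test() moves the count to 0 and returns true
 * (the "first" mapping, which bumps the statistics), and every further
 * mapping just increments the count without touching the counters again.
 */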

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? thp_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		if (hpage_pincount_available(page))
			atomic_set(compound_pincount_ptr(page), 0);

		__inc_lruvec_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		if (PageSwapBacked(page))
			__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
		else
			__inc_node_page_state(page, NR_FILE_PMDMAPPED);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		return;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			return;
		if (PageSwapBacked(page))
			__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
		else
			__dec_node_page_state(page, NR_FILE_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			return;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock (a spinlock) is held, which implies preemption disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_lruvec_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}

		/*
		 * Queue the page for deferred split if at least one small
		 * page of the compound page is unmapped, but at least one
		 * small page is still mapped.
		 */
		if (nr && nr < thp_nr_pages(page))
			deferred_split_huge_page(page);
	} else {
		nr = thp_nr_pages(page);
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr)
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	lock_page_memcg(page);

	if (!PageAnon(page)) {
		page_remove_file_rmap(page, compound);
		goto out;
	}

	if (compound) {
		page_remove_anon_compound_rmap(page);
		goto out;
	}

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock (a spinlock) is held, which implies preemption disabled.
	 */
	__dec_lruvec_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_unref_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
out:
	unlock_page_memcg(page);
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return true;

	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
	    is_zone_device_page(page) && !is_device_private_page(page))
		return true;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_SPLIT_FREEZE, page);
	}

	/*
	 * For THP, we have to assume the worst case, i.e. pmd, for
	 * invalidation. For hugetlb, it could be much worse if we need to do
	 * pud invalidation in the case of pmd sharing.
	 *
	 * Note that the page cannot be freed in this function as a call to
	 * try_to_unmap() must hold a reference on the page.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address,
				min(vma->vm_end, address + page_size(page)));
	if (PageHuge(page)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte && (flags & TTU_MIGRATION)) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);

			set_pmd_migration_entry(&pvmw, page);
			continue;
		}
#endif

		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_lock here
					 */
					mlock_vma_page(page);
				}
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (PageHuge(page) && !PageAnon(page)) {
			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode.  Caller needs to explicitly
			 * do this outside rmap routines.
			 */
			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page.  There is no way of knowing exactly
				 * which PMDs may be cached for this mm, so
				 * we must flush them all.  start/end were
				 * already adjusted above to cover this range.
				 */
				flush_cache_range(vma, range.start, range.end);
				flush_tlb_range(vma, range.start, range.end);
				mmu_notifier_invalidate_range(mm, range.start,
							      range.end);

				/*
				 * The ref count of the PMD page was dropped
				 * which is part of the way map counting
				 * is done for shared PMDs.  Return 'true'
				 * here.  When there is no other sharing,
				 * huge_pmd_unshare returns false and we will
				 * unmap the actual page and drop map count
				 * to zero.
				 */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		if (IS_ENABLED(CONFIG_MIGRATION) &&
		    (flags & TTU_MIGRATION) &&
		    is_zone_device_page(page)) {
			swp_entry_t entry;
			pte_t swp_pte;

			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(page, 0);
			swp_pte = swp_entry_to_pte(entry);

			/*
			 * pteval maps a zone device page and is therefore
			 * a swap pte.
			 */
			if (pte_swp_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_swp_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here, it will synchronize
			 * against the special swap migration pte.
			 *
			 * The assignment to subpage above was computed from a
			 * swap PTE which results in an invalid pointer.
			 * Since only PAGE_SIZE pages can currently be
			 * migrated, just set it to page. This will need to be
			 * changed when hugepage migrations to device private
			 * memory are supported.
			 */
			subpage = page;
			goto discard;
		}

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				hugetlb_count_sub(compound_nr(page), mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * this page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
			dec_mm_counter(mm, mm_counter(page));
			/* We have to invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
			swp_entry_t entry;
			pte_t swp_pte;

			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here, it will synchronize
			 * against the special swap migration pte.
			 */
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;
				/* We have to invalidate as we cleared the pte */
				mmu_notifier_invalidate_range(mm, address,
							address + PAGE_SIZE);
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					/* Invalidate as we cleared the pte */
					mmu_notifier_invalidate_range(mm,
						address, address + PAGE_SIZE);
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			/*
			 * This is a locked file-backed page, thus it cannot
			 * be removed from the page cache and replaced by a new
			 * page before mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table to
			 * point at a new page while a device still is using
			 * this page.
			 *
			 * See Documentation/vm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(page));
		}
discard:
		/*
		 * No need to call mmu_notifier_invalidate_range() as it has
		 * been done above for all cases requiring it to happen under
		 * the page table lock before
		 * mmu_notifier_invalidate_range_end().
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */
bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
	    && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);

	return !page_mapcount(page) ? true : false;
}
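
/*
 * Illustrative sketch of a typical caller (simplified from the reclaim
 * path): the page is locked, unmapped from all its ptes, and only freed
 * if the rmap walk really removed every mapping:
 *
 *	if (page_mapped(page)) {
 *		if (!try_to_unmap(page, flags | TTU_BATCH_FLUSH))
 *			goto activate_locked;	// hypothetical label
 *	}
 */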

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 */
void try_to_munlock(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	rmap_walk(page, &rwc);
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_lock. Users without mmap_lock are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_lock of the mm containing the
 * vma where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;

	if (locked) {
		anon_vma = page_anon_vma(page);
		/* anon_vma disappear under us? */
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
	if (!anon_vma)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(page))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_lock of the mm containing the
 * vma where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}

void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		rmap_walk_anon(page, rwc, false);
	else
		rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		rmap_walk_anon(page, rwc, true);
	else
		rmap_walk_file(page, rwc, true);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following two functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__page_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	if (hpage_pincount_available(page))
		atomic_set(compound_pincount_ptr(page), 0);

	__page_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */